# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
__doctest_skip__ = ["quantity_support"]
def quantity_support(format="latex_inline"):
"""
Enable support for plotting `astropy.units.Quantity` instances in
matplotlib.
May be (optionally) used with a ``with`` statement.
>>> import matplotlib.pyplot as plt
>>> from astropy import units as u
>>> from astropy import visualization
>>> with visualization.quantity_support():
... plt.figure()
... plt.plot([1, 2, 3] * u.m)
[...]
... plt.plot([101, 125, 150] * u.cm)
[...]
... plt.draw()
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to ``latex_inline``.
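    Notes
    -----
    If called without a ``with`` statement, the returned converter remains
    registered with Matplotlib, so `~astropy.units.Quantity` support stays
    enabled until the converter's ``__exit__`` method is called.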
"""
from matplotlib import ticker, units
from astropy import units as u
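    # Tick formatter for radian-valued axes: round the tick value to the
    # nearest integer multiple n of pi/2 (the +0.25 below absorbs floating
    # point error before int() truncates) and render it as a fraction of pi,
    # e.g. n=2 -> "π", n=3 -> "3π/2".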
def rad_fn(x, pos=None):
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return "0"
elif n == 1:
return "π/2"
elif n == 2:
return "π"
elif n % 2 == 0:
return f"{n // 2}π"
else:
return f"{n}π/2"
class MplQuantityConverter(units.ConversionInterface):
def __init__(self):
# Keep track of original converter in case the context manager is
# used in a nested way.
self._original_converter = {u.Quantity: units.registry.get(u.Quantity)}
units.registry[u.Quantity] = self
@staticmethod
def axisinfo(unit, axis):
if unit == u.radian:
return units.AxisInfo(
majloc=ticker.MultipleLocator(base=np.pi / 2),
majfmt=ticker.FuncFormatter(rad_fn),
label=unit.to_string(),
)
elif unit == u.degree:
return units.AxisInfo(
majloc=ticker.AutoLocator(),
majfmt=ticker.FormatStrFormatter("%i°"),
label=unit.to_string(),
)
elif unit is not None:
return units.AxisInfo(label=unit.to_string(format))
return None
@staticmethod
def convert(val, unit, axis):
if isinstance(val, u.Quantity):
return val.to_value(unit)
elif isinstance(val, list) and val and isinstance(val[0], u.Quantity):
return [v.to_value(unit) for v in val]
else:
return val
@staticmethod
def default_units(x, axis):
if hasattr(x, "unit"):
return x.unit
return None
def __enter__(self):
return self
def __exit__(self, type, value, tb):
if self._original_converter[u.Quantity] is None:
del units.registry[u.Quantity]
else:
units.registry[u.Quantity] = self._original_converter[u.Quantity]
return MplQuantityConverter()
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from datetime import datetime
import numpy as np
from astropy import units as u
from astropy.time import Time
__all__ = ["time_support"]
__doctest_requires__ = {"time_support": ["matplotlib"]}
UNSUPPORTED_FORMATS = ("datetime", "datetime64")
YMDHMS_FORMATS = ("fits", "iso", "isot", "yday")
STR_FORMATS = YMDHMS_FORMATS + ("byear_str", "jyear_str")
def time_support(*, scale=None, format=None, simplify=True):
"""
Enable support for plotting `astropy.time.Time` instances in
matplotlib.
May be (optionally) used with a ``with`` statement.
>>> import matplotlib.pyplot as plt
>>> from astropy import units as u
    >>> from astropy import visualization
    >>> from astropy.time import Time
>>> with visualization.time_support(): # doctest: +IGNORE_OUTPUT
... plt.figure()
... plt.plot(Time(['2016-03-22T12:30:31', '2016-03-22T12:30:38', '2016-03-22T12:34:40']))
... plt.draw()
Parameters
----------
scale : str, optional
The time scale to use for the times on the axis. If not specified,
the scale of the first Time object passed to Matplotlib is used.
format : str, optional
The time format to use for the times on the axis. If not specified,
the format of the first Time object passed to Matplotlib is used.
simplify : bool, optional
If possible, simplify labels, e.g. by removing 00:00:00.000 times from
ISO strings if all labels fall on that time.
"""
import matplotlib.units as units
from matplotlib.ticker import MaxNLocator, ScalarFormatter
from astropy.visualization.wcsaxes.utils import select_step_hour, select_step_scalar
class AstropyTimeLocator(MaxNLocator):
# Note: we default to AutoLocator since many time formats
# can just use this.
def __init__(self, converter, *args, **kwargs):
kwargs["nbins"] = 4
super().__init__(*args, **kwargs)
self._converter = converter
def tick_values(self, vmin, vmax):
# Where we put the ticks depends on the format we are using
if self._converter.format in YMDHMS_FORMATS:
# If we are here, we need to check what the range of values
# is and decide how to find tick locations accordingly
vrange = vmax - vmin
if (
self._converter.format != "yday" and vrange > 31
) or vrange > 366: # greater than a month
# We need to be careful here since not all years and months have
# the same length
# Start off by converting the values from the range to
# datetime objects, so that we can easily extract the year and
# month.
tmin = Time(
vmin, scale=self._converter.scale, format="mjd"
).datetime
tmax = Time(
vmax, scale=self._converter.scale, format="mjd"
).datetime
# Find the range of years
ymin = tmin.year
ymax = tmax.year
if ymax > ymin + 1: # greater than a year
# Find the step we want to use
ystep = int(select_step_scalar(max(1, (ymax - ymin) / 3)))
ymin = ystep * (ymin // ystep)
# Generate the years for these steps
times = []
for year in range(ymin, ymax + 1, ystep):
times.append(datetime(year=year, month=1, day=1))
else: # greater than a month but less than a year
mmin = tmin.month
mmax = tmax.month + 12 * (ymax - ymin)
mstep = int(select_step_scalar(max(1, (mmax - mmin) / 3)))
mmin = mstep * max(1, mmin // mstep)
# Generate the months for these steps
times = []
for month in range(mmin, mmax + 1, mstep):
times.append(
datetime(
year=ymin + (month - 1) // 12,
month=(month - 1) % 12 + 1,
day=1,
)
)
# Convert back to MJD
values = Time(times, scale=self._converter.scale).mjd
elif vrange > 1: # greater than a day
self.set_params(steps=[1, 2, 5, 10])
values = super().tick_values(vmin, vmax)
else:
# Determine ideal step
dv = (vmax - vmin) / 3 * 24 << u.hourangle
# And round to nearest sensible value
dv = select_step_hour(dv).to_value(u.hourangle) / 24
# Determine tick locations
imin = np.ceil(vmin / dv)
imax = np.floor(vmax / dv)
values = np.arange(imin, imax + 1, dtype=np.int64) * dv
else:
values = super().tick_values(vmin, vmax)
# Get rid of values outside of the input interval
values = values[(values >= vmin) & (values <= vmax)]
return values
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
class AstropyTimeFormatter(ScalarFormatter):
def __init__(self, converter, *args, **kwargs):
super().__init__(*args, **kwargs)
self._converter = converter
self.set_useOffset(False)
self.set_scientific(False)
def format_ticks(self, values):
if len(values) == 0:
return []
if self._converter.format in YMDHMS_FORMATS:
times = Time(values, format="mjd", scale=self._converter.scale)
formatted = getattr(times, self._converter.format)
if self._converter.simplify:
if self._converter.format in ("fits", "iso", "isot"):
if all(x.endswith("00:00:00.000") for x in formatted):
split = " " if self._converter.format == "iso" else "T"
formatted = [x.split(split)[0] for x in formatted]
elif self._converter.format == "yday":
if all(x.endswith(":001:00:00:00.000") for x in formatted):
formatted = [x.split(":", 1)[0] for x in formatted]
return formatted
elif self._converter.format == "byear_str":
return Time(
values, format="byear", scale=self._converter.scale
).byear_str
elif self._converter.format == "jyear_str":
return Time(
values, format="jyear", scale=self._converter.scale
).jyear_str
else:
return super().format_ticks(values)
class MplTimeConverter(units.ConversionInterface):
def __init__(self, scale=None, format=None, simplify=None):
super().__init__()
self.format = format
self.scale = scale
self.simplify = simplify
# Keep track of original converter in case the context manager is
# used in a nested way.
self._original_converter = units.registry.get(Time)
units.registry[Time] = self
@property
def format(self):
return self._format
@format.setter
def format(self, value):
if value in UNSUPPORTED_FORMATS:
raise ValueError(f"time_support does not support format={value}")
self._format = value
def __enter__(self):
return self
def __exit__(self, type, value, tb):
if self._original_converter is None:
del units.registry[Time]
else:
units.registry[Time] = self._original_converter
def default_units(self, x, axis):
if isinstance(x, tuple):
x = x[0]
if self.format is None:
self.format = x.format
if self.scale is None:
self.scale = x.scale
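            # Matplotlib only uses the returned "unit" as a key for routing
            # values through this converter's ``convert``/``axisinfo`` methods,
            # so a constant placeholder string is sufficient here.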
return "astropy_time"
def convert(self, value, unit, axis):
"""
Convert a Time value to a scalar or array.
"""
scaled = getattr(value, self.scale)
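            # Calendar-style (ymdhms) formats are plotted as MJD floats; the
            # locator and formatter defined above convert these back into
            # readable date strings for the tick labels.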
if self.format in YMDHMS_FORMATS:
return scaled.mjd
elif self.format == "byear_str":
return scaled.byear
elif self.format == "jyear_str":
return scaled.jyear
else:
return getattr(scaled, self.format)
def axisinfo(self, unit, axis):
"""
Return major and minor tick locators and formatters.
"""
majloc = AstropyTimeLocator(self)
majfmt = AstropyTimeFormatter(self)
return units.AxisInfo(
majfmt=majfmt, majloc=majloc, label=f"Time ({self.scale})"
)
return MplTimeConverter(scale=scale, format=format, simplify=simplify)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from copy import deepcopy
from functools import wraps
from inspect import signature
from itertools import islice
from astropy.utils.exceptions import AstropyUserWarning
from .nddata import NDData
__all__ = ["support_nddata"]
# All supported properties are optional except "data" which is mandatory!
SUPPORTED_PROPERTIES = ["data", "uncertainty", "mask", "meta", "unit", "wcs", "flags"]
def support_nddata(
_func=None,
accepts=NDData,
repack=False,
returns=None,
keeps=None,
**attribute_argument_mapping
):
"""Decorator to wrap functions that could accept an NDData instance with
its properties passed as function arguments.
Parameters
----------
_func : callable, None, optional
The function to decorate or ``None`` if used as factory. The first
positional argument should be ``data`` and take a numpy array. It is
possible to overwrite the name, see ``attribute_argument_mapping``
argument.
Default is ``None``.
accepts : class, optional
The class or subclass of ``NDData`` that should be unpacked before
calling the function.
Default is ``NDData``
repack : bool, optional
Should be ``True`` if the return should be converted to the input
class again after the wrapped function call.
Default is ``False``.
.. note::
Must be ``True`` if either one of ``returns`` or ``keeps``
is specified.
returns : iterable, None, optional
        An iterable of strings indicating which returned values should be set
        as attributes on the repacked class. For example, if a function returns
        data and mask, this should be ``['data', 'mask']``. If ``None``, assume
        the function only returns one value: ``'data'``.
Default is ``None``.
.. note::
Must be ``None`` if ``repack=False``.
    keeps : iterable, None, optional
An iterable containing strings that indicate which values should be
copied from the original input to the returned class. If ``None``
assume that no attributes are copied.
Default is ``None``.
.. note::
Must be ``None`` if ``repack=False``.
attribute_argument_mapping :
Keyword parameters that optionally indicate which function argument
should be interpreted as which attribute on the input. By default
it assumes the function takes a ``data`` argument as first argument,
but if the first argument is called ``input`` one should pass
``support_nddata(..., data='input')`` to the function.
Returns
-------
decorator_factory or decorated_function : callable
If ``_func=None`` this returns a decorator, otherwise it returns the
decorated ``_func``.
Notes
-----
    If properties of ``NDData`` are set but have no corresponding function
    argument, a Warning is shown.
    If a property of the ``NDData`` is set and an explicit argument is given
    as well, the explicitly given argument is used and a Warning is shown.
The supported properties are:
- ``mask``
- ``unit``
- ``wcs``
- ``meta``
- ``uncertainty``
- ``flags``
Examples
--------
This function takes a Numpy array for the data, and some WCS information
with the ``wcs`` keyword argument::
def downsample(data, wcs=None):
# downsample data and optionally WCS here
pass
However, you might have an NDData instance that has the ``wcs`` property
set and you would like to be able to call the function with
``downsample(my_nddata)`` and have the WCS information, if present,
automatically be passed to the ``wcs`` keyword argument.
This decorator can be used to make this possible::
@support_nddata
def downsample(data, wcs=None):
# downsample data and optionally WCS here
pass
This function can now either be called as before, specifying the data and
WCS separately, or an NDData instance can be passed to the ``data``
argument.
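
    When ``repack=True`` the decorated function's outputs are packed back into
    an instance of the input class. A hypothetical sketch (``square`` is an
    illustrative function, not part of astropy)::

        @support_nddata(repack=True, returns=['data', 'mask'])
        def square(data, mask=None):
            # work on plain arrays; the decorator unpacks and repacks NDData
            return data ** 2, mask

    Calling ``square`` with an NDData instance then returns a new instance of
    the same class whose ``data`` is squared and whose ``mask`` is carried
    through.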
"""
if (returns is not None or keeps is not None) and not repack:
raise ValueError("returns or keeps should only be set if repack=True.")
elif returns is None and repack:
raise ValueError("returns should be set if repack=True.")
else:
# Use empty lists for returns and keeps so we don't need to check
# if any of those is None later on.
if returns is None:
returns = []
if keeps is None:
keeps = []
# Short version to avoid the long variable name later.
attr_arg_map = attribute_argument_mapping
if any(keep in returns for keep in keeps):
raise ValueError("cannot specify the same attribute in `returns` and `keeps`.")
all_returns = returns + keeps
def support_nddata_decorator(func):
# Find out args and kwargs
func_args, func_kwargs = [], []
sig = signature(func).parameters
for param_name, param in sig.items():
if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
raise ValueError("func may not have *args or **kwargs.")
try:
if param.default == param.empty:
func_args.append(param_name)
else:
func_kwargs.append(param_name)
# The comparison to param.empty may fail if the default is a
            # numpy array or something similar. So if the comparison fails then
            # it's quite obvious that there was a default and it should be
            # appended to the "func_kwargs".
except ValueError as exc:
if (
"The truth value of an array with more than one element "
"is ambiguous." in str(exc)
):
func_kwargs.append(param_name)
else:
raise
# First argument should be data
if not func_args or func_args[0] != attr_arg_map.get("data", "data"):
raise ValueError(
"Can only wrap functions whose first positional "
"argument is `{}`"
"".format(attr_arg_map.get("data", "data"))
)
@wraps(func)
def wrapper(data, *args, **kwargs):
bound_args = signature(func).bind(data, *args, **kwargs)
unpack = isinstance(data, accepts)
input_data = data
ignored = []
if not unpack and isinstance(data, NDData):
raise TypeError(
"Only NDData sub-classes that inherit from {}"
" can be used by this function"
"".format(accepts.__name__)
)
# If data is an NDData instance, we can try and find properties
# that can be passed as kwargs.
if unpack:
# We loop over a list of pre-defined properties
for prop in islice(SUPPORTED_PROPERTIES, 1, None):
# We only need to do something if the property exists on
# the NDData object
try:
value = getattr(data, prop)
except AttributeError:
continue
# Skip if the property exists but is None or empty.
if prop == "meta" and not value:
continue
elif value is None:
continue
# Warn if the property is set but not used by the function.
propmatch = attr_arg_map.get(prop, prop)
if propmatch not in func_kwargs:
ignored.append(prop)
continue
# Check if the property was explicitly given and issue a
# Warning if it is.
if propmatch in bound_args.arguments:
                        # If it's in the func_args it's trivial but if it was
                        # in the func_kwargs we need to compare it to the
                        # default.
                        # Comparison to the default is done by comparing their
                        # identity; this works because defaults in function
                        # signatures are only created once and always reference
                        # the same item.
                        # FIXME: Python interns some values, for example the
                        # integers from -5 to 255 (and maybe some other types
                        # as well). In that case the default is
                        # indistinguishable from an explicitly passed kwarg,
                        # and the wrapper won't notice that and will use the
                        # attribute of the NDData instead.
if propmatch in func_args or (
propmatch in func_kwargs
and (
bound_args.arguments[propmatch]
is not sig[propmatch].default
)
):
warnings.warn(
"Property {} has been passed explicitly and "
"as an NDData property{}, using explicitly "
"specified value"
"".format(
propmatch, "" if prop == propmatch else " " + prop
),
AstropyUserWarning,
)
continue
# Otherwise use the property as input for the function.
kwargs[propmatch] = value
# Finally, replace data by the data attribute
data = data.data
if ignored:
warnings.warn(
"The following attributes were set on the "
"data object, but will be ignored by the "
"function: " + ", ".join(ignored),
AstropyUserWarning,
)
result = func(data, *args, **kwargs)
if unpack and repack:
# If there are multiple required returned arguments make sure
# the result is a tuple (because we don't want to unpack
# numpy arrays or compare their length, never!) and has the
# same length.
if len(returns) > 1:
if not isinstance(result, tuple) or len(returns) != len(result):
raise ValueError(
"Function did not return the expected number of arguments."
)
elif len(returns) == 1:
result = [result]
if keeps is not None:
for keep in keeps:
result.append(deepcopy(getattr(input_data, keep)))
resultdata = result[all_returns.index("data")]
resultkwargs = {
ret: res for ret, res in zip(all_returns, result) if ret != "data"
}
return input_data.__class__(resultdata, **resultkwargs)
else:
return result
return wrapper
# If _func is set, this means that the decorator was used without
# parameters so we have to return the result of the
# support_nddata_decorator decorator rather than the decorator itself
if _func is not None:
return support_nddata_decorator(_func)
else:
return support_nddata_decorator
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import weakref
from abc import ABCMeta, abstractmethod
from copy import deepcopy
import numpy as np
# from astropy.utils.compat import ignored
from astropy import log
from astropy.units import Quantity, Unit, UnitConversionError
__all__ = [
"MissingDataAssociationException",
"IncompatibleUncertaintiesException",
"NDUncertainty",
"StdDevUncertainty",
"UnknownUncertainty",
"VarianceUncertainty",
"InverseVariance",
]
# mapping from collapsing operations to the complementary methods used for `to_variance`
collapse_to_variance_mapping = {
np.sum: np.square,
np.mean: np.square,
}
def _move_preserved_axes_first(arr, preserve_axes):
# When collapsing an ND array and preserving M axes, move the
# preserved axes to the first M axes of the output. For example,
# if arr.shape == (6, 5, 4, 3, 2) and we're preserving axes (1, 2),
# then the output should have shape (20, 6, 3, 2). Axes 1 and 2 have
# shape 5 and 4, so we take their product and put them both in the zeroth
# axis.
zeroth_axis_after_reshape = np.prod(np.array(arr.shape)[list(preserve_axes)])
collapse_axes = [i for i in range(arr.ndim) if i not in preserve_axes]
return arr.reshape(
[zeroth_axis_after_reshape] + np.array(arr.shape)[collapse_axes].tolist()
)
def _unravel_preserved_axes(arr, collapsed_arr, preserve_axes):
# After reshaping an array with _move_preserved_axes_first and collapsing
# the result, convert the reshaped first axis back into the shape of each
# of the original preserved axes.
# For example, if arr.shape == (6, 5, 4, 3, 2) and we're preserving axes (1, 2),
# then the output of _move_preserved_axes_first should have shape (20, 6, 3, 2).
# This method unravels the first axis in the output *after* a collapse, so the
# output with shape (20,) becomes shape (5, 4).
if collapsed_arr.ndim != len(preserve_axes):
arr_shape = np.array(arr.shape)
return collapsed_arr.reshape(arr_shape[np.asarray(preserve_axes)])
return collapsed_arr
def from_variance_for_mean(x, axis):
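    # Uncertainty of a mean from summed variances: sqrt(sum of variances) / N,
    # where N is the number of unmasked values that were averaged.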
if axis is None:
# do operation on all dimensions:
denom = np.ma.count(x)
else:
denom = np.ma.count(x, axis)
return np.sqrt(np.ma.sum(x, axis)) / denom
# mapping from collapsing operations to the complementary methods used for `from_variance`
collapse_from_variance_mapping = {
np.sum: lambda x, axis: np.sqrt(np.ma.sum(x, axis)),
np.mean: from_variance_for_mean,
np.median: None,
}
class IncompatibleUncertaintiesException(Exception):
"""This exception should be used to indicate cases in which uncertainties
with two different classes can not be propagated.
"""
class MissingDataAssociationException(Exception):
"""This exception should be used to indicate that an uncertainty instance
has not been associated with a parent `~astropy.nddata.NDData` object.
"""
class NDUncertainty(metaclass=ABCMeta):
"""This is the metaclass for uncertainty classes used with `NDData`.
Parameters
----------
array : any type, optional
The array or value (the parameter name is due to historical reasons) of
the uncertainty. `numpy.ndarray`, `~astropy.units.Quantity` or
`NDUncertainty` subclasses are recommended.
If the `array` is `list`-like or `numpy.ndarray`-like it will be cast
to a plain `numpy.ndarray`.
Default is ``None``.
unit : unit-like, optional
Unit for the uncertainty ``array``. Strings that can be converted to a
`~astropy.units.Unit` are allowed.
Default is ``None``.
copy : `bool`, optional
Indicates whether to save the `array` as a copy. ``True`` copies it
before saving, while ``False`` tries to save every parameter as
reference. Note however that it is not always possible to save the
input as reference.
Default is ``True``.
Raises
------
IncompatibleUncertaintiesException
        If given another `NDUncertainty`-like class as ``array`` whose
        ``uncertainty_type`` is different.
"""
def __init__(self, array=None, copy=True, unit=None):
if isinstance(array, NDUncertainty):
# Given an NDUncertainty class or subclass check that the type
# is the same.
if array.uncertainty_type != self.uncertainty_type:
raise IncompatibleUncertaintiesException
# Check if two units are given and take the explicit one then.
if unit is not None and unit != array._unit:
# TODO : Clarify it (see NDData.init for same problem)?
log.info("overwriting Uncertainty's current unit with specified unit.")
elif array._unit is not None:
unit = array.unit
array = array.array
elif isinstance(array, Quantity):
# Check if two units are given and take the explicit one then.
if unit is not None and array.unit is not None and unit != array.unit:
log.info("overwriting Quantity's current unit with specified unit.")
elif array.unit is not None:
unit = array.unit
array = array.value
if unit is None:
self._unit = None
else:
self._unit = Unit(unit)
if copy:
array = deepcopy(array)
unit = deepcopy(unit)
self.array = array
self.parent_nddata = None # no associated NDData - until it is set!
@property
@abstractmethod
def uncertainty_type(self):
"""`str` : Short description of the type of uncertainty.
Defined as abstract property so subclasses *have* to override this.
"""
return None
@property
def supports_correlated(self):
"""`bool` : Supports uncertainty propagation with correlated uncertainties?
.. versionadded:: 1.2
"""
return False
@property
def array(self):
"""`numpy.ndarray` : the uncertainty's value."""
return self._array
@array.setter
def array(self, value):
if isinstance(value, (list, np.ndarray)):
value = np.array(value, subok=False, copy=False)
self._array = value
@property
def unit(self):
"""`~astropy.units.Unit` : The unit of the uncertainty, if any."""
return self._unit
@unit.setter
def unit(self, value):
"""
The unit should be set to a value consistent with the parent NDData
unit and the uncertainty type.
"""
if value is not None:
# Check the hidden attribute below, not the property. The property
# raises an exception if there is no parent_nddata.
if self._parent_nddata is not None:
parent_unit = self.parent_nddata.unit
try:
# Check for consistency with the unit of the parent_nddata
self._data_unit_to_uncertainty_unit(parent_unit).to(value)
except UnitConversionError:
raise UnitConversionError(
"Unit {} is incompatible with unit {} of parent nddata".format(
value, parent_unit
)
)
self._unit = Unit(value)
else:
self._unit = value
@property
def quantity(self):
"""
This uncertainty as an `~astropy.units.Quantity` object.
"""
return Quantity(self.array, self.unit, copy=False, dtype=self.array.dtype)
@property
def parent_nddata(self):
"""`NDData` : reference to `NDData` instance with this uncertainty.
In case the reference is not set uncertainty propagation will not be
possible since propagation might need the uncertain data besides the
uncertainty.
"""
no_parent_message = "uncertainty is not associated with an NDData object"
parent_lost_message = (
"the associated NDData object was deleted and cannot be accessed "
"anymore. You can prevent the NDData object from being deleted by "
"assigning it to a variable. If this happened after unpickling "
"make sure you pickle the parent not the uncertainty directly."
)
try:
parent = self._parent_nddata
except AttributeError:
raise MissingDataAssociationException(no_parent_message)
else:
if parent is None:
raise MissingDataAssociationException(no_parent_message)
else:
# The NDData is saved as weak reference so we must call it
# to get the object the reference points to. However because
# we have a weak reference here it's possible that the parent
# was deleted because its reference count dropped to zero.
if isinstance(self._parent_nddata, weakref.ref):
resolved_parent = self._parent_nddata()
if resolved_parent is None:
log.info(parent_lost_message)
return resolved_parent
else:
log.info("parent_nddata should be a weakref to an NDData object.")
return self._parent_nddata
@parent_nddata.setter
def parent_nddata(self, value):
if value is not None and not isinstance(value, weakref.ref):
# Save a weak reference on the uncertainty that points to this
# instance of NDData. Direct references should NOT be used:
# https://github.com/astropy/astropy/pull/4799#discussion_r61236832
value = weakref.ref(value)
# Set _parent_nddata here and access below with the property because value
# is a weakref
self._parent_nddata = value
# set uncertainty unit to that of the parent if it was not already set, unless initializing
        # with empty parent (value=None)
if value is not None:
parent_unit = self.parent_nddata.unit
# this will get the unit for masked quantity input:
parent_data_unit = getattr(self.parent_nddata.data, "unit", None)
if parent_unit is None and parent_data_unit is None:
self.unit = None
elif self.unit is None and parent_unit is not None:
# Set the uncertainty's unit to the appropriate value
self.unit = self._data_unit_to_uncertainty_unit(parent_unit)
elif parent_data_unit is not None:
# if the parent_nddata object has a unit, use it:
self.unit = self._data_unit_to_uncertainty_unit(parent_data_unit)
else:
# Check that units of uncertainty are compatible with those of
# the parent. If they are, no need to change units of the
# uncertainty or the data. If they are not, let the user know.
unit_from_data = self._data_unit_to_uncertainty_unit(parent_unit)
try:
unit_from_data.to(self.unit)
except UnitConversionError:
raise UnitConversionError(
"Unit {} of uncertainty "
"incompatible with unit {} of "
"data".format(self.unit, parent_unit)
)
@abstractmethod
def _data_unit_to_uncertainty_unit(self, value):
"""
Subclasses must override this property. It should take in a data unit
and return the correct unit for the uncertainty given the uncertainty
type.
"""
return None
def __repr__(self):
prefix = self.__class__.__name__ + "("
try:
body = np.array2string(self.array, separator=", ", prefix=prefix)
except AttributeError:
# In case it wasn't possible to use array2string
body = str(self.array)
return "".join([prefix, body, ")"])
def __getstate__(self):
# Because of the weak reference the class wouldn't be picklable.
try:
return self._array, self._unit, self.parent_nddata
except MissingDataAssociationException:
# In case there's no parent
return self._array, self._unit, None
def __setstate__(self, state):
if len(state) != 3:
raise TypeError("The state should contain 3 items.")
self._array = state[0]
self._unit = state[1]
parent = state[2]
if parent is not None:
parent = weakref.ref(parent)
self._parent_nddata = parent
def __getitem__(self, item):
"""Normal slicing on the array, keep the unit and return a reference."""
return self.__class__(self.array[item], unit=self.unit, copy=False)
def propagate(self, operation, other_nddata, result_data, correlation, axis=None):
"""Calculate the resulting uncertainty given an operation on the data.
.. versionadded:: 1.2
Parameters
----------
operation : callable
The operation that is performed on the `NDData`. Supported are
`numpy.add`, `numpy.subtract`, `numpy.multiply` and
`numpy.true_divide` (or `numpy.divide`).
other_nddata : `NDData` instance
The second operand in the arithmetic operation.
result_data : `~astropy.units.Quantity` or ndarray
The result of the arithmetic operations on the data.
correlation : `numpy.ndarray` or number
The correlation (rho) is defined between the uncertainties in
sigma_AB = sigma_A * sigma_B * rho. A value of ``0`` means
uncorrelated operands.
axis : int or tuple of ints, optional
Axis over which to perform a collapsing operation.
Returns
-------
resulting_uncertainty : `NDUncertainty` instance
Another instance of the same `NDUncertainty` subclass containing
the uncertainty of the result.
Raises
------
ValueError
If the ``operation`` is not supported or if correlation is not zero
but the subclass does not support correlated uncertainties.
Notes
-----
First this method checks if a correlation is given and the subclass
implements propagation with correlated uncertainties.
Then the second uncertainty is converted (or an Exception is raised)
to the same class in order to do the propagation.
Then the appropriate propagation method is invoked and the result is
returned.
"""
# Check if the subclass supports correlation
if not self.supports_correlated:
if isinstance(correlation, np.ndarray) or correlation != 0:
raise ValueError(
"{} does not support uncertainty propagation"
" with correlation."
"".format(self.__class__.__name__)
)
if other_nddata is not None:
# Get the other uncertainty (and convert it to a matching one)
other_uncert = self._convert_uncertainty(other_nddata.uncertainty)
if operation.__name__ == "add":
result = self._propagate_add(other_uncert, result_data, correlation)
elif operation.__name__ == "subtract":
result = self._propagate_subtract(
other_uncert, result_data, correlation
)
elif operation.__name__ == "multiply":
result = self._propagate_multiply(
other_uncert, result_data, correlation
)
elif operation.__name__ in ["true_divide", "divide"]:
result = self._propagate_divide(other_uncert, result_data, correlation)
else:
raise ValueError(f"unsupported operation: {operation.__name__}")
else:
# assume this is a collapsing operation:
result = self._propagate_collapse(operation, axis)
return self.__class__(result, copy=False)
def _convert_uncertainty(self, other_uncert):
"""Checks if the uncertainties are compatible for propagation.
Checks if the other uncertainty is `NDUncertainty`-like and if so
verify that the uncertainty_type is equal. If the latter is not the
case try returning ``self.__class__(other_uncert)``.
Parameters
----------
other_uncert : `NDUncertainty` subclass
The other uncertainty.
Returns
-------
other_uncert : `NDUncertainty` subclass
but converted to a compatible `NDUncertainty` subclass if
possible and necessary.
Raises
------
IncompatibleUncertaintiesException:
If the other uncertainty cannot be converted to a compatible
`NDUncertainty` subclass.
"""
if isinstance(other_uncert, NDUncertainty):
if self.uncertainty_type == other_uncert.uncertainty_type:
return other_uncert
else:
return self.__class__(other_uncert)
else:
raise IncompatibleUncertaintiesException
@abstractmethod
def _propagate_add(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_subtract(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_multiply(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_divide(self, other_uncert, result_data, correlation):
return None
def represent_as(self, other_uncert):
"""Convert this uncertainty to a different uncertainty type.
Parameters
----------
other_uncert : `NDUncertainty` subclass
The `NDUncertainty` subclass to convert to.
Returns
-------
resulting_uncertainty : `NDUncertainty` instance
An instance of ``other_uncert`` subclass containing the uncertainty
converted to the new uncertainty type.
Raises
------
TypeError
If either the initial or final subclasses do not support
conversion, a `TypeError` is raised.
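        Examples
        --------
        A standard deviation of 2 corresponds to a variance of 4 and hence an
        inverse variance of 0.25:

        >>> from astropy.nddata import StdDevUncertainty, InverseVariance
        >>> StdDevUncertainty([2.0]).represent_as(InverseVariance)  # doctest: +FLOAT_CMP
        InverseVariance([0.25])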
"""
as_variance = getattr(self, "_convert_to_variance", None)
if as_variance is None:
raise TypeError(
f"{type(self)} does not support conversion to another uncertainty type."
)
from_variance = getattr(other_uncert, "_convert_from_variance", None)
if from_variance is None:
raise TypeError(
f"{other_uncert.__name__} does not support conversion from "
"another uncertainty type."
)
return from_variance(as_variance())
class UnknownUncertainty(NDUncertainty):
"""This class implements any unknown uncertainty type.
The main purpose of having an unknown uncertainty class is to prevent
uncertainty propagation.
Parameters
----------
args, kwargs :
see `NDUncertainty`
"""
@property
def supports_correlated(self):
"""`False` : Uncertainty propagation is *not* possible for this class."""
return False
@property
def uncertainty_type(self):
"""``"unknown"`` : `UnknownUncertainty` implements any unknown \
uncertainty type.
"""
return "unknown"
def _data_unit_to_uncertainty_unit(self, value):
"""
No way to convert if uncertainty is unknown.
"""
return None
def _convert_uncertainty(self, other_uncert):
"""Raise an Exception because unknown uncertainty types cannot
implement propagation.
"""
msg = "Uncertainties of unknown type cannot be propagated."
raise IncompatibleUncertaintiesException(msg)
def _propagate_add(self, other_uncert, result_data, correlation):
"""Not possible for unknown uncertainty types."""
return None
def _propagate_subtract(self, other_uncert, result_data, correlation):
return None
def _propagate_multiply(self, other_uncert, result_data, correlation):
return None
def _propagate_divide(self, other_uncert, result_data, correlation):
return None
class _VariancePropagationMixin:
"""
Propagation of uncertainties for variances, also used to perform error
propagation for variance-like uncertainties (standard deviation and inverse
variance).
"""
def _propagate_collapse(self, numpy_op, axis=None):
"""
Error propagation for collapse operations on variance or
variance-like uncertainties. Uncertainties are calculated using the
formulae for variance but can be used for uncertainty convertible to
a variance.
Parameters
----------
numpy_op : function
            Numpy operation like `np.sum` or `np.max` to use in the collapse.
axis : tuple, optional
Axis on which to compute collapsing operations.
"""
try:
result_unit_sq = self.parent_nddata.unit**2
except (AttributeError, TypeError):
result_unit_sq = None
if self.array is not None:
# Formula: sigma**2 = dA
if numpy_op in [np.min, np.max]:
# Find the indices of the min/max in parent data along each axis,
# return the uncertainty at the corresponding entry:
return self._get_err_at_extremum(numpy_op, axis=axis)
# np.sum and np.mean operations use similar pattern
# to `_propagate_add_sub`, for example:
else:
# lookup the mapping for to_variance and from_variance for this
# numpy operation:
to_variance = collapse_to_variance_mapping[numpy_op]
from_variance = collapse_from_variance_mapping[numpy_op]
masked_uncertainty = np.ma.masked_array(
self.array, self.parent_nddata.mask
)
if (
self.unit is not None
and to_variance(self.unit) != self.parent_nddata.unit**2
):
# If the uncertainty has a different unit than the result we
# need to convert it to the results unit.
this = (
to_variance(masked_uncertainty << self.unit)
.to(result_unit_sq)
.value
)
else:
this = to_variance(masked_uncertainty)
return from_variance(this, axis=axis)
def _get_err_at_extremum(self, extremum, axis):
"""
Return the value of the ``uncertainty`` array at the indices
which satisfy the ``extremum`` function applied to the ``measurement`` array,
        where we expect ``extremum`` to be np.max or np.min (mapped internally
        to np.ma.argmax or np.ma.argmin), and we expect a two-dimensional output.
Assumes the ``measurement`` and ``uncertainty`` array dimensions
are ordered such that the zeroth dimension is the one to preserve.
For example, if you start with array with shape (a, b, c), this
function applies the ``extremum`` function to the last two dimensions,
with shapes b and c.
This operation is difficult to cast in a vectorized way. Here
we implement it with a list comprehension, which is likely not the
most performant solution.
"""
if axis is not None and not hasattr(axis, "__len__"):
# this is a single axis:
axis = [axis]
if extremum is np.min:
arg_extremum = np.ma.argmin
elif extremum is np.max:
arg_extremum = np.ma.argmax
all_axes = np.arange(self.array.ndim)
if axis is None:
# collapse over all dimensions
ind = arg_extremum(np.asanyarray(self.parent_nddata).ravel())
return self.array.ravel()[ind]
# collapse an ND array over arbitrary dimensions:
preserve_axes = [ax for ax in all_axes if ax not in axis]
meas = np.ma.masked_array(
_move_preserved_axes_first(self.parent_nddata.data, preserve_axes),
_move_preserved_axes_first(self.parent_nddata.mask, preserve_axes),
)
err = _move_preserved_axes_first(self.array, preserve_axes)
result = np.array(
[e[np.unravel_index(arg_extremum(m), m.shape)] for m, e in zip(meas, err)]
)
return _unravel_preserved_axes(
self.parent_nddata.data,
result,
preserve_axes,
)
def _propagate_add_sub(
self,
other_uncert,
result_data,
correlation,
subtract=False,
to_variance=lambda x: x,
from_variance=lambda x: x,
):
"""
Error propagation for addition or subtraction of variance or
variance-like uncertainties. Uncertainties are calculated using the
formulae for variance but can be used for uncertainty convertible to
a variance.
Parameters
----------
other_uncert : `~astropy.nddata.NDUncertainty` instance
The uncertainty, if any, of the other operand.
result_data : `~astropy.nddata.NDData` instance
The results of the operation on the data.
correlation : float or array-like
Correlation of the uncertainties.
subtract : bool, optional
If ``True``, propagate for subtraction, otherwise propagate for
addition.
to_variance : function, optional
Function that will transform the input uncertainties to variance.
The default assumes the uncertainty is the variance.
from_variance : function, optional
Function that will convert from variance to the input uncertainty.
The default assumes the uncertainty is the variance.
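
        Notes
        -----
        As a simple worked example: two standard deviations of 3 and 4 with
        zero correlation (so ``to_variance=np.square`` and
        ``from_variance=np.sqrt``) combine to ``sqrt(3**2 + 4**2) = 5``.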
"""
if subtract:
correlation_sign = -1
else:
correlation_sign = 1
try:
result_unit_sq = result_data.unit**2
except AttributeError:
result_unit_sq = None
if other_uncert.array is not None:
# Formula: sigma**2 = dB
if other_uncert.unit is not None and result_unit_sq != to_variance(
other_uncert.unit
):
# If the other uncertainty has a unit and this unit differs
# from the unit of the result convert it to the results unit
other = (
to_variance(other_uncert.array << other_uncert.unit)
.to(result_unit_sq)
.value
)
else:
other = to_variance(other_uncert.array)
else:
other = 0
if self.array is not None:
# Formula: sigma**2 = dA
if (
self.unit is not None
and to_variance(self.unit) != self.parent_nddata.unit**2
):
# If the uncertainty has a different unit than the result we
# need to convert it to the results unit.
this = to_variance(self.array << self.unit).to(result_unit_sq).value
else:
this = to_variance(self.array)
else:
this = 0
# Formula: sigma**2 = dA + dB +/- 2*cor*sqrt(dA*dB)
# Formula: sigma**2 = sigma_other + sigma_self +/- 2*cor*sqrt(dA*dB)
# (sign depends on whether addition or subtraction)
# Determine the result depending on the correlation
if isinstance(correlation, np.ndarray) or correlation != 0:
corr = 2 * correlation * np.sqrt(this * other)
result = this + other + correlation_sign * corr
else:
result = this + other
return from_variance(result)
def _propagate_multiply_divide(
self,
other_uncert,
result_data,
correlation,
divide=False,
to_variance=lambda x: x,
from_variance=lambda x: x,
):
"""
Error propagation for multiplication or division of variance or
variance-like uncertainties. Uncertainties are calculated using the
formulae for variance but can be used for uncertainty convertible to
a variance.
Parameters
----------
other_uncert : `~astropy.nddata.NDUncertainty` instance
The uncertainty, if any, of the other operand.
result_data : `~astropy.nddata.NDData` instance
The results of the operation on the data.
correlation : float or array-like
Correlation of the uncertainties.
divide : bool, optional
If ``True``, propagate for division, otherwise propagate for
multiplication.
to_variance : function, optional
Function that will transform the input uncertainties to variance.
The default assumes the uncertainty is the variance.
from_variance : function, optional
Function that will convert from variance to the input uncertainty.
The default assumes the uncertainty is the variance.
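
        Notes
        -----
        As a simple worked example: multiplying A = 10 +/- 1 by B = 5 +/- 0.5
        with zero correlation gives a variance of
        ``d_a*B**2 + d_b*A**2 = 1*25 + 0.25*100 = 50``, i.e. an uncertainty of
        roughly 7.07 on the product A*B = 50.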
"""
# For multiplication we don't need the result as quantity
if isinstance(result_data, Quantity):
result_data = result_data.value
if divide:
correlation_sign = -1
else:
correlation_sign = 1
if other_uncert.array is not None:
# We want the result to have a unit consistent with the parent, so
# we only need to convert the unit of the other uncertainty if it
# is different from its data's unit.
if (
other_uncert.unit
and to_variance(1 * other_uncert.unit)
!= ((1 * other_uncert.parent_nddata.unit) ** 2).unit
):
d_b = (
to_variance(other_uncert.array << other_uncert.unit)
.to((1 * other_uncert.parent_nddata.unit) ** 2)
.value
)
else:
d_b = to_variance(other_uncert.array)
# Formula: sigma**2 = |A|**2 * d_b
right = np.abs(self.parent_nddata.data**2 * d_b)
else:
right = 0
if self.array is not None:
# Just the reversed case
if (
self.unit
and to_variance(1 * self.unit)
!= ((1 * self.parent_nddata.unit) ** 2).unit
):
d_a = (
to_variance(self.array << self.unit)
.to((1 * self.parent_nddata.unit) ** 2)
.value
)
else:
d_a = to_variance(self.array)
# Formula: sigma**2 = |B|**2 * d_a
left = np.abs(other_uncert.parent_nddata.data**2 * d_a)
else:
left = 0
# Multiplication
#
# The fundamental formula is:
# sigma**2 = |AB|**2*(d_a/A**2+d_b/B**2+2*sqrt(d_a)/A*sqrt(d_b)/B*cor)
#
# This formula is not very handy since it generates NaNs for every
# zero in A and B. So we rewrite it:
#
        # Multiplication Formula:
        #   sigma**2 = d_a*B**2 + d_b*A**2 + 2 * cor * A * B * sqrt(d_a*d_b)
        #   sigma**2 = left + right + 2 * cor * A * B * sqrt(d_a*d_b)
#
# Division
#
# The fundamental formula for division is:
# sigma**2 = |A/B|**2*(d_a/A**2+d_b/B**2-2*sqrt(d_a)/A*sqrt(d_b)/B*cor)
#
# As with multiplication, it is convenient to rewrite this to avoid
# nans where A is zero.
#
# Division formula (rewritten):
# sigma**2 = d_a/B**2 + (A/B)**2 * d_b/B**2
        #              - 2 * cor * A * sqrt(d_a*d_b) / B**3
        #   sigma**2 = d_a/B**2 + (A/B)**2 * d_b/B**2
        #              - 2 * cor * sqrt(d_a)/B**2 * sqrt(d_b) * A / B
# sigma**2 = multiplication formula/B**4 (and sign change in
# the correlation)
if isinstance(correlation, np.ndarray) or correlation != 0:
corr = (
2
* correlation
* np.sqrt(d_a * d_b)
* self.parent_nddata.data
* other_uncert.parent_nddata.data
)
else:
corr = 0
if divide:
return from_variance(
(left + right + correlation_sign * corr)
/ other_uncert.parent_nddata.data**4
)
else:
return from_variance(left + right + correlation_sign * corr)
class StdDevUncertainty(_VariancePropagationMixin, NDUncertainty):
"""Standard deviation uncertainty assuming first order gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
    of `StdDevUncertainty`. The class can handle cases where the uncertainty
    has a unit that differs from (but is convertible to) the parent `NDData`
    unit. The resulting uncertainty will have the same unit as the resulting
    data. Correlation is also supported, but the correlation must be supplied
    as input; this class cannot determine the correlation itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
`StdDevUncertainty` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, StdDevUncertainty
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=StdDevUncertainty([0.1, 0.1, 0.1]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
StdDevUncertainty([0.1, 0.1, 0.1])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = StdDevUncertainty([0.2], unit='m', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
StdDevUncertainty([0.2])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 2
>>> ndd.uncertainty
StdDevUncertainty(2)
.. note::
The unit will not be displayed.
"""
@property
    def supports_correlated(self):
        """`True` : `StdDevUncertainty` supports propagation of correlated \
        uncertainties.
``correlation`` must be given, this class does not implement computing
it by itself.
"""
return True
@property
def uncertainty_type(self):
"""``"std"`` : `StdDevUncertainty` implements standard deviation."""
return "std"
def _convert_uncertainty(self, other_uncert):
if isinstance(other_uncert, StdDevUncertainty):
return other_uncert
else:
raise IncompatibleUncertaintiesException
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert,
result_data,
correlation,
subtract=False,
to_variance=np.square,
from_variance=np.sqrt,
)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert,
result_data,
correlation,
subtract=True,
to_variance=np.square,
from_variance=np.sqrt,
)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert,
result_data,
correlation,
divide=False,
to_variance=np.square,
from_variance=np.sqrt,
)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert,
result_data,
correlation,
divide=True,
to_variance=np.square,
from_variance=np.sqrt,
)
def _propagate_collapse(self, numpy_operation, axis):
# defer to _VariancePropagationMixin
return super()._propagate_collapse(numpy_operation, axis=axis)
def _data_unit_to_uncertainty_unit(self, value):
return value
def _convert_to_variance(self):
new_array = None if self.array is None else self.array**2
new_unit = None if self.unit is None else self.unit**2
return VarianceUncertainty(new_array, unit=new_unit)
@classmethod
def _convert_from_variance(cls, var_uncert):
new_array = None if var_uncert.array is None else var_uncert.array ** (1 / 2)
new_unit = None if var_uncert.unit is None else var_uncert.unit ** (1 / 2)
return cls(new_array, unit=new_unit)
class VarianceUncertainty(_VariancePropagationMixin, NDUncertainty):
"""
Variance uncertainty assuming first order Gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
    of `VarianceUncertainty`. The class can handle cases where the uncertainty
    has a unit that differs from (but is convertible to) the parent `NDData`
    unit. The unit of the resulting uncertainty will be the square of the unit
    of the resulting data. Correlation is also supported, but the correlation
    must be supplied as input; this class cannot determine the correlation
    itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
Compare this example to that in `StdDevUncertainty`; the uncertainties
in the examples below are equivalent to the uncertainties in
`StdDevUncertainty`.
`VarianceUncertainty` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, VarianceUncertainty
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=VarianceUncertainty([0.01, 0.01, 0.01]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
VarianceUncertainty([0.01, 0.01, 0.01])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = VarianceUncertainty([0.04], unit='m^2', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
VarianceUncertainty([0.04])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 4
>>> ndd.uncertainty
VarianceUncertainty(4)
.. note::
The unit will not be displayed.
"""
@property
def uncertainty_type(self):
"""``"var"`` : `VarianceUncertainty` implements variance."""
return "var"
@property
    def supports_correlated(self):
        """`True` : `VarianceUncertainty` supports propagation of correlated \
        uncertainties.
``correlation`` must be given, this class does not implement computing
it by itself.
"""
return True
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert, result_data, correlation, subtract=False
)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert, result_data, correlation, subtract=True
)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert, result_data, correlation, divide=False
)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert, result_data, correlation, divide=True
)
def _data_unit_to_uncertainty_unit(self, value):
return value**2
def _convert_to_variance(self):
return self
@classmethod
def _convert_from_variance(cls, var_uncert):
return var_uncert
def _inverse(x):
"""Just a simple inverse for use in the InverseVariance."""
return 1 / x
class InverseVariance(_VariancePropagationMixin, NDUncertainty):
"""
Inverse variance uncertainty assuming first order Gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
    of `InverseVariance`. The class can handle cases where the uncertainty has
    a unit that differs from (but is convertible to) the parent `NDData` unit.
    The unit of the resulting uncertainty will be the inverse square of the
    unit of the resulting data. Correlation is also supported, but the
    correlation must be supplied as input; this class cannot determine the
    correlation itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
Compare this example to that in `StdDevUncertainty`; the uncertainties
in the examples below are equivalent to the uncertainties in
`StdDevUncertainty`.
`InverseVariance` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, InverseVariance
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=InverseVariance([100, 100, 100]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
InverseVariance([100, 100, 100])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = InverseVariance([25], unit='1/m^2', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
InverseVariance([25])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 0.25
>>> ndd.uncertainty
InverseVariance(0.25)
.. note::
The unit will not be displayed.
"""
@property
def uncertainty_type(self):
"""``"ivar"`` : `InverseVariance` implements inverse variance."""
return "ivar"
@property
    def supports_correlated(self):
        """`True` : `InverseVariance` supports propagation of correlated \
        uncertainties.
``correlation`` must be given, this class does not implement computing
it by itself.
"""
return True
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert,
result_data,
correlation,
subtract=False,
to_variance=_inverse,
from_variance=_inverse,
)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert,
result_data,
correlation,
subtract=True,
to_variance=_inverse,
from_variance=_inverse,
)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert,
result_data,
correlation,
divide=False,
to_variance=_inverse,
from_variance=_inverse,
)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert,
result_data,
correlation,
divide=True,
to_variance=_inverse,
from_variance=_inverse,
)
def _data_unit_to_uncertainty_unit(self, value):
return 1 / value**2
def _convert_to_variance(self):
new_array = None if self.array is None else 1 / self.array
new_unit = None if self.unit is None else 1 / self.unit
return VarianceUncertainty(new_array, unit=new_unit)
@classmethod
def _convert_from_variance(cls, var_uncert):
new_array = None if var_uncert.array is None else 1 / var_uncert.array
new_unit = None if var_uncert.unit is None else 1 / var_uncert.unit
return cls(new_array, unit=new_unit)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base NDData class.
from copy import deepcopy
import numpy as np
from astropy import log
from astropy.units import Quantity, Unit
from astropy.utils.masked import Masked, MaskedNDArray
from astropy.utils.metadata import MetaData
from astropy.wcs.wcsapi import SlicedLowLevelWCS # noqa: F401
from astropy.wcs.wcsapi import BaseHighLevelWCS, BaseLowLevelWCS, HighLevelWCSWrapper
from .nddata_base import NDDataBase
from .nduncertainty import NDUncertainty, UnknownUncertainty
__all__ = ["NDData"]
_meta_doc = """`dict`-like : Additional meta information about the dataset."""
class NDData(NDDataBase):
"""
A container for `numpy.ndarray`-based datasets, using the
`~astropy.nddata.NDDataBase` interface.
The key distinction from raw `numpy.ndarray` is the presence of
additional metadata such as uncertainty, mask, unit, a coordinate system
and/or a dictionary containing further meta information. This class *only*
provides a container for *storing* such datasets. For further functionality
take a look at the ``See also`` section.
See also: https://docs.astropy.org/en/stable/nddata/
Parameters
----------
data : `numpy.ndarray`-like or `NDData`-like
The dataset.
uncertainty : any type, optional
Uncertainty in the dataset.
Should have an attribute ``uncertainty_type`` that defines what kind of
uncertainty is stored, for example ``"std"`` for standard deviation or
``"var"`` for variance. A metaclass defining such an interface is
`NDUncertainty` - but isn't mandatory. If the uncertainty has no such
attribute the uncertainty is stored as `UnknownUncertainty`.
Defaults to ``None``.
mask : any type, optional
Mask for the dataset. Masks should follow the ``numpy`` convention that
**valid** data points are marked by ``False`` and **invalid** ones with
``True``.
Defaults to ``None``.
wcs : any type, optional
World coordinate system (WCS) for the dataset.
Default is ``None``.
meta : `dict`-like object, optional
Additional meta information about the dataset. If no meta is provided
an empty `collections.OrderedDict` is created.
Default is ``None``.
unit : unit-like, optional
Unit for the dataset. Strings that can be converted to a
`~astropy.units.Unit` are allowed.
Default is ``None``.
copy : `bool`, optional
Indicates whether to save the arguments as copy. ``True`` copies
every attribute before saving it while ``False`` tries to save every
parameter as reference.
Note however that it is not always possible to save the input as
reference.
Default is ``False``.
.. versionadded:: 1.2
psf : `numpy.ndarray` or None, optional
Image representation of the PSF. In order for convolution to be flux-
preserving, this should generally be normalized to sum to unity.
Raises
------
TypeError
In case ``data`` or ``meta`` don't meet the restrictions.
Notes
-----
Each attribute can be accessed through the homonymous instance attribute:
``data`` in a `NDData` object can be accessed through the `data`
attribute::
>>> from astropy.nddata import NDData
>>> nd = NDData([1,2,3])
>>> nd.data
array([1, 2, 3])
Given a conflicting implicit and an explicit parameter during
initialization, for example the ``data`` is a `~astropy.units.Quantity` and
    the unit parameter is not ``None``, then the data is converted to the
    explicitly given unit and an informational message is issued::
>>> import numpy as np
>>> import astropy.units as u
>>> q = np.array([1,2,3,4]) * u.m
>>> nd2 = NDData(q, unit=u.cm)
INFO: overwriting Quantity's current unit with specified unit. [astropy.nddata.nddata]
>>> nd2.data # doctest: +FLOAT_CMP
array([100., 200., 300., 400.])
>>> nd2.unit
Unit("cm")
See Also
--------
NDDataRef
NDDataArray
"""
# Instead of a custom property use the MetaData descriptor also used for
# Tables. It will check if the meta is dict-like or raise an exception.
meta = MetaData(doc=_meta_doc, copy=False)
def __init__(
self,
data,
uncertainty=None,
mask=None,
wcs=None,
meta=None,
unit=None,
copy=False,
psf=None,
):
        # Rather pointless since NDDataBase does not implement any setting,
        # but in the past NDDataBase did call the uncertainty setter. If
        # anyone wants to alter this behavior again, the call to the
        # superclass NDDataBase should be in here.
super().__init__()
# Check if data is any type from which to collect some implicitly
# passed parameters.
if isinstance(data, NDData): # don't use self.__class__ (issue #4137)
# Of course we need to check the data because subclasses with other
# init-logic might be passed in here. We could skip these
# tests if we compared for self.__class__ but that has other
# drawbacks.
# Comparing if there is an explicit and an implicit unit parameter.
# If that is the case use the explicit one and issue a warning
# that there might be a conflict. In case there is no explicit
# unit just overwrite the unit parameter with the NDData.unit
# and proceed as if that one was given as parameter. Same for the
# other parameters.
if unit is None and data.unit is not None:
unit = data.unit
elif unit is not None and data.unit is not None:
log.info("overwriting NDData's current unit with specified unit.")
if uncertainty is not None and data.uncertainty is not None:
log.info(
"overwriting NDData's current "
"uncertainty with specified uncertainty."
)
elif data.uncertainty is not None:
uncertainty = data.uncertainty
if mask is not None and data.mask is not None:
log.info("overwriting NDData's current mask with specified mask.")
elif data.mask is not None:
mask = data.mask
if wcs is not None and data.wcs is not None:
log.info("overwriting NDData's current wcs with specified wcs.")
elif data.wcs is not None:
wcs = data.wcs
if psf is not None and data.psf is not None:
log.info("Overwriting NDData's current psf with specified psf.")
elif data.psf is not None:
psf = data.psf
if meta is not None and data.meta is not None:
log.info("overwriting NDData's current meta with specified meta.")
elif data.meta is not None:
meta = data.meta
# get the data attribute as it is, and continue to process it:
data = data.data
# if the data is wrapped by astropy.utils.masked.Masked:
if isinstance(data, Masked):
# first get the mask if one is available:
if hasattr(data, "mask"):
if mask is not None:
log.info(
"overwriting Masked Quantity's current mask with specified mask."
)
else:
mask = data.mask
if isinstance(data, MaskedNDArray):
if unit is not None and hasattr(data, "unit") and data.unit != unit:
log.info(
"overwriting MaskedNDArray's current unit with specified unit."
)
data = data.to(unit).value
elif unit is None and hasattr(data, "unit"):
unit = data.unit
data = data.value
# now get the unmasked ndarray:
data = np.asarray(data)
if isinstance(data, Quantity):
# this is a Quantity:
if unit is not None and data.unit != unit:
log.info("overwriting Quantity's current unit with specified unit.")
data = data.to(unit)
elif unit is None and data.unit is not None:
unit = data.unit
data = data.value
if isinstance(data, np.ma.masked_array):
if mask is not None:
log.info(
"overwriting masked ndarray's current mask with specified mask."
)
else:
mask = data.mask
data = data.data
if isinstance(data, Quantity):
# this is a Quantity:
if unit is not None and data.unit != unit:
log.info("overwriting Quantity's current unit with specified unit.")
data = data.to(unit)
elif unit is None and data.unit is not None:
unit = data.unit
data = data.value
if isinstance(data, np.ndarray):
# check for mask from np.ma.masked_ndarray
if hasattr(data, "mask"):
if mask is not None:
log.info(
"overwriting masked ndarray's current mask with specified mask."
)
else:
mask = data.mask
# Quick check on the parameters if they match the requirements.
if (
not hasattr(data, "shape")
or not hasattr(data, "__getitem__")
or not hasattr(data, "__array__")
):
# Data doesn't look like a numpy array, try converting it to
# one.
data = np.array(data, subok=True, copy=False)
# Another quick check to see if what we got looks like an array
# rather than an object (since numpy will convert a
# non-numerical/non-string inputs to an array of objects).
if data.dtype == "O":
raise TypeError("could not convert data to numpy array.")
if unit is not None:
unit = Unit(unit)
if copy:
# Data might have been copied before but no way of validating
# without another variable.
data = deepcopy(data)
mask = deepcopy(mask)
wcs = deepcopy(wcs)
psf = deepcopy(psf)
meta = deepcopy(meta)
uncertainty = deepcopy(uncertainty)
# Actually - copying the unit is unnecessary but better safe
# than sorry :-)
unit = deepcopy(unit)
# Store the attributes
self._data = data
self.mask = mask
self._wcs = None
if wcs is not None:
# Validate the wcs
self.wcs = wcs
self.meta = meta # TODO: Make this call the setter sometime
self._unit = unit
# Call the setter for uncertainty to further check the uncertainty
self.uncertainty = uncertainty
self.psf = psf
def __str__(self):
data = str(self.data)
unit = f" {self.unit}" if self.unit is not None else ""
return data + unit
def __repr__(self):
prefix = self.__class__.__name__ + "("
data = np.array2string(self.data, separator=", ", prefix=prefix)
unit = f", unit='{self.unit}'" if self.unit is not None else ""
return "".join((prefix, data, unit, ")"))
@property
def data(self):
"""
`~numpy.ndarray`-like : The stored dataset.
"""
return self._data
@property
def mask(self):
"""
any type : Mask for the dataset, if any.
Masks should follow the ``numpy`` convention that valid data points are
marked by ``False`` and invalid ones with ``True``.
"""
return self._mask
@mask.setter
def mask(self, value):
self._mask = value
@property
def unit(self):
"""
`~astropy.units.Unit` : Unit for the dataset, if any.
"""
return self._unit
@property
def wcs(self):
"""
any type : A world coordinate system (WCS) for the dataset, if any.
"""
return self._wcs
@wcs.setter
def wcs(self, wcs):
if self._wcs is not None and wcs is not None:
raise ValueError(
"You can only set the wcs attribute with a WCS if no WCS is present."
)
if wcs is None or isinstance(wcs, BaseHighLevelWCS):
self._wcs = wcs
elif isinstance(wcs, BaseLowLevelWCS):
self._wcs = HighLevelWCSWrapper(wcs)
else:
raise TypeError(
"The wcs argument must implement either the high or low level WCS API."
)
@property
def psf(self):
return self._psf
@psf.setter
def psf(self, value):
self._psf = value
@property
def uncertainty(self):
"""
any type : Uncertainty in the dataset, if any.
Should have an attribute ``uncertainty_type`` that defines what kind of
uncertainty is stored, such as ``'std'`` for standard deviation or
``'var'`` for variance. A metaclass defining such an interface is
`~astropy.nddata.NDUncertainty` but isn't mandatory.
"""
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
            # There is one requirement on the uncertainty: that
# it has an attribute 'uncertainty_type'.
# If it does not match this requirement convert it to an unknown
# uncertainty.
if not hasattr(value, "uncertainty_type"):
log.info("uncertainty should have attribute uncertainty_type.")
value = UnknownUncertainty(value, copy=False)
# If it is a subclass of NDUncertainty we must set the
# parent_nddata attribute. (#4152)
if isinstance(value, NDUncertainty):
# In case the uncertainty already has a parent create a new
# instance because we need to assume that we don't want to
# steal the uncertainty from another NDData object
if value._parent_nddata is not None:
value = value.__class__(value, copy=False)
# Then link it to this NDData instance (internally this needs
# to be saved as weakref but that's done by NDUncertainty
# setter).
value.parent_nddata = self
self._uncertainty = value
|
65ea3d216059e45bf0be4b04c4807147676494f344b9a2c033ed115d4592386a | """
A module that provides functions for manipulating bit masks and data quality
(DQ) arrays.
"""
import numbers
import warnings
from collections import OrderedDict
import numpy as np
__all__ = [
"bitfield_to_boolean_mask",
"interpret_bit_flags",
"BitFlagNameMap",
"extend_bit_flag_map",
"InvalidBitFlag",
]
_ENABLE_BITFLAG_CACHING = True
_MAX_UINT_TYPE = np.maximum_sctype(np.uint)
_SUPPORTED_FLAGS = int(np.bitwise_not(0, dtype=_MAX_UINT_TYPE, casting="unsafe"))
def _is_bit_flag(n):
"""
Verifies if the input number is a bit flag (i.e., an integer number that is
an integer power of 2).
Parameters
----------
n : int
A positive integer number. Non-positive integers are considered not to
be "flags".
Returns
-------
bool
``True`` if input ``n`` is a bit flag and ``False`` if it is not.
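    Examples
    --------
    A couple of illustrative checks (the input values are arbitrary):
    >>> _is_bit_flag(16)
    True
    >>> _is_bit_flag(12)
    False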
"""
if n < 1:
return False
return bin(n).count("1") == 1
def _is_int(n):
return (isinstance(n, numbers.Integral) and not isinstance(n, bool)) or (
isinstance(n, np.generic) and np.issubdtype(n, np.integer)
)
class InvalidBitFlag(ValueError):
"""Indicates that a value is not an integer that is a power of 2."""
pass
class BitFlag(int):
"""Bit flags: integer values that are powers of 2."""
def __new__(cls, val, doc=None):
if isinstance(val, tuple):
if doc is not None:
raise ValueError("Flag's doc string cannot be provided twice.")
val, doc = val
if not (_is_int(val) and _is_bit_flag(val)):
raise InvalidBitFlag(
"Value '{}' is not a valid bit flag: bit flag value must be "
"an integral power of two.".format(val)
)
s = int.__new__(cls, val)
if doc is not None:
s.__doc__ = doc
return s
class BitFlagNameMeta(type):
def __new__(mcls, name, bases, members):
for k, v in members.items():
if not k.startswith("_"):
v = BitFlag(v)
attr = [k for k in members.keys() if not k.startswith("_")]
attrl = list(map(str.lower, attr))
if _ENABLE_BITFLAG_CACHING:
cache = OrderedDict()
for b in bases:
for k, v in b.__dict__.items():
if k.startswith("_"):
continue
kl = k.lower()
if kl in attrl:
idx = attrl.index(kl)
raise AttributeError(
f"Bit flag '{attr[idx]:s}' was already defined."
)
if _ENABLE_BITFLAG_CACHING:
cache[kl] = v
members = {
k: v if k.startswith("_") else BitFlag(v) for k, v in members.items()
}
if _ENABLE_BITFLAG_CACHING:
cache.update(
{k.lower(): v for k, v in members.items() if not k.startswith("_")}
)
members = {"_locked": True, "__version__": "", **members, "_cache": cache}
else:
members = {"_locked": True, "__version__": "", **members}
return super().__new__(mcls, name, bases, members)
def __setattr__(cls, name, val):
if name == "_locked":
return super().__setattr__(name, True)
else:
if name == "__version__":
if cls._locked:
raise AttributeError("Version cannot be modified.")
return super().__setattr__(name, val)
err_msg = f"Bit flags are read-only. Unable to reassign attribute {name}"
if cls._locked:
raise AttributeError(err_msg)
namel = name.lower()
if _ENABLE_BITFLAG_CACHING:
if not namel.startswith("_") and namel in cls._cache:
raise AttributeError(err_msg)
else:
for b in cls.__bases__:
if not namel.startswith("_") and namel in list(
map(str.lower, b.__dict__)
):
raise AttributeError(err_msg)
if namel in list(map(str.lower, cls.__dict__)):
raise AttributeError(err_msg)
val = BitFlag(val)
if _ENABLE_BITFLAG_CACHING and not namel.startswith("_"):
cls._cache[namel] = val
return super().__setattr__(name, val)
def __getattr__(cls, name):
if _ENABLE_BITFLAG_CACHING:
flagnames = cls._cache
else:
flagnames = {k.lower(): v for k, v in cls.__dict__.items()}
flagnames.update(
{k.lower(): v for b in cls.__bases__ for k, v in b.__dict__.items()}
)
try:
return flagnames[name.lower()]
except KeyError:
raise AttributeError(f"Flag '{name}' not defined")
def __getitem__(cls, key):
return cls.__getattr__(key)
def __add__(cls, items):
if not isinstance(items, dict):
if not isinstance(items[0], (tuple, list)):
items = [items]
items = dict(items)
return extend_bit_flag_map(
cls.__name__ + "_" + "_".join([k for k in items]), cls, **items
)
def __iadd__(cls, other):
raise NotImplementedError(
"Unary '+' is not supported. Use binary operator instead."
)
def __delattr__(cls, name):
raise AttributeError(
f"{cls.__name__}: cannot delete {cls.mro()[-2].__name__} member."
)
def __delitem__(cls, name):
raise AttributeError(
f"{cls.__name__}: cannot delete {cls.mro()[-2].__name__} member."
)
def __repr__(cls):
return f"<{cls.mro()[-2].__name__} '{cls.__name__}'>"
class BitFlagNameMap(metaclass=BitFlagNameMeta):
"""
A base class for bit flag name maps used to describe data quality (DQ)
    flags of images by providing a mapping from a mnemonic flag name to a flag
value.
Mapping for a specific instrument should subclass this class.
Subclasses should define flags as class attributes with integer values
that are powers of 2. Each bit flag may also contain a string
comment following the flag value.
Examples
--------
>>> from astropy.nddata.bitmask import BitFlagNameMap
>>> class ST_DQ(BitFlagNameMap):
... __version__ = '1.0.0' # optional
... CR = 1, 'Cosmic Ray'
... CLOUDY = 4 # no docstring comment
... RAINY = 8, 'Dome closed'
...
>>> class ST_CAM1_DQ(ST_DQ):
... HOT = 16
... DEAD = 32
"""
pass
def extend_bit_flag_map(cls_name, base_cls=BitFlagNameMap, **kwargs):
"""
A convenience function for creating bit flags maps by subclassing an
existing map and adding additional flags supplied as keyword arguments.
Parameters
----------
cls_name : str
Class name of the bit flag map to be created.
base_cls : BitFlagNameMap, optional
Base class for the new bit flag map.
**kwargs : int
Each supplied keyword argument will be used to define bit flag
names in the new map. In addition to bit flag names, ``__version__`` is
allowed to indicate the version of the newly created map.
Examples
--------
>>> from astropy.nddata.bitmask import extend_bit_flag_map
>>> ST_DQ = extend_bit_flag_map('ST_DQ', __version__='1.0.0', CR=1, CLOUDY=4, RAINY=8)
>>> ST_CAM1_DQ = extend_bit_flag_map('ST_CAM1_DQ', ST_DQ, HOT=16, DEAD=32)
>>> ST_CAM1_DQ['HOT'] # <-- Access flags as dictionary keys
16
>>> ST_CAM1_DQ.HOT # <-- Access flags as class attributes
16
"""
new_cls = BitFlagNameMeta.__new__(
BitFlagNameMeta, cls_name, (base_cls,), {"_locked": False}
)
for k, v in kwargs.items():
try:
setattr(new_cls, k, v)
except AttributeError as e:
if new_cls[k] != int(v):
raise e
new_cls._locked = True
return new_cls
def interpret_bit_flags(bit_flags, flip_bits=None, flag_name_map=None):
"""
Converts input bit flags to a single integer value (bit mask) or `None`.
When input is a list of flags (either a Python list of integer flags or a
string of comma-, ``'|'``-, or ``'+'``-separated list of flags),
the returned bit mask is obtained by summing input flags.
.. note::
In order to flip the bits of the returned bit mask,
for input of `str` type, prepend '~' to the input string. '~' must
be prepended to the *entire string* and not to each bit flag! For
input that is already a bit mask or a Python list of bit flags, set
        ``flip_bits`` to `True` in order to flip the bits of the returned
bit mask.
Parameters
----------
bit_flags : int, str, list, None
An integer bit mask or flag, `None`, a string of comma-, ``'|'``- or
``'+'``-separated list of integer bit flags or mnemonic flag names,
or a Python list of integer bit flags. If ``bit_flags`` is a `str`
and if it is prepended with '~', then the output bit mask will have
its bits flipped (compared to simple sum of input flags).
For input ``bit_flags`` that is already a bit mask or a Python list
of bit flags, bit-flipping can be controlled through ``flip_bits``
parameter.
.. note::
When ``bit_flags`` is a list of flag names, the ``flag_name_map``
parameter must be provided.
.. note::
Only one flag separator is supported at a time. ``bit_flags``
string should not mix ``','``, ``'+'``, and ``'|'`` separators.
flip_bits : bool, None
Indicates whether or not to flip the bits of the returned bit mask
obtained from input bit flags. This parameter must be set to `None`
when input ``bit_flags`` is either `None` or a Python list of flags.
flag_name_map : BitFlagNameMap
A `BitFlagNameMap` object that provides mapping from mnemonic
bit flag names to integer bit values in order to translate mnemonic
        flags to numeric values when ``bit_flags`` is a comma- or
        '+'-separated list of mnemonic bit flag names.
Returns
-------
bitmask : int or None
Returns an integer bit mask formed from the input bit value or `None`
if input ``bit_flags`` parameter is `None` or an empty string.
If input string value was prepended with '~' (or ``flip_bits`` was set
to `True`), then returned value will have its bits flipped
(inverse mask).
Examples
--------
>>> from astropy.nddata.bitmask import interpret_bit_flags, extend_bit_flag_map
>>> ST_DQ = extend_bit_flag_map('ST_DQ', CR=1, CLOUDY=4, RAINY=8, HOT=16, DEAD=32)
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16'))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('CLOUDY,RAINY,HOT', flag_name_map=ST_DQ))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(CLOUDY+RAINY+HOT)',
... flag_name_map=ST_DQ))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16]))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True))
'1111111111100011'
"""
has_flip_bits = flip_bits is not None
flip_bits = bool(flip_bits)
allow_non_flags = False
if _is_int(bit_flags):
return ~int(bit_flags) if flip_bits else int(bit_flags)
elif bit_flags is None:
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' must be set to 'None' when "
"input 'bit_flags' is None."
)
return None
elif isinstance(bit_flags, str):
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' is not permitted for "
"comma-separated string lists of bit flags. Prepend '~' to "
"the string to indicate bit-flipping."
)
bit_flags = str(bit_flags).strip()
if bit_flags.upper() in ["", "NONE", "INDEF"]:
return None
# check whether bitwise-NOT is present and if it is, check that it is
# in the first position:
bitflip_pos = bit_flags.find("~")
if bitflip_pos == 0:
flip_bits = True
bit_flags = bit_flags[1:].lstrip()
else:
if bitflip_pos > 0:
raise ValueError("Bitwise-NOT must precede bit flag list.")
flip_bits = False
# basic check for correct use of parenthesis:
while True:
nlpar = bit_flags.count("(")
nrpar = bit_flags.count(")")
if nlpar == 0 and nrpar == 0:
break
if nlpar != nrpar:
raise ValueError("Unbalanced parentheses in bit flag list.")
lpar_pos = bit_flags.find("(")
rpar_pos = bit_flags.rfind(")")
if lpar_pos > 0 or rpar_pos < (len(bit_flags) - 1):
raise ValueError(
"Incorrect syntax (incorrect use of parenthesis) in bit flag list."
)
bit_flags = bit_flags[1:-1].strip()
if sum(k in bit_flags for k in "+,|") > 1:
raise ValueError(
"Only one type of bit flag separator may be used in one "
"expression. Allowed separators are: '+', '|', or ','."
)
if "," in bit_flags:
bit_flags = bit_flags.split(",")
elif "+" in bit_flags:
bit_flags = bit_flags.split("+")
elif "|" in bit_flags:
bit_flags = bit_flags.split("|")
else:
if bit_flags == "":
raise ValueError(
"Empty bit flag lists not allowed when either bitwise-NOT "
"or parenthesis are present."
)
bit_flags = [bit_flags]
if flag_name_map is not None:
try:
int(bit_flags[0])
except ValueError:
bit_flags = [flag_name_map[f] for f in bit_flags]
allow_non_flags = len(bit_flags) == 1
elif hasattr(bit_flags, "__iter__"):
if not all(_is_int(flag) for flag in bit_flags):
if flag_name_map is not None and all(
isinstance(flag, str) for flag in bit_flags
):
bit_flags = [flag_name_map[f] for f in bit_flags]
else:
raise TypeError(
"Every bit flag in a list must be either an "
"integer flag value or a 'str' flag name."
)
else:
raise TypeError("Unsupported type for argument 'bit_flags'.")
bitset = set(map(int, bit_flags))
if len(bitset) != len(bit_flags):
warnings.warn("Duplicate bit flags will be ignored")
bitmask = 0
for v in bitset:
if not _is_bit_flag(v) and not allow_non_flags:
raise ValueError(
f"Input list contains invalid (not powers of two) bit flag: {v}"
)
bitmask += v
if flip_bits:
bitmask = ~bitmask
return bitmask
def bitfield_to_boolean_mask(
bitfield,
ignore_flags=0,
flip_bits=None,
good_mask_value=False,
dtype=np.bool_,
flag_name_map=None,
):
"""
bitfield_to_boolean_mask(bitfield, ignore_flags=None, flip_bits=None, \
good_mask_value=False, dtype=numpy.bool_)
Converts an array of bit fields to a boolean (or integer) mask array
according to a bit mask constructed from the supplied bit flags (see
``ignore_flags`` parameter).
This function is particularly useful to convert data quality arrays to
boolean masks with selective filtering of DQ flags.
Parameters
----------
bitfield : ndarray
An array of bit flags. By default, values different from zero are
interpreted as "bad" values and values equal to zero are considered
as "good" values. However, see ``ignore_flags`` parameter on how to
selectively ignore some bits in the ``bitfield`` array data.
ignore_flags : int, str, list, None (default = 0)
        An integer bit mask, `None`, a Python list of bit flags, or a comma-,
        ``'|'``-, or ``'+'``-separated string list of integer
bit flags or mnemonic flag names that indicate what bits in the input
``bitfield`` should be *ignored* (i.e., zeroed), or `None`.
.. note::
            When ``ignore_flags`` is a list of flag names, the ``flag_name_map``
parameter must be provided.
| Setting ``ignore_flags`` to `None` effectively will make
`bitfield_to_boolean_mask` interpret all ``bitfield`` elements
as "good" regardless of their value.
| When ``ignore_flags`` argument is an integer bit mask, it will be
combined using bitwise-NOT and bitwise-AND with each element of the
input ``bitfield`` array (``~ignore_flags & bitfield``). If the
resultant bitfield element is non-zero, that element will be
interpreted as a "bad" in the output boolean mask and it will be
interpreted as "good" otherwise. ``flip_bits`` parameter may be used
to flip the bits (``bitwise-NOT``) of the bit mask thus effectively
changing the meaning of the ``ignore_flags`` parameter from "ignore"
to "use only" these flags.
.. note::
Setting ``ignore_flags`` to 0 effectively will assume that all
non-zero elements in the input ``bitfield`` array are to be
interpreted as "bad".
| When ``ignore_flags`` argument is a Python list of integer bit
flags, these flags are added together to create an integer bit mask.
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In order to flip the bits of the resultant
bit mask, use ``flip_bits`` parameter.
| Alternatively, ``ignore_flags`` may be a string of comma- or
``'+'``(or ``'|'``)-separated list of integer bit flags that should
be added (bitwise OR) together to create an integer bit mask.
        For example, ``'4,8'``, ``'4|8'``, and ``'4+8'`` are all equivalent
and indicate that bit flags 4 and 8 in the input ``bitfield``
array should be ignored when generating boolean mask.
.. note::
``'None'``, ``'INDEF'``, and empty (or all white space) strings
are special values of string ``ignore_flags`` that are
interpreted as `None`.
.. note::
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In addition, for convenience, an arbitrary
**single** integer is allowed and it will be interpreted as an
integer bit mask. For example, instead of ``'4,8'`` one could
simply provide string ``'12'``.
.. note::
Only one flag separator is supported at a time. ``ignore_flags``
string should not mix ``','``, ``'+'``, and ``'|'`` separators.
.. note::
When ``ignore_flags`` is a `str` and when it is prepended with
'~', then the meaning of ``ignore_flags`` parameters will be
reversed: now it will be interpreted as a list of bit flags to be
*used* (or *not ignored*) when deciding which elements of the
input ``bitfield`` array are "bad". Following this convention,
an ``ignore_flags`` string value of ``'~0'`` would be equivalent
to setting ``ignore_flags=None``.
.. warning::
Because prepending '~' to a string ``ignore_flags`` is equivalent
to setting ``flip_bits`` to `True`, ``flip_bits`` cannot be used
with string ``ignore_flags`` and it must be set to `None`.
flip_bits : bool, None (default = None)
Specifies whether or not to invert the bits of the bit mask either
supplied directly through ``ignore_flags`` parameter or built from the
bit flags passed through ``ignore_flags`` (only when bit flags are
passed as Python lists of integer bit flags). Occasionally, it may be
useful to *consider only specific bit flags* in the ``bitfield``
array when creating a boolean mask as opposed to *ignoring* specific
bit flags as ``ignore_flags`` behaves by default. This can be achieved
by inverting/flipping the bits of the bit mask created from
``ignore_flags`` flags which effectively changes the meaning of the
``ignore_flags`` parameter from "ignore" to "use only" these flags.
Setting ``flip_bits`` to `None` means that no bit flipping will be
performed. Bit flipping for string lists of bit flags must be
specified by prepending '~' to string bit flag lists
(see documentation for ``ignore_flags`` for more details).
.. warning::
This parameter can be set to either `True` or `False` **ONLY** when
``ignore_flags`` is either an integer bit mask or a Python
list of integer bit flags. When ``ignore_flags`` is either
`None` or a string list of flags, ``flip_bits`` **MUST** be set
to `None`.
good_mask_value : int, bool (default = False)
This parameter is used to derive the values that will be assigned to
the elements in the output boolean mask array that correspond to the
"good" bit fields (that are 0 after zeroing bits specified by
``ignore_flags``) in the input ``bitfield`` array. When
``good_mask_value`` is non-zero or ``numpy.True_`` then values in the
output boolean mask array corresponding to "good" bit fields in
``bitfield`` will be ``numpy.True_`` (if ``dtype`` is ``numpy.bool_``)
        or 1 (if ``dtype`` is of numerical type) and values corresponding
to "bad" flags will be ``numpy.False_`` (or 0). When
``good_mask_value`` is zero or ``numpy.False_`` then the values
in the output boolean mask array corresponding to "good" bit fields
in ``bitfield`` will be ``numpy.False_`` (if ``dtype`` is
``numpy.bool_``) or 0 (if ``dtype`` is of numerical type) and values
of corresponding to "bad" flags will be ``numpy.True_`` (or 1).
dtype : data-type (default = ``numpy.bool_``)
The desired data-type for the output binary mask array.
flag_name_map : BitFlagNameMap
A `BitFlagNameMap` object that provides mapping from mnemonic
bit flag names to integer bit values in order to translate mnemonic
        flags to numeric values when ``ignore_flags`` is a comma- or
        '+'-separated list of mnemonic bit flag names.
Returns
-------
mask : ndarray
Returns an array of the same dimensionality as the input ``bitfield``
array whose elements can have two possible values,
e.g., ``numpy.True_`` or ``numpy.False_`` (or 1 or 0 for integer
        ``dtype``) according to the values of the input ``bitfield`` elements,
``ignore_flags`` parameter, and the ``good_mask_value`` parameter.
Examples
--------
>>> from astropy.nddata import bitmask
>>> import numpy as np
>>> dqarr = np.asarray([[0, 0, 1, 2, 0, 8, 12, 0],
... [10, 4, 0, 0, 0, 16, 6, 0]])
>>> flag_map = bitmask.extend_bit_flag_map(
... 'ST_DQ', CR=2, CLOUDY=4, RAINY=8, HOT=16, DEAD=32
... )
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0,
... dtype=int)
array([[0, 0, 1, 1, 0, 1, 1, 0],
[1, 1, 0, 0, 0, 1, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0,
... dtype=bool)
array([[False, False, True, True, False, True, True, False],
[ True, True, False, False, False, True, True, False]]...)
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6,
... good_mask_value=0, dtype=int)
array([[0, 0, 1, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 0, 1, 0, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=~6,
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6, dtype=int,
... flip_bits=True, good_mask_value=0)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(2+4)',
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=[2, 4],
... flip_bits=True, good_mask_value=0,
... dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR,CLOUDY)',
... good_mask_value=0, dtype=int,
... flag_name_map=flag_map)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR+CLOUDY)',
... good_mask_value=0, dtype=int,
... flag_name_map=flag_map)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
"""
bitfield = np.asarray(bitfield)
if not np.issubdtype(bitfield.dtype, np.integer):
raise TypeError("Input bitfield array must be of integer type.")
ignore_mask = interpret_bit_flags(
ignore_flags, flip_bits=flip_bits, flag_name_map=flag_name_map
)
if ignore_mask is None:
if good_mask_value:
mask = np.ones_like(bitfield, dtype=dtype)
else:
mask = np.zeros_like(bitfield, dtype=dtype)
return mask
# filter out bits beyond the maximum supported by the data type:
ignore_mask = ignore_mask & _SUPPORTED_FLAGS
# invert the "ignore" mask:
ignore_mask = np.bitwise_not(
ignore_mask, dtype=bitfield.dtype.type, casting="unsafe"
)
mask = np.empty_like(bitfield, dtype=np.bool_)
np.bitwise_and(bitfield, ignore_mask, out=mask, casting="unsafe")
if good_mask_value:
np.logical_not(mask, out=mask)
return mask.astype(dtype=dtype, subok=False, copy=False)
|
a7f00f56eb9785d0d28a937d58258b6f1c56168b6f945756c5294d1ce2f7d4be | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module implements the base CCDData class."""
import itertools
import numpy as np
from astropy import log
from astropy import units as u
from astropy.io import fits, registry
from astropy.utils.decorators import sharedmethod
from astropy.wcs import WCS
from .compat import NDDataArray
from .nduncertainty import (
InverseVariance,
NDUncertainty,
StdDevUncertainty,
VarianceUncertainty,
)
__all__ = ["CCDData", "fits_ccddata_reader", "fits_ccddata_writer"]
_known_uncertainties = (StdDevUncertainty, VarianceUncertainty, InverseVariance)
_unc_name_to_cls = {cls.__name__: cls for cls in _known_uncertainties}
_unc_cls_to_name = {cls: cls.__name__ for cls in _known_uncertainties}
# Global value which can turn on/off the unit requirements when creating a
# CCDData. Should be used with care because several functions actually break
# if the unit is None!
_config_ccd_requires_unit = True
def _arithmetic(op):
"""Decorator factory which temporarily disables the need for a unit when
creating a new CCDData instance. The final result must have a unit.
Parameters
----------
op : function
The function to apply. Supported are:
- ``np.add``
- ``np.subtract``
- ``np.multiply``
- ``np.true_divide``
Notes
-----
Should only be used on CCDData ``add``, ``subtract``, ``divide`` or
``multiply`` because only these methods from NDArithmeticMixin are
overwritten.
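    For example, the wrapped ``add`` method defined at the bottom of the
    `CCDData` class in this module is created as::
        add = _arithmetic(np.add)(NDDataArray.add)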
"""
def decorator(func):
def inner(self, operand, operand2=None, **kwargs):
global _config_ccd_requires_unit
_config_ccd_requires_unit = False
result = self._prepare_then_do_arithmetic(op, operand, operand2, **kwargs)
# Wrap it again as CCDData so it checks the final unit.
_config_ccd_requires_unit = True
return result.__class__(result)
inner.__doc__ = f"See `astropy.nddata.NDArithmeticMixin.{func.__name__}`."
return sharedmethod(inner)
return decorator
def _uncertainty_unit_equivalent_to_parent(uncertainty_type, unit, parent_unit):
if uncertainty_type is StdDevUncertainty:
return unit == parent_unit
elif uncertainty_type is VarianceUncertainty:
return unit == (parent_unit**2)
elif uncertainty_type is InverseVariance:
return unit == (1 / (parent_unit**2))
raise ValueError(f"unsupported uncertainty type: {uncertainty_type}")
class CCDData(NDDataArray):
"""A class describing basic CCD data.
The CCDData class is based on the NDData object and includes a data array,
uncertainty frame, mask frame, flag frame, meta data, units, and WCS
information for a single CCD image.
Parameters
----------
data : `~astropy.nddata.CCDData`-like or array-like
The actual data contained in this `~astropy.nddata.CCDData` object.
Note that the data will always be saved by *reference*, so you should
make a copy of the ``data`` before passing it in if that's the desired
behavior.
uncertainty : `~astropy.nddata.StdDevUncertainty`, \
`~astropy.nddata.VarianceUncertainty`, \
`~astropy.nddata.InverseVariance`, `numpy.ndarray` or \
None, optional
Uncertainties on the data. If the uncertainty is a `numpy.ndarray`, it
        is assumed to be, and stored as, a `~astropy.nddata.StdDevUncertainty`.
Default is ``None``.
mask : `numpy.ndarray` or None, optional
Mask for the data, given as a boolean Numpy array with a shape
matching that of the data. The values must be `False` where
the data is *valid* and `True` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
        ``mask`` here will cause the mask from the masked array to be
ignored.
Default is ``None``.
flags : `numpy.ndarray` or `~astropy.nddata.FlagCollection` or None, \
optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
Default is ``None``.
wcs : `~astropy.wcs.WCS` or None, optional
WCS-object containing the world coordinate system for the data.
Default is ``None``.
meta : dict-like object or None, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object, e.g. creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.Unit` or str, optional
The units of the data.
Default is ``None``.
.. warning::
            If the unit is ``None`` or not otherwise specified, it will raise a
            ``ValueError``.
psf : `numpy.ndarray` or None, optional
Image representation of the PSF at the center of this image. In order
for convolution to be flux-preserving, this should generally be
normalized to sum to unity.
Raises
------
ValueError
If the ``uncertainty`` or ``mask`` inputs cannot be broadcast (e.g.,
match shape) onto ``data``.
Methods
-------
read(\\*args, \\**kwargs)
        ``Classmethod`` to create a CCDData instance based on a ``FITS`` file.
This method uses :func:`fits_ccddata_reader` with the provided
parameters.
write(\\*args, \\**kwargs)
Writes the contents of the CCDData instance into a new ``FITS`` file.
This method uses :func:`fits_ccddata_writer` with the provided
parameters.
Attributes
----------
known_invalid_fits_unit_strings
A dictionary that maps commonly-used fits unit name strings that are
technically invalid to the correct valid unit type (or unit string).
This is primarily for variant names like "ELECTRONS/S" which are not
formally valid, but are unambiguous and frequently enough encountered
that it is convenient to map them to the correct unit.
Notes
-----
`~astropy.nddata.CCDData` objects can be easily converted to a regular
Numpy array using `numpy.asarray`.
For example::
>>> from astropy.nddata import CCDData
>>> import numpy as np
>>> x = CCDData([1,2,3], unit='adu')
>>> np.asarray(x)
array([1, 2, 3])
This is useful, for example, when plotting a 2D image using
matplotlib.
>>> from astropy.nddata import CCDData
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> x = CCDData([[1,2,3], [4,5,6]], unit='adu')
>>> plt.imshow(x) # doctest: +SKIP
"""
def __init__(self, *args, **kwd):
if "meta" not in kwd:
kwd["meta"] = kwd.pop("header", None)
if "header" in kwd:
raise ValueError("can't have both header and meta.")
super().__init__(*args, **kwd)
if self._wcs is not None:
llwcs = self._wcs.low_level_wcs
if not isinstance(llwcs, WCS):
raise TypeError("the wcs must be a WCS instance.")
self._wcs = llwcs
# Check if a unit is set. This can be temporarily disabled by the
# _CCDDataUnit contextmanager.
if _config_ccd_requires_unit and self.unit is None:
raise ValueError("a unit for CCDData must be specified.")
def _slice_wcs(self, item):
"""
Override the WCS slicing behaviour so that the wcs attribute continues
to be an `astropy.wcs.WCS`.
"""
if self.wcs is None:
return None
try:
return self.wcs[item]
except Exception as err:
self._handle_wcs_slicing_error(err, item)
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def wcs(self):
return self._wcs
@wcs.setter
def wcs(self, value):
if value is not None and not isinstance(value, WCS):
raise TypeError("the wcs must be a WCS instance.")
self._wcs = value
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
self._unit = u.Unit(value)
@property
def psf(self):
return self._psf
@psf.setter
def psf(self, value):
if value is not None and not isinstance(value, np.ndarray):
raise TypeError("The psf must be a numpy array.")
self._psf = value
@property
def header(self):
return self._meta
@header.setter
def header(self, value):
self.meta = value
@property
def uncertainty(self):
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
if isinstance(value, NDUncertainty):
if getattr(value, "_parent_nddata", None) is not None:
value = value.__class__(value, copy=False)
self._uncertainty = value
elif isinstance(value, np.ndarray):
if value.shape != self.shape:
raise ValueError("uncertainty must have same shape as data.")
self._uncertainty = StdDevUncertainty(value)
log.info(
"array provided for uncertainty; assuming it is a "
"StdDevUncertainty."
)
else:
raise TypeError(
"uncertainty must be an instance of a "
"NDUncertainty object or a numpy array."
)
self._uncertainty.parent_nddata = self
else:
self._uncertainty = value
def to_hdu(
self,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_flags=None,
wcs_relax=True,
key_uncertainty_type="UTYPE",
as_image_hdu=False,
hdu_psf="PSFIMAGE",
):
"""Creates an HDUList object from a CCDData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags, hdu_psf : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty,
``'PSFIMAGE'`` for psf, and `None` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
``CTYPE`` ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` requires
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
as_image_hdu : bool
If this option is `True`, the first item of the returned
`~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead
of the default `~astropy.io.fits.PrimaryHDU`.
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
            - If ``self.uncertainty`` is set but not an astropy uncertainty type.
            - If ``self.uncertainty`` is set but has a different unit than
``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
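        Examples
        --------
        A minimal conversion of a small image; the data and unit below are
        illustrative::
            >>> from astropy.nddata import CCDData
            >>> ccd = CCDData([[1, 2], [3, 4]], unit='adu')
            >>> hdulist = ccd.to_hdu()
            >>> len(hdulist)
            1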
"""
if isinstance(self.header, fits.Header):
# Copy here so that we can modify the HDU header by adding WCS
# information without changing the header of the CCDData object.
header = self.header.copy()
else:
# Because _insert_in_metadata_fits_safe is written as a method
# we need to create a dummy CCDData instance to hold the FITS
# header we are constructing. This probably indicates that
# _insert_in_metadata_fits_safe should be rewritten in a more
# sensible way...
dummy_ccd = CCDData([1], meta=fits.Header(), unit="adu")
for k, v in self.header.items():
dummy_ccd._insert_in_metadata_fits_safe(k, v)
header = dummy_ccd.header
if self.unit is not u.dimensionless_unscaled:
header["bunit"] = self.unit.to_string()
if self.wcs:
# Simply extending the FITS header with the WCS can lead to
# duplicates of the WCS keywords; iterating over the WCS
# header should be safer.
#
# Turns out if I had read the io.fits.Header.extend docs more
# carefully, I would have realized that the keywords exist to
# avoid duplicates and preserve, as much as possible, the
# structure of the commentary cards.
#
# Note that until astropy/astropy#3967 is closed, the extend
# will fail if there are comment cards in the WCS header but
# not header.
wcs_header = self.wcs.to_header(relax=wcs_relax)
header.extend(wcs_header, useblanks=False, update=True)
if as_image_hdu:
hdus = [fits.ImageHDU(self.data, header)]
else:
hdus = [fits.PrimaryHDU(self.data, header)]
if hdu_mask and self.mask is not None:
# Always assuming that the mask is a np.ndarray (check that it has
# a 'shape').
if not hasattr(self.mask, "shape"):
raise ValueError("only a numpy.ndarray mask can be saved.")
# Convert boolean mask to uint since io.fits cannot handle bool.
hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
hdus.append(hduMask)
if hdu_uncertainty and self.uncertainty is not None:
            # We need to save some kind of information about which uncertainty
            # class was used so that loading the HDUList can infer the
            # uncertainty type; only the known uncertainty classes are allowed.
uncertainty_cls = self.uncertainty.__class__
if uncertainty_cls not in _known_uncertainties:
raise ValueError(
f"only uncertainties of type {_known_uncertainties} can be saved."
)
uncertainty_name = _unc_cls_to_name[uncertainty_cls]
hdr_uncertainty = fits.Header()
hdr_uncertainty[key_uncertainty_type] = uncertainty_name
            # Assuming the uncertainty is a StdDevUncertainty, save just the
            # array. This might be problematic if the uncertainty has a unit
            # differing from the data, so abort for inequivalent units. This
            # is important for astropy > 1.2.
if hasattr(self.uncertainty, "unit") and self.uncertainty.unit is not None:
if not _uncertainty_unit_equivalent_to_parent(
uncertainty_cls, self.uncertainty.unit, self.unit
):
raise ValueError(
"saving uncertainties with a unit that is not "
"equivalent to the unit from the data unit is not "
"supported."
)
hduUncert = fits.ImageHDU(
self.uncertainty.array, hdr_uncertainty, name=hdu_uncertainty
)
hdus.append(hduUncert)
if hdu_flags and self.flags:
raise NotImplementedError(
"adding the flags to a HDU is not supported at this time."
)
if hdu_psf and self.psf is not None:
# The PSF is an image, so write it as a separate ImageHDU.
hdu_psf = fits.ImageHDU(self.psf, name=hdu_psf)
hdus.append(hdu_psf)
hdulist = fits.HDUList(hdus)
return hdulist
def copy(self):
"""
Return a copy of the CCDData object.
"""
return self.__class__(self, copy=True)
add = _arithmetic(np.add)(NDDataArray.add)
subtract = _arithmetic(np.subtract)(NDDataArray.subtract)
multiply = _arithmetic(np.multiply)(NDDataArray.multiply)
divide = _arithmetic(np.true_divide)(NDDataArray.divide)
def _insert_in_metadata_fits_safe(self, key, value):
"""
Insert key/value pair into metadata in a way that FITS can serialize.
Parameters
----------
key : str
Key to be inserted in dictionary.
value : str or None
Value to be inserted.
Notes
-----
This addresses a shortcoming of the FITS standard. There are length
restrictions on both the ``key`` (8 characters) and ``value`` (72
characters) in the FITS standard. There is a convention for handling
long keywords and a convention for handling long values, but the
two conventions cannot be used at the same time.
This addresses that case by checking the length of the ``key`` and
``value`` and, if necessary, shortening the key.
"""
if len(key) > 8 and len(value) > 72:
short_name = key[:8]
self.meta[f"HIERARCH {key.upper()}"] = (
short_name,
f"Shortened name for {key}",
)
self.meta[short_name] = value
else:
self.meta[key] = value
# A dictionary mapping "known" invalid fits unit
known_invalid_fits_unit_strings = {
"ELECTRONS/S": u.electron / u.s,
"ELECTRONS": u.electron,
"electrons": u.electron,
}
# These need to be importable by the tests...
_KEEP_THESE_KEYWORDS_IN_HEADER = ["JD-OBS", "MJD-OBS", "DATE-OBS"]
_PCs = {"PC1_1", "PC1_2", "PC2_1", "PC2_2"}
_CDs = {"CD1_1", "CD1_2", "CD2_1", "CD2_2"}
def _generate_wcs_and_update_header(hdr):
"""
Generate a WCS object from a header and remove the WCS-specific
keywords from the header.
Parameters
----------
hdr : astropy.io.fits.header or other dict-like
Returns
-------
new_header, wcs
"""
# Try constructing a WCS object.
try:
wcs = WCS(hdr)
except Exception as exc:
# Normally WCS only raises Warnings and doesn't fail but in rare
# cases (malformed header) it could fail...
log.info(
"An exception happened while extracting WCS information from "
"the Header.\n{}: {}".format(type(exc).__name__, str(exc))
)
return hdr, None
# Test for success by checking to see if the wcs ctype has a non-empty
# value, return None for wcs if ctype is empty.
if not wcs.wcs.ctype[0]:
return (hdr, None)
new_hdr = hdr.copy()
# If the keywords below are in the header they are also added to WCS.
# It seems like they should *not* be removed from the header, though.
wcs_header = wcs.to_header(relax=True)
for k in wcs_header:
if k not in _KEEP_THESE_KEYWORDS_IN_HEADER:
new_hdr.remove(k, ignore_missing=True)
# Check that this does not result in an inconsistent header WCS if the WCS
# is converted back to a header.
if (_PCs & set(wcs_header)) and (_CDs & set(new_hdr)):
# The PCi_j representation is used by the astropy.wcs object,
# so CDi_j keywords were not removed from new_hdr. Remove them now.
for cd in _CDs:
new_hdr.remove(cd, ignore_missing=True)
# The other case -- CD in the header produced by astropy.wcs -- should
# never happen based on [1], which computes the matrix in PC form.
# [1]: https://github.com/astropy/astropy/blob/1cf277926d3598dd672dd528504767c37531e8c9/cextern/wcslib/C/wcshdr.c#L596
#
# The test test_ccddata.test_wcs_keyword_removal_for_wcs_test_files() does
# check for the possibility that both PC and CD are present in the result
# so if the implementation of to_header changes in wcslib in the future
# then the tests should catch it, and then this code will need to be
# updated.
# We need to check for any SIP coefficients that got left behind if the
# header has SIP.
if wcs.sip is not None:
keyword = "{}_{}_{}"
polynomials = ["A", "B", "AP", "BP"]
for poly in polynomials:
order = wcs.sip.__getattribute__(f"{poly.lower()}_order")
for i, j in itertools.product(range(order), repeat=2):
new_hdr.remove(keyword.format(poly, i, j), ignore_missing=True)
return (new_hdr, wcs)
def fits_ccddata_reader(
filename,
hdu=0,
unit=None,
hdu_uncertainty="UNCERT",
hdu_mask="MASK",
hdu_flags=None,
key_uncertainty_type="UTYPE",
hdu_psf="PSFIMAGE",
**kwd,
):
"""
Generate a CCDData object from a FITS file.
Parameters
----------
filename : str
Name of fits file.
hdu : int, str, tuple of (str, int), optional
Index or other identifier of the Header Data Unit of the FITS
file from which CCDData should be initialized. If zero and
no data in the primary HDU, it will search for the first
        extension HDU with data; keywords from the primary header are merged
        into that extension's header.
Default is ``0``.
unit : `~astropy.units.Unit`, optional
        Units of the image data. If this argument is provided and the FITS
        header also specifies a unit (via the ``BUNIT`` keyword), this
        argument takes precedence.
Default is ``None``.
hdu_uncertainty : str or None, optional
FITS extension from which the uncertainty should be initialized. If the
extension does not exist the uncertainty of the CCDData is ``None``.
Default is ``'UNCERT'``.
hdu_mask : str or None, optional
FITS extension from which the mask should be initialized. If the
extension does not exist the mask of the CCDData is ``None``.
Default is ``'MASK'``.
hdu_flags : str or None, optional
Currently not implemented.
Default is ``None``.
key_uncertainty_type : str, optional
The header key name where the class name of the uncertainty is stored
in the hdu of the uncertainty (if any).
Default is ``UTYPE``.
.. versionadded:: 3.1
hdu_psf : str or None, optional
FITS extension from which the psf image should be initialized. If the
extension does not exist the psf of the CCDData is `None`.
kwd :
Any additional keyword parameters are passed through to the FITS reader
in :mod:`astropy.io.fits`; see Notes for additional discussion.
Notes
-----
    FITS files that contain scaled data (e.g. unsigned integer images) will
be scaled and the keywords used to manage scaled data in
:mod:`astropy.io.fits` are disabled.
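    Examples
    --------
    This reader is normally invoked through the registered ``CCDData.read``
    interface; the file name and unit below are placeholders::
        >>> from astropy.nddata import CCDData
        >>> ccd = CCDData.read('image.fits', unit='adu')  # doctest: +SKIP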
"""
unsupport_open_keywords = {
"do_not_scale_image_data": "Image data must be scaled.",
"scale_back": "Scale information is not preserved.",
}
for key, msg in unsupport_open_keywords.items():
if key in kwd:
prefix = f"unsupported keyword: {key}."
raise TypeError(" ".join([prefix, msg]))
with fits.open(filename, **kwd) as hdus:
hdr = hdus[hdu].header
if hdu_uncertainty is not None and hdu_uncertainty in hdus:
unc_hdu = hdus[hdu_uncertainty]
stored_unc_name = unc_hdu.header.get(key_uncertainty_type, "None")
# For compatibility reasons the default is standard deviation
# uncertainty because files could have been created before the
# uncertainty type was stored in the header.
unc_type = _unc_name_to_cls.get(stored_unc_name, StdDevUncertainty)
uncertainty = unc_type(unc_hdu.data)
else:
uncertainty = None
if hdu_mask is not None and hdu_mask in hdus:
# Mask is saved as uint but we want it to be boolean.
mask = hdus[hdu_mask].data.astype(np.bool_)
else:
mask = None
if hdu_flags is not None and hdu_flags in hdus:
raise NotImplementedError("loading flags is currently not supported.")
if hdu_psf is not None and hdu_psf in hdus:
psf = hdus[hdu_psf].data
else:
psf = None
# search for the first instance with data if
# the primary header is empty.
if hdu == 0 and hdus[hdu].data is None:
for i in range(len(hdus)):
if (
hdus.info(hdu)[i][3] == "ImageHDU"
and hdus.fileinfo(i)["datSpan"] > 0
):
hdu = i
comb_hdr = hdus[hdu].header.copy()
# Add header values from the primary header that aren't
# present in the extension header.
comb_hdr.extend(hdr, unique=True)
hdr = comb_hdr
log.info(f"first HDU with data is extension {hdu}.")
break
if "bunit" in hdr:
fits_unit_string = hdr["bunit"]
# patch to handle FITS files using ADU for the unit instead of the
# standard version of 'adu'
if fits_unit_string.strip().lower() == "adu":
fits_unit_string = fits_unit_string.lower()
else:
fits_unit_string = None
if fits_unit_string:
if unit is None:
# Convert the BUNIT header keyword to a unit and if that's not
# possible raise a meaningful error message.
try:
kifus = CCDData.known_invalid_fits_unit_strings
if fits_unit_string in kifus:
fits_unit_string = kifus[fits_unit_string]
fits_unit_string = u.Unit(fits_unit_string)
except ValueError:
raise ValueError(
"The Header value for the key BUNIT ({}) cannot be "
"interpreted as valid unit. To successfully read the "
"file as CCDData you can pass in a valid `unit` "
"argument explicitly or change the header of the FITS "
"file before reading it.".format(fits_unit_string)
)
else:
log.info(
"using the unit {} passed to the FITS reader instead "
"of the unit {} in the FITS file.".format(unit, fits_unit_string)
)
use_unit = unit or fits_unit_string
hdr, wcs = _generate_wcs_and_update_header(hdr)
ccd_data = CCDData(
hdus[hdu].data,
meta=hdr,
unit=use_unit,
mask=mask,
uncertainty=uncertainty,
wcs=wcs,
psf=psf,
)
return ccd_data
def fits_ccddata_writer(
ccd_data,
filename,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_flags=None,
key_uncertainty_type="UTYPE",
as_image_hdu=False,
hdu_psf="PSFIMAGE",
**kwd,
):
"""
Write CCDData object to FITS file.
Parameters
----------
ccd_data : CCDData
Object to write.
filename : str
Name of file.
hdu_mask, hdu_uncertainty, hdu_flags, hdu_psf : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty,
``'PSFIMAGE'`` for psf, and `None` for flags.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
as_image_hdu : bool
If this option is `True`, the first item of the returned
`~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead of
the default `~astropy.io.fits.PrimaryHDU`.
kwd :
All additional keywords are passed to :py:mod:`astropy.io.fits`
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
        - If ``self.uncertainty`` is set but has a different unit than
``self.data``.
NotImplementedError
Saving flags is not supported.
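    Examples
    --------
    This writer is normally invoked through the registered ``CCDData.write``
    interface; the file name and data below are placeholders::
        >>> from astropy.nddata import CCDData
        >>> ccd = CCDData([[1, 2], [3, 4]], unit='adu')
        >>> ccd.write('new_image.fits')  # doctest: +SKIP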
"""
hdu = ccd_data.to_hdu(
hdu_mask=hdu_mask,
hdu_uncertainty=hdu_uncertainty,
key_uncertainty_type=key_uncertainty_type,
hdu_flags=hdu_flags,
as_image_hdu=as_image_hdu,
hdu_psf=hdu_psf,
)
if as_image_hdu:
hdu.insert(0, fits.PrimaryHDU())
hdu.writeto(filename, **kwd)
with registry.delay_doc_updates(CCDData):
registry.register_reader("fits", CCDData, fits_ccddata_reader)
registry.register_writer("fits", CCDData, fits_ccddata_writer)
registry.register_identifier("fits", CCDData, fits.connect.is_fits)
|
ac8d7e57aeb42956f8650a4cd0c541739f905f6620fe1f7af76cd4e346a554b4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module includes helper functions for array operations.
"""
from copy import deepcopy
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io.fits.hdu.image import Section
from astropy.utils import lazyproperty
from astropy.wcs import Sip
from astropy.wcs.utils import proj_plane_pixel_scales, skycoord_to_pixel
__all__ = [
"extract_array",
"add_array",
"subpixel_indices",
"overlap_slices",
"NoOverlapError",
"PartialOverlapError",
"Cutout2D",
]
class NoOverlapError(ValueError):
"""Raised when determining the overlap of non-overlapping arrays."""
pass
class PartialOverlapError(ValueError):
"""Raised when arrays only partially overlap."""
pass
def overlap_slices(large_array_shape, small_array_shape, position, mode="partial"):
"""
Get slices for the overlapping part of a small and a large array.
Given a certain position of the center of the small array, with
respect to the large array, tuples of slices are returned which can be
used to extract, add or subtract the small array at the given
position. This function takes care of the correct behavior at the
    boundaries, where the small array is cut off appropriately.
Integer positions are at the pixel centers.
Parameters
----------
large_array_shape : tuple of int or int
The shape of the large array (for 1D arrays, this can be an
`int`).
small_array_shape : int or tuple thereof
The shape of the small array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : number or tuple thereof
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers.
For any axis where ``small_array_shape`` is even, the position
is rounded up, e.g. extracting two elements with a center of
``1`` will define the extracted region as ``[0, 1]``.
mode : {'partial', 'trim', 'strict'}, optional
In ``'partial'`` mode, a partial overlap of the small and the
large array is sufficient. The ``'trim'`` mode is similar to
the ``'partial'`` mode, but ``slices_small`` will be adjusted to
return only the overlapping elements. In the ``'strict'`` mode,
the small array has to be fully contained in the large array,
otherwise an `~astropy.nddata.utils.PartialOverlapError` is
raised. In all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`.
Returns
-------
slices_large : tuple of slice
A tuple of slice objects for each axis of the large array, such
that ``large_array[slices_large]`` extracts the region of the
large array that overlaps with the small array.
slices_small : tuple of slice
A tuple of slice objects for each axis of the small array, such
that ``small_array[slices_small]`` extracts the region that is
inside the large array.
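    Examples
    --------
    A minimal 1D sketch (the shapes and positions below are chosen purely
    for illustration):
    >>> from astropy.nddata.utils import overlap_slices
    >>> overlap_slices(5, 3, 1)
    ((slice(0, 3, None),), (slice(0, 3, None),))
    >>> overlap_slices(5, 3, 0)
    ((slice(0, 2, None),), (slice(1, 3, None),))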
"""
if mode not in ["partial", "trim", "strict"]:
raise ValueError('Mode can be only "partial", "trim", or "strict".')
if np.isscalar(small_array_shape):
small_array_shape = (small_array_shape,)
if np.isscalar(large_array_shape):
large_array_shape = (large_array_shape,)
if np.isscalar(position):
position = (position,)
if any(~np.isfinite(position)):
raise ValueError("Input position contains invalid values (NaNs or infs).")
if len(small_array_shape) != len(large_array_shape):
raise ValueError(
'"large_array_shape" and "small_array_shape" must '
"have the same number of dimensions."
)
if len(small_array_shape) != len(position):
raise ValueError(
'"position" must have the same number of dimensions as "small_array_shape".'
)
# define the min/max pixel indices
indices_min = [
int(np.ceil(pos - (small_shape / 2.0)))
for (pos, small_shape) in zip(position, small_array_shape)
]
indices_max = [
int(np.ceil(pos + (small_shape / 2.0)))
for (pos, small_shape) in zip(position, small_array_shape)
]
for e_max in indices_max:
if e_max < 0:
raise NoOverlapError("Arrays do not overlap.")
for e_min, large_shape in zip(indices_min, large_array_shape):
if e_min >= large_shape:
raise NoOverlapError("Arrays do not overlap.")
if mode == "strict":
for e_min in indices_min:
if e_min < 0:
raise PartialOverlapError("Arrays overlap only partially.")
for e_max, large_shape in zip(indices_max, large_array_shape):
if e_max > large_shape:
raise PartialOverlapError("Arrays overlap only partially.")
# Set up slices
slices_large = tuple(
slice(max(0, indices_min), min(large_shape, indices_max))
for (indices_min, indices_max, large_shape) in zip(
indices_min, indices_max, large_array_shape
)
)
if mode == "trim":
slices_small = tuple(slice(0, slc.stop - slc.start) for slc in slices_large)
else:
slices_small = tuple(
slice(
max(0, -indices_min),
min(large_shape - indices_min, indices_max - indices_min),
)
for (indices_min, indices_max, large_shape) in zip(
indices_min, indices_max, large_array_shape
)
)
return slices_large, slices_small
def extract_array(
array_large,
shape,
position,
mode="partial",
fill_value=np.nan,
return_position=False,
):
"""
Extract a smaller array of the given shape and position from a
larger array.
Parameters
----------
array_large : ndarray
The array from which to extract the small array.
shape : int or tuple thereof
The shape of the extracted array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : number or tuple thereof
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers
(for 1D arrays, this can be a number).
mode : {'partial', 'trim', 'strict'}, optional
The mode used for extracting the small array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
small array and the large array is sufficient. For the
``'strict'`` mode, the small array has to be fully contained
within the large array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In all
modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'`` mode,
positions in the small array that do not overlap with the large
array will be filled with ``fill_value``. In ``'trim'`` mode
only the overlapping elements are returned, thus the resulting
small array may be smaller than the requested ``shape``.
fill_value : number, optional
If ``mode='partial'``, the value to fill pixels in the extracted
small array that do not overlap with the input ``array_large``.
``fill_value`` will be changed to have the same ``dtype`` as the
``array_large`` array, with one exception. If ``array_large``
has integer type and ``fill_value`` is ``np.nan``, then a
`ValueError` will be raised.
return_position : bool, optional
If `True`, return the coordinates of ``position`` in the
coordinate system of the returned array.
Returns
-------
array_small : ndarray
The extracted array.
new_position : tuple
If ``return_position`` is true, this tuple will contain the
coordinates of the input ``position`` in the coordinate system
of ``array_small``. Note that for partially overlapping arrays,
``new_position`` might actually be outside of the
``array_small``; ``array_small[new_position]`` might give wrong
results if any element in ``new_position`` is negative.
Examples
--------
We consider a large array with the shape 11x10, from which we extract
a small array of shape 3x5:
>>> import numpy as np
>>> from astropy.nddata.utils import extract_array
>>> large_array = np.arange(110).reshape((11, 10))
>>> extract_array(large_array, (3, 5), (7, 7))
array([[65, 66, 67, 68, 69],
[75, 76, 77, 78, 79],
[85, 86, 87, 88, 89]])
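    If the requested region extends beyond the edge of the large array,
    ``mode='partial'`` pads the missing values with ``fill_value`` (the
    values below are chosen purely for illustration):
    >>> extract_array(large_array.astype(float), (3, 5), (0, 0),
    ...               mode='partial', fill_value=-1)  # doctest: +FLOAT_CMP
    array([[-1., -1., -1., -1., -1.],
           [-1., -1.,  0.,  1.,  2.],
           [-1., -1., 10., 11., 12.]])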
"""
if np.isscalar(shape):
shape = (shape,)
if np.isscalar(position):
position = (position,)
if mode not in ["partial", "trim", "strict"]:
raise ValueError("Valid modes are 'partial', 'trim', and 'strict'.")
large_slices, small_slices = overlap_slices(
array_large.shape, shape, position, mode=mode
)
extracted_array = array_large[large_slices]
if return_position:
new_position = [i - s.start for i, s in zip(position, large_slices)]
    # Extracting on the edges is presumably a rare case, so treat it specially here
if (extracted_array.shape != shape) and (mode == "partial"):
extracted_array = np.zeros(shape, dtype=array_large.dtype)
try:
extracted_array[:] = fill_value
except ValueError as exc:
exc.args += (
"fill_value is inconsistent with the data type of "
"the input array (e.g., fill_value cannot be set to "
"np.nan if the input array has integer type). Please "
"change either the input array dtype or the "
"fill_value.",
)
raise exc
extracted_array[small_slices] = array_large[large_slices]
if return_position:
new_position = [i + s.start for i, s in zip(new_position, small_slices)]
if return_position:
return extracted_array, tuple(new_position)
else:
return extracted_array
def add_array(array_large, array_small, position):
"""
Add a smaller array at a given position in a larger array.
Parameters
----------
array_large : ndarray
Large array.
array_small : ndarray
Small array to add. Can be equal to ``array_large`` in size in a given
dimension, but not larger.
position : tuple
Position of the small array's center, with respect to the large array.
Coordinates should be in the same order as the array shape.
Returns
-------
new_array : ndarray
The new array formed from the sum of ``array_large`` and
``array_small``.
Notes
-----
The addition is done in-place.
Examples
--------
We consider a large array of zeros with the shape 5x5 and a small
array of ones with a shape of 3x3:
>>> import numpy as np
>>> from astropy.nddata.utils import add_array
>>> large_array = np.zeros((5, 5))
>>> small_array = np.ones((3, 3))
>>> add_array(large_array, small_array, (1, 2)) # doctest: +FLOAT_CMP
array([[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
"""
# Check if large array is not smaller
if all(
large_shape >= small_shape
for (large_shape, small_shape) in zip(array_large.shape, array_small.shape)
):
large_slices, small_slices = overlap_slices(
array_large.shape, array_small.shape, position
)
array_large[large_slices] += array_small[small_slices]
return array_large
else:
raise ValueError("Can't add array. Small array too large.")
def subpixel_indices(position, subsampling):
"""
Convert decimal points to indices, given a subsampling factor.
This discards the integer part of the position and uses only the decimal
place, and converts this to a subpixel position depending on the
subsampling specified. The center of a pixel corresponds to an integer
position.
Parameters
----------
position : ndarray or array-like
Positions in pixels.
subsampling : int
Subsampling factor per pixel.
Returns
-------
indices : ndarray
The integer subpixel indices corresponding to the input positions.
Examples
--------
If no subsampling is used, then the subpixel indices returned are always 0:
>>> from astropy.nddata.utils import subpixel_indices
>>> subpixel_indices([1.2, 3.4, 5.6], 1) # doctest: +FLOAT_CMP
array([0., 0., 0.])
    If instead we use a subsampling of 2, we see that for the first two values
    (1.2 and 3.4) the subpixel index is 1, while for 5.5 it is 0. This is
    because 1.2 and 3.4 lie in the second (right) half of the pixels centered
    at 1 and 3, while 5.5 falls in the first (left) half of the pixel centered
    at 6.
>>> subpixel_indices([1.2, 3.4, 5.5], 2) # doctest: +FLOAT_CMP
array([1., 1., 0.])
"""
# Get decimal points
fractions = np.modf(np.asanyarray(position) + 0.5)[0]
return np.floor(fractions * subsampling)
class Cutout2D:
"""
Create a cutout object from a 2D array.
The returned object will contain a 2D cutout array. If
``copy=False`` (default), the cutout array is a view into the
original ``data`` array, otherwise the cutout array will contain a
copy of the original data.
If a `~astropy.wcs.WCS` object is input, then the returned object
will also contain a copy of the original WCS, but updated for the
cutout array.
For example usage, see :ref:`astropy:cutout_images`.
.. warning::
The cutout WCS object does not currently handle cases where the
input WCS object contains distortion lookup tables described in
the `FITS WCS distortion paper
<https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
Parameters
----------
data : ndarray
The 2D data array from which to extract the cutout array.
position : tuple or `~astropy.coordinates.SkyCoord`
The position of the cutout array's center with respect to
the ``data`` array. The position can be specified either as
a ``(x, y)`` tuple of pixel coordinates or a
`~astropy.coordinates.SkyCoord`, in which case ``wcs`` is a
required input.
size : int, array-like, or `~astropy.units.Quantity`
The size of the cutout array along each axis. If ``size``
is a scalar number or a scalar `~astropy.units.Quantity`,
then a square cutout of ``size`` will be created. If
``size`` has two elements, they should be in ``(ny, nx)``
order. Scalar numbers in ``size`` are assumed to be in
units of pixels. ``size`` can also be a
`~astropy.units.Quantity` object or contain
`~astropy.units.Quantity` objects. Such
`~astropy.units.Quantity` objects must be in pixel or
angular units. For all cases, ``size`` will be converted to
        an integer number of pixels, rounding to the nearest
integer. See the ``mode`` keyword for additional details on
the final cutout size.
.. note::
If ``size`` is in angular units, the cutout size is
converted to pixels using the pixel scales along each
axis of the image at the ``CRPIX`` location. Projection
and other non-linear distortions are not taken into
account.
wcs : `~astropy.wcs.WCS`, optional
A WCS object associated with the input ``data`` array. If
``wcs`` is not `None`, then the returned cutout object will
contain a copy of the updated WCS for the cutout data array.
mode : {'trim', 'partial', 'strict'}, optional
The mode used for creating the cutout data array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
cutout array and the input ``data`` array is sufficient.
For the ``'strict'`` mode, the cutout array has to be fully
contained within the ``data`` array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In
all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'``
mode, positions in the cutout array that do not overlap with
the ``data`` array will be filled with ``fill_value``. In
``'trim'`` mode only the overlapping elements are returned,
thus the resulting cutout array may be smaller than the
requested ``shape``.
fill_value : float or int, optional
If ``mode='partial'``, the value to fill pixels in the
cutout array that do not overlap with the input ``data``.
``fill_value`` must have the same ``dtype`` as the input
``data`` array.
copy : bool, optional
If `False` (default), then the cutout data will be a view
into the original ``data`` array. If `True`, then the
cutout data will hold a copy of the original ``data`` array.
Attributes
----------
data : 2D `~numpy.ndarray`
The 2D cutout array.
shape : (2,) tuple
The ``(ny, nx)`` shape of the cutout array.
shape_input : (2,) tuple
The ``(ny, nx)`` shape of the input (original) array.
input_position_cutout : (2,) tuple
The (unrounded) ``(x, y)`` position with respect to the cutout
array.
input_position_original : (2,) tuple
The original (unrounded) ``(x, y)`` input position (with respect
to the original array).
slices_original : (2,) tuple of slice object
A tuple of slice objects for the minimal bounding box of the
cutout with respect to the original array. For
``mode='partial'``, the slices are for the valid (non-filled)
cutout values.
slices_cutout : (2,) tuple of slice object
A tuple of slice objects for the minimal bounding box of the
cutout with respect to the cutout array. For
``mode='partial'``, the slices are for the valid (non-filled)
cutout values.
xmin_original, ymin_original, xmax_original, ymax_original : float
The minimum and maximum ``x`` and ``y`` indices of the minimal
rectangular region of the cutout array with respect to the
original array. For ``mode='partial'``, the bounding box
indices are for the valid (non-filled) cutout values. These
values are the same as those in `bbox_original`.
xmin_cutout, ymin_cutout, xmax_cutout, ymax_cutout : float
The minimum and maximum ``x`` and ``y`` indices of the minimal
rectangular region of the cutout array with respect to the
cutout array. For ``mode='partial'``, the bounding box indices
are for the valid (non-filled) cutout values. These values are
the same as those in `bbox_cutout`.
wcs : `~astropy.wcs.WCS` or None
A WCS object associated with the cutout array if a ``wcs``
was input.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata.utils import Cutout2D
>>> from astropy import units as u
>>> data = np.arange(20.).reshape(5, 4)
>>> cutout1 = Cutout2D(data, (2, 2), (3, 3))
>>> print(cutout1.data) # doctest: +FLOAT_CMP
[[ 5. 6. 7.]
[ 9. 10. 11.]
[13. 14. 15.]]
>>> print(cutout1.center_original)
(2.0, 2.0)
>>> print(cutout1.center_cutout)
(1.0, 1.0)
>>> print(cutout1.origin_original)
(1, 1)
>>> cutout2 = Cutout2D(data, (2, 2), 3)
>>> print(cutout2.data) # doctest: +FLOAT_CMP
[[ 5. 6. 7.]
[ 9. 10. 11.]
[13. 14. 15.]]
>>> size = u.Quantity([3, 3], u.pixel)
>>> cutout3 = Cutout2D(data, (0, 0), size)
>>> print(cutout3.data) # doctest: +FLOAT_CMP
[[0. 1.]
[4. 5.]]
>>> cutout4 = Cutout2D(data, (0, 0), (3 * u.pixel, 3))
>>> print(cutout4.data) # doctest: +FLOAT_CMP
[[0. 1.]
[4. 5.]]
>>> cutout5 = Cutout2D(data, (0, 0), (3, 3), mode='partial')
>>> print(cutout5.data) # doctest: +FLOAT_CMP
[[nan nan nan]
[nan 0. 1.]
[nan 4. 5.]]
"""
def __init__(
self, data, position, size, wcs=None, mode="trim", fill_value=np.nan, copy=False
):
if wcs is None:
wcs = getattr(data, "wcs", None)
if isinstance(position, SkyCoord):
if wcs is None:
raise ValueError("wcs must be input if position is a SkyCoord")
position = skycoord_to_pixel(position, wcs, mode="all") # (x, y)
if np.isscalar(size):
size = np.repeat(size, 2)
# special handling for a scalar Quantity
if isinstance(size, u.Quantity):
size = np.atleast_1d(size)
if len(size) == 1:
size = np.repeat(size, 2)
if len(size) > 2:
raise ValueError("size must have at most two elements")
shape = np.zeros(2).astype(int)
pixel_scales = None
# ``size`` can have a mixture of int and Quantity (and even units),
# so evaluate each axis separately
for axis, side in enumerate(size):
if not isinstance(side, u.Quantity):
shape[axis] = int(np.round(size[axis])) # pixels
else:
if side.unit == u.pixel:
shape[axis] = int(np.round(side.value))
elif side.unit.physical_type == "angle":
if wcs is None:
raise ValueError(
"wcs must be input if any element of size has angular units"
)
if pixel_scales is None:
pixel_scales = u.Quantity(
proj_plane_pixel_scales(wcs), wcs.wcs.cunit[axis]
)
shape[axis] = int(np.round((side / pixel_scales[axis]).decompose()))
else:
raise ValueError(
"shape can contain Quantities with only pixel or angular units"
)
if not isinstance(data, Section): # Accept lazy-loaded image sections
data = np.asanyarray(data)
# reverse position because extract_array and overlap_slices
# use (y, x), but keep the input position
pos_yx = position[::-1]
cutout_data, input_position_cutout = extract_array(
data,
tuple(shape),
pos_yx,
mode=mode,
fill_value=fill_value,
return_position=True,
)
if copy:
cutout_data = np.copy(cutout_data)
self.data = cutout_data
self.input_position_cutout = input_position_cutout[::-1] # (x, y)
slices_original, slices_cutout = overlap_slices(
data.shape, shape, pos_yx, mode=mode
)
self.slices_original = slices_original
self.slices_cutout = slices_cutout
self.shape = self.data.shape
self.input_position_original = position
self.shape_input = shape
(
(self.ymin_original, self.ymax_original),
(self.xmin_original, self.xmax_original),
) = self.bbox_original
(
(self.ymin_cutout, self.ymax_cutout),
(self.xmin_cutout, self.xmax_cutout),
) = self.bbox_cutout
# the true origin pixel of the cutout array, including any
# filled cutout values
self._origin_original_true = (
self.origin_original[0] - self.slices_cutout[1].start,
self.origin_original[1] - self.slices_cutout[0].start,
)
if wcs is not None:
self.wcs = deepcopy(wcs)
self.wcs.wcs.crpix -= self._origin_original_true
self.wcs.array_shape = self.data.shape
if wcs.sip is not None:
self.wcs.sip = Sip(
wcs.sip.a,
wcs.sip.b,
wcs.sip.ap,
wcs.sip.bp,
wcs.sip.crpix - self._origin_original_true,
)
else:
self.wcs = None
def to_original_position(self, cutout_position):
"""
Convert an ``(x, y)`` position in the cutout array to the original
``(x, y)`` position in the original large array.
Parameters
----------
cutout_position : tuple
The ``(x, y)`` pixel position in the cutout array.
Returns
-------
original_position : tuple
The corresponding ``(x, y)`` pixel position in the original
large array.
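        Examples
        --------
        A minimal sketch (the data array and cutout below are chosen purely
        for illustration):
        >>> import numpy as np
        >>> from astropy.nddata.utils import Cutout2D
        >>> data = np.arange(20.).reshape(5, 4)
        >>> cutout = Cutout2D(data, (2, 2), (3, 3))
        >>> cutout.to_original_position((0, 0))
        (1, 1)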
"""
return tuple(cutout_position[i] + self.origin_original[i] for i in [0, 1])
def to_cutout_position(self, original_position):
"""
Convert an ``(x, y)`` position in the original large array to
the ``(x, y)`` position in the cutout array.
Parameters
----------
original_position : tuple
The ``(x, y)`` pixel position in the original large array.
Returns
-------
cutout_position : tuple
The corresponding ``(x, y)`` pixel position in the cutout
array.
"""
return tuple(original_position[i] - self.origin_original[i] for i in [0, 1])
def plot_on_original(self, ax=None, fill=False, **kwargs):
"""
Plot the cutout region on a matplotlib Axes instance.
Parameters
----------
ax : `matplotlib.axes.Axes` instance, optional
If `None`, then the current `matplotlib.axes.Axes` instance
is used.
fill : bool, optional
Set whether to fill the cutout patch. The default is
`False`.
kwargs : optional
Any keyword arguments accepted by `matplotlib.patches.Patch`.
Returns
-------
ax : `matplotlib.axes.Axes` instance
The matplotlib Axes instance constructed in the method if
``ax=None``. Otherwise the output ``ax`` is the same as the
input ``ax``.
"""
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
kwargs["fill"] = fill
if ax is None:
ax = plt.gca()
height, width = self.shape
hw, hh = width / 2.0, height / 2.0
pos_xy = self.position_original - np.array([hw, hh])
patch = mpatches.Rectangle(pos_xy, width, height, 0.0, **kwargs)
ax.add_patch(patch)
return ax
@staticmethod
def _calc_center(slices):
"""
Calculate the center position. The center position will be
fractional for even-sized arrays. For ``mode='partial'``, the
central position is calculated for the valid (non-filled) cutout
values.
"""
return tuple(0.5 * (slices[i].start + slices[i].stop - 1) for i in [1, 0])
@staticmethod
def _calc_bbox(slices):
"""
Calculate a minimal bounding box in the form ``((ymin, ymax),
(xmin, xmax))``. Note these are pixel locations, not slice
indices. For ``mode='partial'``, the bounding box indices are
for the valid (non-filled) cutout values.
"""
# (stop - 1) to return the max pixel location, not the slice index
return (
(slices[0].start, slices[0].stop - 1),
(slices[1].start, slices[1].stop - 1),
)
@lazyproperty
def origin_original(self):
"""
The ``(x, y)`` index of the origin pixel of the cutout with
respect to the original array. For ``mode='partial'``, the
origin pixel is calculated for the valid (non-filled) cutout
values.
"""
return (self.slices_original[1].start, self.slices_original[0].start)
@lazyproperty
def origin_cutout(self):
"""
The ``(x, y)`` index of the origin pixel of the cutout with
respect to the cutout array. For ``mode='partial'``, the origin
pixel is calculated for the valid (non-filled) cutout values.
"""
return (self.slices_cutout[1].start, self.slices_cutout[0].start)
@staticmethod
def _round(a):
"""
Round the input to the nearest integer.
If two integers are equally close, the value is rounded up.
Note that this is different from `np.round`, which rounds to the
nearest even number.
"""
return int(np.floor(a + 0.5))
@lazyproperty
def position_original(self):
"""
The ``(x, y)`` position index (rounded to the nearest pixel) in
the original array.
"""
return (
self._round(self.input_position_original[0]),
self._round(self.input_position_original[1]),
)
@lazyproperty
def position_cutout(self):
"""
The ``(x, y)`` position index (rounded to the nearest pixel) in
the cutout array.
"""
return (
self._round(self.input_position_cutout[0]),
self._round(self.input_position_cutout[1]),
)
@lazyproperty
def center_original(self):
"""
The central ``(x, y)`` position of the cutout array with respect
to the original array. For ``mode='partial'``, the central
position is calculated for the valid (non-filled) cutout values.
"""
return self._calc_center(self.slices_original)
@lazyproperty
def center_cutout(self):
"""
The central ``(x, y)`` position of the cutout array with respect
to the cutout array. For ``mode='partial'``, the central
position is calculated for the valid (non-filled) cutout values.
"""
return self._calc_center(self.slices_cutout)
@lazyproperty
def bbox_original(self):
"""
The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal
rectangular region of the cutout array with respect to the
original array. For ``mode='partial'``, the bounding box
indices are for the valid (non-filled) cutout values.
"""
return self._calc_bbox(self.slices_original)
@lazyproperty
def bbox_cutout(self):
"""
The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal
rectangular region of the cutout array with respect to the
cutout array. For ``mode='partial'``, the bounding box indices
are for the valid (non-filled) cutout values.
"""
return self._calc_bbox(self.slices_cutout)
|
2636ee0bc378587af528f50b9415d7e2e6bfd13512fcd76ddcd6c50d5809e521 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module contains a class equivalent to pre-1.0 NDData.
import numpy as np
from astropy import log
from astropy.units import Unit, UnitConversionError, UnitsError # noqa: F401
from .flag_collection import FlagCollection
from .mixins.ndarithmetic import NDArithmeticMixin
from .mixins.ndio import NDIOMixin
from .mixins.ndslicing import NDSlicingMixin
from .nddata import NDData
from .nduncertainty import NDUncertainty
__all__ = ["NDDataArray"]
class NDDataArray(NDArithmeticMixin, NDSlicingMixin, NDIOMixin, NDData):
"""
An ``NDData`` object with arithmetic. This class is functionally equivalent
to ``NDData`` in astropy versions prior to 1.0.
The key distinction from raw numpy arrays is the presence of
additional metadata such as uncertainties, a mask, units, flags,
and/or a coordinate system.
See also: https://docs.astropy.org/en/stable/nddata/
Parameters
----------
data : ndarray or `NDData`
        The actual data contained in this `NDData` object. Note that this
        will always be copied by *reference*, so you should make a copy of
        the ``data`` before passing it in if that's the desired behavior.
uncertainty : `~astropy.nddata.NDUncertainty`, optional
Uncertainties on the data.
mask : array-like, optional
Mask for the data, given as a boolean Numpy array or any object that
can be converted to a boolean Numpy array with a shape
matching that of the data. The values must be ``False`` where
the data is *valid* and ``True`` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
        ``mask`` here will cause the mask from the masked array to be
ignored.
flags : array-like or `~astropy.nddata.FlagCollection`, optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type (or an object which can be converted
to a Numpy array) with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
wcs : None, optional
WCS-object containing the world coordinate system for the data.
.. warning::
This is not yet defined because the discussion of how best to
represent this class's WCS system generically is still under
consideration. For now just leave it as None
meta : `dict`-like object, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object. e.g., creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.UnitBase` instance or str, optional
The units of the data.
Raises
------
ValueError :
If the `uncertainty` or `mask` inputs cannot be broadcast (e.g., match
shape) onto ``data``.
"""
def __init__(self, data, *args, flags=None, **kwargs):
# Initialize with the parent...
super().__init__(data, *args, **kwargs)
# ...then reset uncertainty to force it to go through the
# setter logic below. In base NDData all that is done is to
# set self._uncertainty to whatever uncertainty is passed in.
self.uncertainty = self._uncertainty
# Same thing for mask.
self.mask = self._mask
        # Initialize flags here because they are no longer handled in NDData
        # or NDDataBase.
if isinstance(data, NDDataArray):
if flags is None:
flags = data.flags
else:
log.info(
"Overwriting NDDataArrays's current flags with specified flags"
)
self.flags = flags
# Implement uncertainty as NDUncertainty to support propagation of
# uncertainties in arithmetic operations
@property
def uncertainty(self):
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
if isinstance(value, NDUncertainty):
class_name = self.__class__.__name__
if not self.unit and value._unit:
# Raise an error if uncertainty has unit and data does not
raise ValueError(
"Cannot assign an uncertainty with unit "
"to {} without "
"a unit".format(class_name)
)
self._uncertainty = value
self._uncertainty.parent_nddata = self
else:
raise TypeError(
"Uncertainty must be an instance of a NDUncertainty object"
)
else:
self._uncertainty = value
# Override unit so that we can add a setter.
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
from . import conf
try:
if self._unit is not None and conf.warn_setting_unit_directly:
log.info(
"Setting the unit directly changes the unit without "
"updating the data or uncertainty. Use the "
".convert_unit_to() method to change the unit and "
"scale values appropriately."
)
except AttributeError:
# raised if self._unit has not been set yet, in which case the
# warning is irrelevant
pass
if value is None:
self._unit = None
else:
self._unit = Unit(value)
# Implement mask in a way that converts nicely to a numpy masked array
@property
def mask(self):
if self._mask is np.ma.nomask:
return None
else:
return self._mask
@mask.setter
def mask(self, value):
# Check that value is not either type of null mask.
if (value is not None) and (value is not np.ma.nomask):
mask = np.array(value, dtype=np.bool_, copy=False)
if mask.shape != self.data.shape:
raise ValueError(
f"dimensions of mask {mask.shape} and data {self.data.shape} do not match"
)
else:
self._mask = mask
else:
# internal representation should be one numpy understands
self._mask = np.ma.nomask
@property
def shape(self):
"""
shape tuple of this object's data.
"""
return self.data.shape
@property
def size(self):
"""
integer size of this object's data.
"""
return self.data.size
@property
def dtype(self):
"""
`numpy.dtype` of this object's data.
"""
return self.data.dtype
@property
def ndim(self):
"""
integer dimensions of this object's data.
"""
return self.data.ndim
@property
def flags(self):
return self._flags
@flags.setter
def flags(self, value):
if value is not None:
if isinstance(value, FlagCollection):
if value.shape != self.shape:
raise ValueError("dimensions of FlagCollection does not match data")
else:
self._flags = value
else:
flags = np.array(value, copy=False)
if flags.shape != self.shape:
raise ValueError("dimensions of flags do not match data")
else:
self._flags = flags
else:
self._flags = value
def __array__(self):
"""
This allows code that requests a Numpy array to use an NDData
object as a Numpy array.
"""
if self.mask is not None:
return np.ma.masked_array(self.data, self.mask)
else:
return np.array(self.data)
def __array_prepare__(self, array, context=None):
"""
This ensures that a masked array is returned if self is masked.
"""
if self.mask is not None:
return np.ma.masked_array(array, self.mask)
else:
return array
def convert_unit_to(self, unit, equivalencies=[]):
"""
Returns a new `NDData` object whose values have been converted
to a new unit.
Parameters
----------
unit : `astropy.units.UnitBase` instance or str
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
Returns
-------
result : `~astropy.nddata.NDData`
The resulting dataset
Raises
------
`~astropy.units.UnitsError`
If units are inconsistent.
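        Examples
        --------
        A minimal sketch (the data values and units below are chosen purely
        for illustration):
        >>> import numpy as np
        >>> from astropy.nddata import NDDataArray
        >>> ndd = NDDataArray(np.array([1.0, 2.0]), unit='m')
        >>> ndd.convert_unit_to('cm').data  # doctest: +FLOAT_CMP
        array([100., 200.])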
"""
if self.unit is None:
raise ValueError("No unit specified on source data")
data = self.unit.to(unit, self.data, equivalencies=equivalencies)
if self.uncertainty is not None:
uncertainty_values = self.unit.to(
unit, self.uncertainty.array, equivalencies=equivalencies
)
# should work for any uncertainty class
uncertainty = self.uncertainty.__class__(uncertainty_values)
else:
uncertainty = None
if self.mask is not None:
new_mask = self.mask.copy()
else:
new_mask = None
# Call __class__ in case we are dealing with an inherited type
result = self.__class__(
data,
uncertainty=uncertainty,
mask=new_mask,
wcs=self.wcs,
meta=self.meta,
unit=unit,
)
return result
|
b11c5e9f564f65806beba5d0cd66d426121e540c3ebf34d6aa970308d899cbbb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements a class based on NDData with all Mixins.
"""
from .mixins.ndarithmetic import NDArithmeticMixin
from .mixins.ndio import NDIOMixin
from .mixins.ndslicing import NDSlicingMixin
from .nddata import NDData
__all__ = ["NDDataRef"]
class NDDataRef(NDArithmeticMixin, NDIOMixin, NDSlicingMixin, NDData):
"""Implements `NDData` with all Mixins.
This class implements a `NDData`-like container that supports reading and
writing as implemented in the ``astropy.io.registry`` and also slicing
(indexing) and simple arithmetic (add, subtract, divide and multiply).
Notes
-----
A key distinction from `NDDataArray` is that this class does not attempt
to provide anything that was not defined in any of the parent classes.
See Also
--------
NDData
NDArithmeticMixin
NDSlicingMixin
NDIOMixin
Examples
--------
    The mixins allow operations that are not possible with `NDData` or
`NDDataBase`, i.e. simple arithmetic::
>>> from astropy.nddata import NDDataRef, StdDevUncertainty
>>> import numpy as np
>>> data = np.ones((3,3), dtype=float)
>>> ndd1 = NDDataRef(data, uncertainty=StdDevUncertainty(data))
>>> ndd2 = NDDataRef(data, uncertainty=StdDevUncertainty(data))
>>> ndd3 = ndd1.add(ndd2)
>>> ndd3.data # doctest: +FLOAT_CMP
array([[2., 2., 2.],
[2., 2., 2.],
[2., 2., 2.]])
>>> ndd3.uncertainty.array # doctest: +FLOAT_CMP
array([[1.41421356, 1.41421356, 1.41421356],
[1.41421356, 1.41421356, 1.41421356],
[1.41421356, 1.41421356, 1.41421356]])
see `NDArithmeticMixin` for a complete list of all supported arithmetic
operations.
But also slicing (indexing) is possible::
>>> ndd4 = ndd3[1,:]
>>> ndd4.data # doctest: +FLOAT_CMP
array([2., 2., 2.])
>>> ndd4.uncertainty.array # doctest: +FLOAT_CMP
array([1.41421356, 1.41421356, 1.41421356])
    See `NDSlicingMixin` for a description of how slicing works (which
    attributes are sliced).
"""
pass
|
e063e8d0fe7d4f8c35bbb3fbed23bdf57e585d6776ea46531f6c8b7761b21a84 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import numpy as np
import astropy.units as u
from astropy.coordinates import ITRS, CartesianRepresentation, SphericalRepresentation
from astropy.utils import unbroadcast
from .wcs import WCS, WCSSUB_LATITUDE, WCSSUB_LONGITUDE
__doctest_skip__ = ["wcs_to_celestial_frame", "celestial_frame_to_wcs"]
__all__ = [
"obsgeo_to_frame",
"add_stokes_axis_to_wcs",
"celestial_frame_to_wcs",
"wcs_to_celestial_frame",
"proj_plane_pixel_scales",
"proj_plane_pixel_area",
"is_proj_plane_distorted",
"non_celestial_pixel_scales",
"skycoord_to_pixel",
"pixel_to_skycoord",
"custom_wcs_to_frame_mappings",
"custom_frame_to_wcs_mappings",
"pixel_to_pixel",
"local_partial_pixel_derivatives",
"fit_wcs_from_points",
]
def add_stokes_axis_to_wcs(wcs, add_before_ind):
"""
Add a new Stokes axis that is uncorrelated with any other axes.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to add to
add_before_ind : int
Index of the WCS to insert the new Stokes axis in front of.
        To add at the end, use ``add_before_ind = wcs.wcs.naxis``.
The beginning is at position 0.
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with an additional axis
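    Examples
    --------
    A minimal sketch (a bare 2-axis WCS is assumed purely for illustration):
    >>> from astropy.wcs import WCS
    >>> from astropy.wcs.utils import add_stokes_axis_to_wcs
    >>> wcs = WCS(naxis=2)
    >>> wcs3 = add_stokes_axis_to_wcs(wcs, 2)
    >>> wcs3.wcs.naxis
    3
    >>> wcs3.wcs.ctype[2]
    'STOKES'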
"""
inds = [i + 1 for i in range(wcs.wcs.naxis)]
inds.insert(add_before_ind, 0)
newwcs = wcs.sub(inds)
newwcs.wcs.ctype[add_before_ind] = "STOKES"
newwcs.wcs.cname[add_before_ind] = "STOKES"
return newwcs
def _wcs_to_celestial_frame_builtin(wcs):
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import (
FK4,
FK5,
ICRS,
ITRS,
FK4NoETerms,
Galactic,
SphericalRepresentation,
)
# Import astropy.time here otherwise setup.py fails before extensions are compiled
from astropy.time import Time
if wcs.wcs.lng == -1 or wcs.wcs.lat == -1:
return None
radesys = wcs.wcs.radesys
if np.isnan(wcs.wcs.equinox):
equinox = None
else:
equinox = wcs.wcs.equinox
xcoord = wcs.wcs.ctype[wcs.wcs.lng][:4]
ycoord = wcs.wcs.ctype[wcs.wcs.lat][:4]
# Apply logic from FITS standard to determine the default radesys
if radesys == "" and xcoord == "RA--" and ycoord == "DEC-":
if equinox is None:
radesys = "ICRS"
elif equinox < 1984.0:
radesys = "FK4"
else:
radesys = "FK5"
if radesys == "FK4":
if equinox is not None:
equinox = Time(equinox, format="byear")
frame = FK4(equinox=equinox)
elif radesys == "FK4-NO-E":
if equinox is not None:
equinox = Time(equinox, format="byear")
frame = FK4NoETerms(equinox=equinox)
elif radesys == "FK5":
if equinox is not None:
equinox = Time(equinox, format="jyear")
frame = FK5(equinox=equinox)
elif radesys == "ICRS":
frame = ICRS()
else:
if xcoord == "GLON" and ycoord == "GLAT":
frame = Galactic()
elif xcoord == "TLON" and ycoord == "TLAT":
# The default representation for ITRS is cartesian, but for WCS
# purposes, we need the spherical representation.
frame = ITRS(
representation_type=SphericalRepresentation,
obstime=wcs.wcs.dateobs or None,
)
else:
frame = None
return frame
def _celestial_frame_to_wcs_builtin(frame, projection="TAN"):
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import (
FK4,
FK5,
ICRS,
ITRS,
BaseRADecFrame,
FK4NoETerms,
Galactic,
)
# Create a 2-dimensional WCS
wcs = WCS(naxis=2)
if isinstance(frame, BaseRADecFrame):
xcoord = "RA--"
ycoord = "DEC-"
if isinstance(frame, ICRS):
wcs.wcs.radesys = "ICRS"
elif isinstance(frame, FK4NoETerms):
wcs.wcs.radesys = "FK4-NO-E"
wcs.wcs.equinox = frame.equinox.byear
elif isinstance(frame, FK4):
wcs.wcs.radesys = "FK4"
wcs.wcs.equinox = frame.equinox.byear
elif isinstance(frame, FK5):
wcs.wcs.radesys = "FK5"
wcs.wcs.equinox = frame.equinox.jyear
else:
return None
elif isinstance(frame, Galactic):
xcoord = "GLON"
ycoord = "GLAT"
elif isinstance(frame, ITRS):
xcoord = "TLON"
ycoord = "TLAT"
wcs.wcs.radesys = "ITRS"
wcs.wcs.dateobs = frame.obstime.utc.isot
else:
return None
wcs.wcs.ctype = [xcoord + "-" + projection, ycoord + "-" + projection]
return wcs
WCS_FRAME_MAPPINGS = [[_wcs_to_celestial_frame_builtin]]
FRAME_WCS_MAPPINGS = [[_celestial_frame_to_wcs_builtin]]
class custom_wcs_to_frame_mappings:
def __init__(self, mappings=[]):
if hasattr(mappings, "__call__"):
mappings = [mappings]
WCS_FRAME_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
WCS_FRAME_MAPPINGS.pop()
# Backward-compatibility
custom_frame_mappings = custom_wcs_to_frame_mappings
class custom_frame_to_wcs_mappings:
def __init__(self, mappings=[]):
if hasattr(mappings, "__call__"):
mappings = [mappings]
FRAME_WCS_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
FRAME_WCS_MAPPINGS.pop()
def wcs_to_celestial_frame(wcs):
"""
For a given WCS, return the coordinate frame that matches the celestial
component of the WCS.
Parameters
----------
wcs : :class:`~astropy.wcs.WCS` instance
The WCS to find the frame for
Returns
-------
frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
        A :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
        that best matches the specified WCS.
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a :class:`~astropy.wcs.WCS`
instance and should return either an instance of a frame, or `None` if no
matching frame was found. You can register this function temporarily with::
>>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_wcs_to_frame_mappings
>>> with custom_wcs_to_frame_mappings(my_function):
... wcs_to_celestial_frame(...)
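    For a simple equatorial header this returns an ICRS frame (a sketch with
    header values assumed purely for illustration):
    >>> from astropy.wcs import WCS
    >>> wcs = WCS(naxis=2)
    >>> wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    >>> wcs_to_celestial_frame(wcs)
    <ICRS Frame>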
"""
for mapping_set in WCS_FRAME_MAPPINGS:
for func in mapping_set:
frame = func(wcs)
if frame is not None:
return frame
raise ValueError(
"Could not determine celestial frame corresponding to the specified WCS object"
)
def celestial_frame_to_wcs(frame, projection="TAN"):
"""
For a given coordinate frame, return the corresponding WCS object.
Note that the returned WCS object has only the elements corresponding to
coordinate frames set (e.g. ctype, equinox, radesys).
Parameters
----------
frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
        A :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
        for which to find the WCS.
projection : str
Projection code to use in ctype, if applicable
Returns
-------
wcs : :class:`~astropy.wcs.WCS` instance
The corresponding WCS object
Examples
--------
::
>>> from astropy.wcs.utils import celestial_frame_to_wcs
>>> from astropy.coordinates import FK5
>>> frame = FK5(equinox='J2010')
>>> wcs = celestial_frame_to_wcs(frame)
>>> wcs.to_header()
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection
CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection
CRVAL1 = 0.0 / [deg] Coordinate value at reference point
CRVAL2 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
RADESYS = 'FK5' / Equatorial coordinate system
EQUINOX = 2010.0 / [yr] Equinox of equatorial coordinates
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a
:class:`~astropy.coordinates.BaseCoordinateFrame` subclass
instance and a projection (given as a string) and should return either a WCS
instance, or `None` if the WCS could not be determined. You can register
this function temporarily with::
>>> from astropy.wcs.utils import celestial_frame_to_wcs, custom_frame_to_wcs_mappings
>>> with custom_frame_to_wcs_mappings(my_function):
... celestial_frame_to_wcs(...)
"""
for mapping_set in FRAME_WCS_MAPPINGS:
for func in mapping_set:
wcs = func(frame, projection=projection)
if wcs is not None:
return wcs
raise ValueError(
"Could not determine WCS corresponding to the specified coordinate frame."
)
def proj_plane_pixel_scales(wcs):
"""
For a WCS returns pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
non-linear nature of most projections.
.. note::
In order to compute the scales corresponding to celestial axes only,
make sure that the input `~astropy.wcs.WCS` object contains
celestial axes only, e.g., by passing in the
`~astropy.wcs.WCS.celestial` WCS object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
scale : ndarray
A vector (`~numpy.ndarray`) of projection plane increments
corresponding to each pixel side (axis). The units of the returned
results are the same as the units of `~astropy.wcs.Wcsprm.cdelt`,
`~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` for
the celestial WCS and can be obtained by inquiring the value
of `~astropy.wcs.Wcsprm.cunit` property of the input
`~astropy.wcs.WCS` WCS object.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
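    Examples
    --------
    A minimal sketch (the ``CDELT`` values below are assumed purely for
    illustration):
    >>> from astropy.wcs import WCS
    >>> from astropy.wcs.utils import proj_plane_pixel_scales
    >>> wcs = WCS(naxis=2)
    >>> wcs.wcs.cdelt = [-0.1, 0.2]
    >>> proj_plane_pixel_scales(wcs)  # doctest: +FLOAT_CMP
    array([0.1, 0.2])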
"""
return np.sqrt((wcs.pixel_scale_matrix**2).sum(axis=0, dtype=float))
def proj_plane_pixel_area(wcs):
"""
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`) returns pixel
area of the image pixel at the ``CRPIX`` location once it is projected
onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
non-linear nature of most projections.
.. note::
In order to compute the area of pixels corresponding to celestial
axes only, this function uses the `~astropy.wcs.WCS.celestial` WCS
object of the input ``wcs``. This is different from the
`~astropy.wcs.utils.proj_plane_pixel_scales` function
that computes the scales for the axes of the input WCS itself.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
area : float
Area (in the projection plane) of the pixel at ``CRPIX`` location.
The units of the returned result are the same as the units of
the `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`,
and `~astropy.wcs.Wcsprm.cd` for the celestial WCS and can be
obtained by inquiring the value of `~astropy.wcs.Wcsprm.cunit`
property of the `~astropy.wcs.WCS.celestial` WCS object.
Raises
------
ValueError
Pixel area is defined only for 2D pixels. Most likely the
`~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
WCS is not a square matrix of second order.
Notes
-----
    Depending on the application, the square root of the pixel area can be used to
represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
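    Examples
    --------
    A minimal sketch (the header values below are assumed purely for
    illustration):
    >>> from astropy.wcs import WCS
    >>> from astropy.wcs.utils import proj_plane_pixel_area
    >>> wcs = WCS(naxis=2)
    >>> wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    >>> wcs.wcs.cdelt = [-0.1, 0.2]
    >>> float(proj_plane_pixel_area(wcs))  # doctest: +FLOAT_CMP
    0.02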
"""
psm = wcs.celestial.pixel_scale_matrix
if psm.shape != (2, 2):
raise ValueError("Pixel area is defined only for 2D pixels.")
return np.abs(np.linalg.det(psm))
def is_proj_plane_distorted(wcs, maxerr=1.0e-5):
r"""
For a WCS returns `False` if square image (detector) pixels stay square
when projected onto the "plane of intermediate world coordinates"
as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
    It will return `True` if the transformation from image (detector) coordinates
    to the focal plane coordinates is non-orthogonal or if the WCS contains
non-linear (e.g., SIP) distortions.
.. note::
Since this function is concerned **only** about the transformation
"image plane"->"focal plane" and **not** about the transformation
"celestial sphere"->"focal plane"->"image plane",
this function ignores distortions arising due to non-linear nature
of most projections.
Let's denote by *C* either the original or the reconstructed
(from ``PC`` and ``CDELT``) CD matrix. `is_proj_plane_distorted`
verifies that the transformation from image (detector) coordinates
to the focal plane coordinates is orthogonal using the following
check:
.. math::
\left \| \frac{C \cdot C^{\mathrm{T}}}
{| det(C)|} - I \right \|_{\mathrm{max}} < \epsilon .
Parameters
----------
wcs : `~astropy.wcs.WCS`
World coordinate system object
maxerr : float, optional
Accuracy to which the CD matrix, **normalized** such
that :math:`|det(CD)|=1`, should be close to being an
orthogonal matrix as described in the above equation
(see :math:`\epsilon`).
Returns
-------
distorted : bool
Returns `True` if focal (projection) plane is distorted and `False`
otherwise.
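    Examples
    --------
    A minimal sketch (the header values below are assumed purely for
    illustration; a plain orthogonal TAN WCS is not distorted):
    >>> from astropy.wcs import WCS
    >>> from astropy.wcs.utils import is_proj_plane_distorted
    >>> wcs = WCS(naxis=2)
    >>> wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    >>> wcs.wcs.cdelt = [-0.1, 0.1]
    >>> is_proj_plane_distorted(wcs)
    False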
"""
cwcs = wcs.celestial
return not _is_cd_orthogonal(cwcs.pixel_scale_matrix, maxerr) or _has_distortion(cwcs) # fmt: skip
def _is_cd_orthogonal(cd, maxerr):
shape = cd.shape
if not (len(shape) == 2 and shape[0] == shape[1]):
raise ValueError("CD (or PC) matrix must be a 2D square matrix.")
pixarea = np.abs(np.linalg.det(cd))
if pixarea == 0.0:
raise ValueError("CD (or PC) matrix is singular.")
# NOTE: Technically, below we should use np.dot(cd, np.conjugate(cd.T))
# However, I am not aware of complex CD/PC matrices...
I = np.dot(cd, cd.T) / pixarea
cd_unitary_err = np.amax(np.abs(I - np.eye(shape[0])))
return cd_unitary_err < maxerr
def non_celestial_pixel_scales(inwcs):
"""
Calculate the pixel scale along each axis of a non-celestial WCS,
for example one with mixed spectral and spatial axes.
Parameters
----------
inwcs : `~astropy.wcs.WCS`
The world coordinate system object.
Returns
-------
scale : `numpy.ndarray`
The pixel scale along each axis.
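    Examples
    --------
    A minimal sketch (a WCS with no celestial axes and assumed ``CDELT``
    values, purely for illustration):
    >>> from astropy.wcs import WCS
    >>> from astropy.wcs.utils import non_celestial_pixel_scales
    >>> wcs = WCS(naxis=2)
    >>> wcs.wcs.cdelt = [0.1, 0.2]
    >>> non_celestial_pixel_scales(wcs)  # doctest: +FLOAT_CMP
    <Quantity [0.1, 0.2] deg>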
"""
if inwcs.is_celestial:
raise ValueError("WCS is celestial, use celestial_pixel_scales instead")
pccd = inwcs.pixel_scale_matrix
if np.allclose(np.extract(1 - np.eye(*pccd.shape), pccd), 0):
return np.abs(np.diagonal(pccd)) * u.deg
else:
raise ValueError("WCS is rotated, cannot determine consistent pixel scales")
def _has_distortion(wcs):
"""
`True` if contains any SIP or image distortion components.
"""
return any(
getattr(wcs, dist_attr) is not None
for dist_attr in ["cpdis1", "cpdis2", "det2im1", "det2im2", "sip"]
)
# TODO: in future, we should think about how the following two functions can be
# integrated better into the WCS class.
def skycoord_to_pixel(coords, wcs, origin=0, mode="all"):
"""
Convert a set of SkyCoord coordinates into pixels.
Parameters
----------
coords : `~astropy.coordinates.SkyCoord`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
        Whether to do the transformation including distortions (``'all'``) or
        including only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
"""
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS needs
xw_unit = u.Unit(wcs.wcs.cunit[0])
yw_unit = u.Unit(wcs.wcs.cunit[1])
# Convert positions to frame
coords = coords.transform_to(frame)
# Extract longitude and latitude. We first try and use lon/lat directly,
# but if the representation is not spherical or unit spherical this will
# fail. We should then force the use of the unit spherical
# representation. We don't do that directly to make sure that we preserve
# custom lon/lat representations if available.
try:
lon = coords.data.lon.to(xw_unit)
lat = coords.data.lat.to(yw_unit)
except AttributeError:
lon = coords.spherical.lon.to(xw_unit)
lat = coords.spherical.lat.to(yw_unit)
# Convert to pixel coordinates
if mode == "all":
xp, yp = wcs.all_world2pix(lon.value, lat.value, origin)
elif mode == "wcs":
xp, yp = wcs.wcs_world2pix(lon.value, lat.value, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
return xp, yp
def pixel_to_skycoord(xp, yp, wcs, origin=0, mode="all", cls=None):
"""
Convert a set of pixel coordinates into a `~astropy.coordinates.SkyCoord`
coordinate.
Parameters
----------
xp, yp : float or ndarray
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
        Whether to do the transformation including distortions (``'all'``) or
        including only the core WCS transformation (``'wcs'``).
cls : class or None
The class of object to create. Should be a
`~astropy.coordinates.SkyCoord` subclass. If None, defaults to
`~astropy.coordinates.SkyCoord`.
Returns
-------
coords : `~astropy.coordinates.SkyCoord` subclass
        The celestial coordinates, returned as an instance of the given
        ``cls`` type.
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
"""
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import SkyCoord, UnitSphericalRepresentation
# we have to do this instead of actually setting the default to SkyCoord
# because importing SkyCoord at the module-level leads to circular
# dependencies.
if cls is None:
cls = SkyCoord
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS gives
lon_unit = u.Unit(wcs.wcs.cunit[0])
lat_unit = u.Unit(wcs.wcs.cunit[1])
# Convert pixel coordinates to celestial coordinates
if mode == "all":
lon, lat = wcs.all_pix2world(xp, yp, origin)
elif mode == "wcs":
lon, lat = wcs.wcs_pix2world(xp, yp, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
# Add units to longitude/latitude
lon = lon * lon_unit
lat = lat * lat_unit
# Create a SkyCoord-like object
data = UnitSphericalRepresentation(lon=lon, lat=lat)
coords = cls(frame.realize_frame(data))
return coords
def _unique_with_order_preserved(items):
"""
Return a list of unique items in the list provided, preserving the order
in which they are found.
"""
new_items = []
for item in items:
if item not in new_items:
new_items.append(item)
return new_items
def _pixel_to_world_correlation_matrix(wcs):
"""
Return a correlation matrix between the pixel coordinates and the
high level world coordinates, along with the list of high level world
coordinate classes.
The shape of the matrix is ``(n_world, n_pix)``, where ``n_world`` is the
number of high level world coordinates.
"""
# We basically want to collapse the world dimensions together that are
# combined into the same high-level objects.
# Get the following in advance as getting these properties can be expensive
all_components = wcs.low_level_wcs.world_axis_object_components
all_classes = wcs.low_level_wcs.world_axis_object_classes
axis_correlation_matrix = wcs.low_level_wcs.axis_correlation_matrix
components = _unique_with_order_preserved([c[0] for c in all_components])
matrix = np.zeros((len(components), wcs.pixel_n_dim), dtype=bool)
for iworld in range(wcs.world_n_dim):
iworld_unique = components.index(all_components[iworld][0])
matrix[iworld_unique] |= axis_correlation_matrix[iworld]
classes = [all_classes[component][0] for component in components]
return matrix, classes
def _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out):
"""
Correlation matrix between the input and output pixel coordinates for a
pixel -> world -> pixel transformation specified by two WCS instances.
The first WCS specified is the one used for the pixel -> world
transformation and the second WCS specified is the one used for the world ->
pixel transformation. The shape of the matrix is
``(n_pixel_out, n_pixel_in)``.
"""
matrix1, classes1 = _pixel_to_world_correlation_matrix(wcs_in)
matrix2, classes2 = _pixel_to_world_correlation_matrix(wcs_out)
if len(classes1) != len(classes2):
raise ValueError("The two WCS return a different number of world coordinates")
# Check if classes match uniquely
unique_match = True
mapping = []
for class1 in classes1:
matches = classes2.count(class1)
if matches == 0:
raise ValueError("The world coordinate types of the two WCS do not match")
elif matches > 1:
unique_match = False
break
else:
mapping.append(classes2.index(class1))
if unique_match:
# Classes are unique, so we need to re-order matrix2 along the world
# axis using the mapping we found above.
matrix2 = matrix2[mapping]
elif classes1 != classes2:
raise ValueError(
"World coordinate order doesn't match and automatic matching is ambiguous"
)
matrix = np.matmul(matrix2.T, matrix1)
return matrix
def _split_matrix(matrix):
"""
Given an axis correlation matrix from a WCS object, return information about
the individual WCS that can be split out.
The output is a list of tuples, where each tuple contains a list of
pixel dimensions and a list of world dimensions that can be extracted to
form a new WCS. For example, in the case of a spectral cube with the first
two world coordinates being the celestial coordinates and the third
coordinate being an uncorrelated spectral axis, the matrix would look like::
array([[ True, True, False],
[ True, True, False],
[False, False, True]])
and this function will return ``[([0, 1], [0, 1]), ([2], [2])]``.
"""
pixel_used = []
split_info = []
for ipix in range(matrix.shape[1]):
if ipix in pixel_used:
continue
pixel_include = np.zeros(matrix.shape[1], dtype=bool)
pixel_include[ipix] = True
n_pix_prev, n_pix = 0, 1
while n_pix > n_pix_prev:
world_include = matrix[:, pixel_include].any(axis=1)
pixel_include = matrix[world_include, :].any(axis=0)
n_pix_prev, n_pix = n_pix, np.sum(pixel_include)
pixel_indices = list(np.nonzero(pixel_include)[0])
world_indices = list(np.nonzero(world_include)[0])
pixel_used.extend(pixel_indices)
split_info.append((pixel_indices, world_indices))
return split_info
def pixel_to_pixel(wcs_in, wcs_out, *inputs):
"""
Transform pixel coordinates in a dataset with a WCS to pixel coordinates
in another dataset with a different WCS.
This function is designed to efficiently deal with input pixel arrays that
are broadcasted views of smaller arrays, and is compatible with any
APE14-compliant WCS.
Parameters
----------
wcs_in : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
A WCS object for the original dataset which complies with the
high-level shared APE 14 WCS API.
wcs_out : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
A WCS object for the target dataset which complies with the
high-level shared APE 14 WCS API.
*inputs :
Scalars or arrays giving the pixel coordinates to transform.
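    Examples
    --------
    A minimal sketch (two identical TAN WCS objects are assumed purely for
    illustration, so the transformation is the identity):
    >>> from astropy.wcs import WCS
    >>> from astropy.wcs.utils import pixel_to_pixel
    >>> wcs1 = WCS(naxis=2)
    >>> wcs1.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    >>> wcs2 = wcs1.deepcopy()
    >>> x, y = pixel_to_pixel(wcs1, wcs2, 1.0, 2.0)
    >>> float(x), float(y)  # doctest: +FLOAT_CMP
    (1.0, 2.0)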
"""
# Shortcut for scalars
if np.isscalar(inputs[0]):
world_outputs = wcs_in.pixel_to_world(*inputs)
if not isinstance(world_outputs, (tuple, list)):
world_outputs = (world_outputs,)
return wcs_out.world_to_pixel(*world_outputs)
# Remember original shape
original_shape = inputs[0].shape
matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
split_info = _split_matrix(matrix)
outputs = [None] * wcs_out.pixel_n_dim
for pixel_in_indices, pixel_out_indices in split_info:
pixel_inputs = []
for ipix in range(wcs_in.pixel_n_dim):
if ipix in pixel_in_indices:
pixel_inputs.append(unbroadcast(inputs[ipix]))
else:
pixel_inputs.append(inputs[ipix].flat[0])
pixel_inputs = np.broadcast_arrays(*pixel_inputs)
world_outputs = wcs_in.pixel_to_world(*pixel_inputs)
if not isinstance(world_outputs, (tuple, list)):
world_outputs = (world_outputs,)
pixel_outputs = wcs_out.world_to_pixel(*world_outputs)
if wcs_out.pixel_n_dim == 1:
pixel_outputs = (pixel_outputs,)
for ipix in range(wcs_out.pixel_n_dim):
if ipix in pixel_out_indices:
outputs[ipix] = np.broadcast_to(pixel_outputs[ipix], original_shape)
return outputs[0] if wcs_out.pixel_n_dim == 1 else outputs
def local_partial_pixel_derivatives(wcs, *pixel, normalize_by_world=False):
"""
Return a matrix of shape ``(world_n_dim, pixel_n_dim)`` where each entry
``[i, j]`` is the partial derivative d(world_i)/d(pixel_j) at the requested
pixel position.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS transformation to evaluate the derivatives for.
*pixel : float
The scalar pixel coordinates at which to evaluate the derivatives.
normalize_by_world : bool
If `True`, the matrix is normalized so that for each world entry
the derivatives add up to 1.
"""
# Find the world coordinates at the requested pixel
pixel_ref = np.array(pixel)
world_ref = np.array(wcs.pixel_to_world_values(*pixel_ref))
# Set up the derivative matrix
derivatives = np.zeros((wcs.world_n_dim, wcs.pixel_n_dim))
for i in range(wcs.pixel_n_dim):
pixel_off = pixel_ref.copy()
pixel_off[i] += 1
world_off = np.array(wcs.pixel_to_world_values(*pixel_off))
derivatives[:, i] = world_off - world_ref
if normalize_by_world:
derivatives /= derivatives.sum(axis=0)[:, np.newaxis]
return derivatives
def _linear_wcs_fit(params, lon, lat, x, y, w_obj):
"""
Objective function for fitting linear terms.
Parameters
----------
params : array
6 element array. The first 4 elements are the CD matrix terms and the
last 2 are CRPIX.
lon, lat : array
Sky coordinates.
x, y : array
Pixel coordinates.
w_obj : `~astropy.wcs.WCS`
WCS object.
"""
cd = params[0:4]
crpix = params[4:6]
w_obj.wcs.cd = ((cd[0], cd[1]), (cd[2], cd[3]))
w_obj.wcs.crpix = crpix
lon2, lat2 = w_obj.wcs_pix2world(x, y, 0)
lat_resids = lat - lat2
lon_resids = lon - lon2
# In case the longitude has wrapped around
lon_resids = np.mod(lon_resids - 180.0, 360.0) - 180.0
resids = np.concatenate((lon_resids * np.cos(np.radians(lat)), lat_resids))
return resids
def _sip_fit(params, lon, lat, u, v, w_obj, order, coeff_names):
"""Objective function for fitting SIP.
Parameters
----------
params : array
Fittable parameters. The first 2 elements are CRPIX, the next 4 are the
CD matrix, and the remainder are the SIP ``A`` and ``B`` polynomial
coefficients.
lon, lat : array
Sky coordinates.
u, v : array
Pixel coordinates.
w_obj : `~astropy.wcs.WCS`
WCS object.
order : int
Degree of the SIP polynomials being fit.
coeff_names : list of str
``"i_j"`` suffixes of the SIP coefficients being fit.
"""
from astropy.modeling.models import SIP # here to avoid circular import
# unpack params
crpix = params[0:2]
cdx = params[2:6].reshape((2, 2))
a_params = params[6 : 6 + len(coeff_names)]
b_params = params[6 + len(coeff_names) :]
# assign to wcs, used for transformations in this function
w_obj.wcs.cd = cdx
w_obj.wcs.crpix = crpix
a_coeff, b_coeff = {}, {}
for i in range(len(coeff_names)):
a_coeff["A_" + coeff_names[i]] = a_params[i]
b_coeff["B_" + coeff_names[i]] = b_params[i]
sip = SIP(
crpix=crpix, a_order=order, b_order=order, a_coeff=a_coeff, b_coeff=b_coeff
)
fuv, guv = sip(u, v)
xo, yo = np.dot(cdx, np.array([u + fuv - crpix[0], v + guv - crpix[1]]))
# use all pix2world in case `projection` contains distortion table
x, y = w_obj.all_world2pix(lon, lat, 0)
x, y = np.dot(w_obj.wcs.cd, (x - w_obj.wcs.crpix[0], y - w_obj.wcs.crpix[1]))
resids = np.concatenate((x - xo, y - yo))
return resids
def fit_wcs_from_points(
xy, world_coords, proj_point="center", projection="TAN", sip_degree=None
):
"""
Given two matching sets of coordinates on detector and sky,
compute the WCS.
Fits a WCS object to matched set of input detector and sky coordinates.
Optionally, a SIP can be fit to account for geometric
distortion. Returns an `~astropy.wcs.WCS` object with the best fit
parameters for mapping between input pixel and sky coordinates.
The projection type (default 'TAN') can be passed in as a string, one of
the valid three-letter projection codes, or as a WCS object with the
projection keywords already set. Note that if an input WCS has any
non-polynomial distortion, this will be applied and reflected in the
fit terms and coefficients. Passing in a WCS object in this way essentially
allows it to be refit based on the matched input coordinates and projection
point, but take care when using this option as non-projection related
keywords in the input might cause unexpected behavior.
Notes
-----
- The fiducial point for the spherical projection can be set to 'center'
to use the mean position of input sky coordinates, or as an
`~astropy.coordinates.SkyCoord` object.
- Units in all output WCS objects will always be in degrees.
- If the coordinate frame differs between `~astropy.coordinates.SkyCoord`
objects passed in for ``world_coords`` and ``proj_point``, the frame of
``world_coords`` will be used as the frame for the output WCS.
- If a WCS object is passed in to ``projection`` the CD/PC matrix will
be used as an initial guess for the fit. If this is known to be
significantly off and may throw off the fit, set it to the identity
matrix (for example, by setting ``wcs.wcs.pc = [(1., 0.), (0., 1.)]``).
Parameters
----------
xy : (`numpy.ndarray`, `numpy.ndarray`) tuple
x & y pixel coordinates.
world_coords : `~astropy.coordinates.SkyCoord`
`~astropy.coordinates.SkyCoord` object with world coordinates.
proj_point : 'center' or `~astropy.coordinates.SkyCoord`
Defaults to 'center', in which case the geometric center of the input
world coordinates will be used as the projection point. To specify an
exact point for the projection, a `~astropy.coordinates.SkyCoord` object
with a coordinate pair can be passed in. For consistency, the units and
frame of these coordinates will be transformed to match ``world_coords``
if they don't.
projection : str or `~astropy.wcs.WCS`
Three-letter projection code for any of the standard projections defined
in the FITS WCS standard. Optionally, a WCS object with the projection
keywords already set may be passed in.
sip_degree : None or int
If set to a non-zero integer value, will fit SIP of degree
``sip_degree`` to model geometric distortion. Defaults to None, meaning
no distortion corrections will be fit.
Returns
-------
wcs : `~astropy.wcs.WCS`
The best-fit WCS to the points given.
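Examples
--------
A hedged sketch; ``x`` and ``y`` are assumed to be arrays of matched pixel
positions and ``coords`` a `~astropy.coordinates.SkyCoord` holding the
corresponding sky positions::

    >>> from astropy.wcs.utils import fit_wcs_from_points
    >>> wcs = fit_wcs_from_points((x, y), coords, projection="TAN")  # doctest: +SKIP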
"""
from scipy.optimize import least_squares
import astropy.units as u
from astropy.coordinates import SkyCoord # here to avoid circular import
from .wcs import Sip
xp, yp = xy
try:
lon, lat = world_coords.data.lon.deg, world_coords.data.lat.deg
except AttributeError:
unit_sph = world_coords.unit_spherical
lon, lat = unit_sph.lon.deg, unit_sph.lat.deg
# verify input
if (type(proj_point) != type(world_coords)) and (proj_point != "center"):
raise ValueError(
"proj_point must be set to 'center', or an"
+ "`~astropy.coordinates.SkyCoord` object with "
+ "a pair of points."
)
use_center_as_proj_point = str(proj_point) == "center"
if not use_center_as_proj_point:
assert proj_point.size == 1
proj_codes = [
"AZP",
"SZP",
"TAN",
"STG",
"SIN",
"ARC",
"ZEA",
"AIR",
"CYP",
"CEA",
"CAR",
"MER",
"SFL",
"PAR",
"MOL",
"AIT",
"COP",
"COE",
"COD",
"COO",
"BON",
"PCO",
"TSC",
"CSC",
"QSC",
"HPX",
"XPH",
]
if type(projection) == str:
if projection not in proj_codes:
raise ValueError(
"Must specify valid projection code from list of "
+ "supported types: ",
", ".join(proj_codes),
)
# empty wcs to fill in with fit values
wcs = celestial_frame_to_wcs(frame=world_coords.frame, projection=projection)
else: # if projection is not string, should be wcs object. use as template.
wcs = copy.deepcopy(projection)
wcs.cdelt = (1.0, 1.0) # make sure cdelt is 1
wcs.sip = None
# Change PC to CD, since cdelt will be set to 1
if wcs.wcs.has_pc():
wcs.wcs.cd = wcs.wcs.pc
wcs.wcs.__delattr__("pc")
if sip_degree is not None and type(sip_degree) is not int:
raise ValueError("sip_degree must be None or an integer.")
# compute bounding box for sources in image coordinates:
xpmin, xpmax, ypmin, ypmax = xp.min(), xp.max(), yp.min(), yp.max()
# set pixel_shape to span of input points
wcs.pixel_shape = (
1 if xpmax <= 0.0 else int(np.ceil(xpmax)),
1 if ypmax <= 0.0 else int(np.ceil(ypmax)),
)
# determine CRVAL from input
close = lambda l, p: p[np.argmin(np.abs(l))]
if use_center_as_proj_point: # use center of input points
sc1 = SkyCoord(lon.min() * u.deg, lat.max() * u.deg)
sc2 = SkyCoord(lon.max() * u.deg, lat.min() * u.deg)
pa = sc1.position_angle(sc2)
sep = sc1.separation(sc2)
midpoint_sc = sc1.directional_offset_by(pa, sep / 2)
wcs.wcs.crval = (midpoint_sc.data.lon.deg, midpoint_sc.data.lat.deg)
wcs.wcs.crpix = ((xpmax + xpmin) / 2.0, (ypmax + ypmin) / 2.0)
else: # convert units, initial guess for crpix
proj_point = proj_point.transform_to(world_coords)
wcs.wcs.crval = (proj_point.data.lon.deg, proj_point.data.lat.deg)
wcs.wcs.crpix = (
close(lon - wcs.wcs.crval[0], xp + 1),
close(lat - wcs.wcs.crval[1], yp + 1),
)
# fit linear terms, assign to wcs
# use (1, 0, 0, 1) as initial guess, in case input wcs was passed in
# and cd terms are way off.
# Use bounds to require that the fit center pixel is on the input image
if xpmin == xpmax:
xpmin, xpmax = xpmin - 0.5, xpmax + 0.5
if ypmin == ypmax:
ypmin, ypmax = ypmin - 0.5, ypmax + 0.5
p0 = np.concatenate([wcs.wcs.cd.flatten(), wcs.wcs.crpix.flatten()])
fit = least_squares(
_linear_wcs_fit,
p0,
args=(lon, lat, xp, yp, wcs),
bounds=[
[-np.inf, -np.inf, -np.inf, -np.inf, xpmin + 1, ypmin + 1],
[np.inf, np.inf, np.inf, np.inf, xpmax + 1, ypmax + 1],
],
)
wcs.wcs.crpix = np.array(fit.x[4:6])
wcs.wcs.cd = np.array(fit.x[0:4].reshape((2, 2)))
# fit SIP, if specified. Only fit forward coefficients
if sip_degree:
degree = sip_degree
if "-SIP" not in wcs.wcs.ctype[0]:
wcs.wcs.ctype = [x + "-SIP" for x in wcs.wcs.ctype]
coef_names = [
f"{i}_{j}"
for i in range(degree + 1)
for j in range(degree + 1)
if (i + j) < (degree + 1) and (i + j) > 1
]
p0 = np.concatenate(
(
np.array(wcs.wcs.crpix),
wcs.wcs.cd.flatten(),
np.zeros(2 * len(coef_names)),
)
)
fit = least_squares(
_sip_fit,
p0,
args=(lon, lat, xp, yp, wcs, degree, coef_names),
bounds=[
[xpmin + 1, ypmin + 1] + [-np.inf] * (4 + 2 * len(coef_names)),
[xpmax + 1, ypmax + 1] + [np.inf] * (4 + 2 * len(coef_names)),
],
)
coef_fit = (
list(fit.x[6 : 6 + len(coef_names)]),
list(fit.x[6 + len(coef_names) :]),
)
# put fit values in wcs
wcs.wcs.cd = fit.x[2:6].reshape((2, 2))
wcs.wcs.crpix = fit.x[0:2]
a_vals = np.zeros((degree + 1, degree + 1))
b_vals = np.zeros((degree + 1, degree + 1))
for coef_name in coef_names:
a_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[0].pop(0)
b_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[1].pop(0)
wcs.sip = Sip(
a_vals,
b_vals,
np.zeros((degree + 1, degree + 1)),
np.zeros((degree + 1, degree + 1)),
wcs.wcs.crpix,
)
return wcs
def obsgeo_to_frame(obsgeo, obstime):
"""
Convert a WCS obsgeo property into an ITRS coordinate frame.
Parameters
----------
obsgeo : array-like
A shape ``(6, )`` array representing ``OBSGEO-[XYZ], OBSGEO-[BLH]`` as
returned by ``WCS.wcs.obsgeo``.
obstime : time-like
The time associated with the coordinate, will be passed to
`~astropy.coordinates.ITRS` as the obstime keyword.
Returns
-------
~astropy.coordinates.ITRS
An `~astropy.coordinates.ITRS` coordinate frame
representing the coordinates.
Notes
-----
The obsgeo array as accessed on a `.WCS` object is a length 6 numpy array
where the first three elements are the coordinate in a cartesian
representation and the last three are the coordinate in a spherical
representation.
This function prioritizes reading the cartesian coordinates, and will only
read the spherical coordinates if the cartesian coordinates are either all
zero or any of the cartesian coordinates are non-finite.
In the case where both the spherical and cartesian coordinates have some
non-finite values the spherical coordinates will be returned with the
non-finite values included.
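Examples
--------
A minimal sketch; ``wcs`` is assumed to be a `~astropy.wcs.WCS` whose
``OBSGEO`` keywords are populated::

    >>> from astropy.time import Time
    >>> from astropy.wcs.utils import obsgeo_to_frame
    >>> itrs = obsgeo_to_frame(wcs.wcs.obsgeo, Time(wcs.wcs.dateobs))  # doctest: +SKIP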
"""
if (
obsgeo is None
or len(obsgeo) != 6
or np.all(np.array(obsgeo) == 0)
or np.all(~np.isfinite(obsgeo))
):
raise ValueError(
f"Can not parse the 'obsgeo' location ({obsgeo}). "
"obsgeo should be a length 6 non-zero, finite numpy array"
)
# If the cartesian coords are zero or have NaNs in them use the spherical ones
if np.all(obsgeo[:3] == 0) or np.any(~np.isfinite(obsgeo[:3])):
data = SphericalRepresentation(*(obsgeo[3:] * (u.deg, u.deg, u.m)))
# Otherwise we assume the cartesian ones are valid
else:
data = CartesianRepresentation(*obsgeo[:3] * u.m)
return ITRS(data, obstime=obstime)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Under the hood, there are 3 separate classes that perform different
# parts of the transformation:
#
# - `~astropy.wcs.Wcsprm`: Is a direct wrapper of the core WCS
# functionality in `wcslib`_. (This includes TPV and TPD
# polynomial distortion, but not SIP distortion).
#
# - `~astropy.wcs.Sip`: Handles polynomial distortion as defined in the
# `SIP`_ convention.
#
# - `~astropy.wcs.DistortionLookupTable`: Handles `distortion paper`_
# lookup tables.
#
# Additionally, the class `WCS` aggregates all of these transformations
# together in a pipeline:
#
# - Detector to image plane correction (by a pair of
# `~astropy.wcs.DistortionLookupTable` objects).
#
# - `SIP`_ distortion correction (by an underlying `~astropy.wcs.Sip`
# object)
#
# - `distortion paper`_ table-lookup correction (by a pair of
# `~astropy.wcs.DistortionLookupTable` objects).
#
# - `wcslib`_ WCS transformation (by a `~astropy.wcs.Wcsprm` object)
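#
# Schematically (a condensed restatement of the pipeline above, as applied
# by ``all_pix2world``):
#
#   detector -> [det2im lookup tables] -> [SIP] ->
#   [distortion paper lookup tables] -> [wcslib core WCS] -> world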
# STDLIB
import builtins
import copy
import io
import itertools
import os
import re
import textwrap
import uuid
import warnings
# THIRD-PARTY
import numpy as np
from packaging.version import Version
# LOCAL
from astropy import log
from astropy import units as u
from astropy.io import fits
from astropy.utils.decorators import deprecated_renamed_argument
from astropy.utils.exceptions import (
AstropyDeprecationWarning,
AstropyUserWarning,
AstropyWarning,
)
from . import _wcs, docstrings
# Mix-in class that provides the APE 14 API
from .wcsapi.fitswcs import FITSWCSAPIMixin, SlicedFITSWCS
__all__ = [
"FITSFixedWarning",
"WCS",
"find_all_wcs",
"DistortionLookupTable",
"Sip",
"Tabprm",
"Wcsprm",
"Auxprm",
"Celprm",
"Prjprm",
"Wtbarr",
"WCSBase",
"validate",
"WcsError",
"SingularMatrixError",
"InconsistentAxisTypesError",
"InvalidTransformError",
"InvalidCoordinateError",
"InvalidPrjParametersError",
"NoSolutionError",
"InvalidSubimageSpecificationError",
"NoConvergence",
"NonseparableSubimageCoordinateSystemError",
"NoWcsKeywordsFoundError",
"InvalidTabularParametersError",
]
__doctest_skip__ = ["WCS.all_world2pix"]
if _wcs is not None:
if Version(_wcs.__version__) < Version("5.8"):
raise ImportError(
"astropy.wcs is built with wcslib {0}, but only versions 5.8 and "
"later on the 5.x series are known to work. The version of wcslib "
"that ships with astropy may be used."
)
if not _wcs._sanity_check():
raise RuntimeError(
"astropy.wcs did not pass its sanity check for your build on your platform."
)
_WCSSUB_TIME_SUPPORT = Version(_wcs.__version__) >= Version("7.8")
_WCS_TPD_WARN_LT71 = Version(_wcs.__version__) < Version("7.1")
_WCS_TPD_WARN_LT74 = Version(_wcs.__version__) < Version("7.4")
WCSBase = _wcs._Wcs
DistortionLookupTable = _wcs.DistortionLookupTable
Sip = _wcs.Sip
Wcsprm = _wcs.Wcsprm
Auxprm = _wcs.Auxprm
Celprm = _wcs.Celprm
Prjprm = _wcs.Prjprm
Tabprm = _wcs.Tabprm
Wtbarr = _wcs.Wtbarr
WcsError = _wcs.WcsError
SingularMatrixError = _wcs.SingularMatrixError
InconsistentAxisTypesError = _wcs.InconsistentAxisTypesError
InvalidTransformError = _wcs.InvalidTransformError
InvalidCoordinateError = _wcs.InvalidCoordinateError
NoSolutionError = _wcs.NoSolutionError
InvalidSubimageSpecificationError = _wcs.InvalidSubimageSpecificationError
NonseparableSubimageCoordinateSystemError = (
_wcs.NonseparableSubimageCoordinateSystemError
)
NoWcsKeywordsFoundError = _wcs.NoWcsKeywordsFoundError
InvalidTabularParametersError = _wcs.InvalidTabularParametersError
InvalidPrjParametersError = _wcs.InvalidPrjParametersError
# Copy all the constants from the C extension into this module's namespace
for key, val in _wcs.__dict__.items():
if key.startswith(("WCSSUB_", "WCSHDR_", "WCSHDO_", "WCSCOMPARE_", "PRJ_")):
locals()[key] = val
__all__.append(key)
# Set coordinate extraction callback for WCS -TAB:
def _load_tab_bintable(hdulist, extnam, extver, extlev, kind, ttype, row, ndim):
arr = hdulist[(extnam, extver)].data[ttype][row - 1]
if arr.ndim != ndim:
if kind == "c" and ndim == 2:
arr = arr.reshape((arr.size, 1))
else:
raise ValueError("Bad TDIM")
return np.ascontiguousarray(arr, dtype=np.double)
_wcs.set_wtbarr_fitsio_callback(_load_tab_bintable)
else:
WCSBase = object
Wcsprm = object
DistortionLookupTable = object
Sip = object
Tabprm = object
Wtbarr = object
WcsError = None
SingularMatrixError = None
InconsistentAxisTypesError = None
InvalidTransformError = None
InvalidCoordinateError = None
NoSolutionError = None
InvalidSubimageSpecificationError = None
NonseparableSubimageCoordinateSystemError = None
NoWcsKeywordsFoundError = None
InvalidTabularParametersError = None
_WCSSUB_TIME_SUPPORT = False
_WCS_TPD_WARN_LT71 = False
_WCS_TPD_WARN_LT74 = False
# Additional relax bit flags
WCSHDO_SIP = 0x80000
# Regular expression defining SIP keywords. It matches a keyword that starts
# with A or B, optionally followed by P, followed by an underscore then a
# number in the range 0-19, followed by an underscore and another number in
# the range 0-19. The keyword optionally ends with a capital letter.
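# For example, the pattern matches ``A_0_2``, ``B_1_1``, ``AP_2_0`` and
# ``BP_0_3``, but not ``A_ORDER`` or ``CTYPE1``.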
SIP_KW = re.compile("""^[AB]P?_1?[0-9]_1?[0-9][A-Z]?$""")
def _parse_keysel(keysel):
keysel_flags = 0
if keysel is not None:
for element in keysel:
if element.lower() == "image":
keysel_flags |= _wcs.WCSHDR_IMGHEAD
elif element.lower() == "binary":
keysel_flags |= _wcs.WCSHDR_BIMGARR
elif element.lower() == "pixel":
keysel_flags |= _wcs.WCSHDR_PIXLIST
else:
raise ValueError(
"keysel must be a list of 'image', 'binary' and/or 'pixel'"
)
else:
keysel_flags = -1
return keysel_flags
class NoConvergence(Exception):
"""
An error class used to report non-convergence and/or divergence
of numerical methods. It is used to report errors in the
iterative solution used by
the :py:meth:`~astropy.wcs.WCS.all_world2pix`.
Attributes
----------
best_solution : `numpy.ndarray`
Best solution achieved by the numerical method.
accuracy : `numpy.ndarray`
Accuracy of the ``best_solution``.
niter : `int`
Number of iterations performed by the numerical method
to compute ``best_solution``.
divergent : None, `numpy.ndarray`
Indices of the points in ``best_solution`` array
for which the solution appears to be divergent. If the
solution does not diverge, ``divergent`` will be set to `None`.
slow_conv : None, `numpy.ndarray`
Indices of the solutions in ``best_solution`` array
for which the solution failed to converge within the
specified maximum number of iterations. If there are no
non-converging solutions (i.e., if the required accuracy
has been achieved for all input data points)
then ``slow_conv`` will be set to `None`.
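Examples
--------
A hedged sketch of catching this exception; ``w`` is assumed to be a
`~astropy.wcs.WCS` and ``ra``, ``dec`` arrays of world coordinates::

    >>> try:  # doctest: +SKIP
    ...     x, y = w.all_world2pix(ra, dec, 0)
    ... except NoConvergence as e:
    ...     print(e.best_solution, e.accuracy)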
"""
def __init__(
self,
*args,
best_solution=None,
accuracy=None,
niter=None,
divergent=None,
slow_conv=None,
**kwargs,
):
super().__init__(*args)
self.best_solution = best_solution
self.accuracy = accuracy
self.niter = niter
self.divergent = divergent
self.slow_conv = slow_conv
if kwargs:
warnings.warn(
f"Function received unexpected arguments ({list(kwargs)}) these "
"are ignored but will raise an Exception in the "
"future.",
AstropyDeprecationWarning,
)
class FITSFixedWarning(AstropyWarning):
"""
The warning raised when the contents of the FITS header have been
modified to be standards compliant.
"""
pass
class WCS(FITSWCSAPIMixin, WCSBase):
"""WCS objects perform standard WCS transformations, and correct for
`SIP`_ and `distortion paper`_ table-lookup transformations, based
on the WCS keywords and supplementary data read from a FITS file.
See also: https://docs.astropy.org/en/stable/wcs/
Parameters
----------
header : `~astropy.io.fits.Header`, `~astropy.io.fits.hdu.image.PrimaryHDU`, `~astropy.io.fits.hdu.image.ImageHDU`, str, dict-like, or None, optional
If *header* is not provided or None, the object will be
initialized to default values.
fobj : `~astropy.io.fits.HDUList`, optional
It is needed when header keywords point to a `distortion
paper`_ lookup table stored in a different extension.
key : str, optional
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the
``\"a\"`` part of the ``CTYPEia`` cards. *key* may only be
provided if *header* is also provided.
minerr : float, optional
The minimum value a distortion correction must have in order
to be applied. If the value of ``CQERRja`` is smaller than
*minerr*, the corresponding distortion is not applied.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions
of the WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`astropy:relaxread` for details.
naxis : int or sequence, optional
Extracts specific coordinate axes using
:meth:`~astropy.wcs.Wcsprm.sub`. If a header is provided, and
*naxis* is not ``None``, *naxis* will be passed to
:meth:`~astropy.wcs.Wcsprm.sub` in order to select specific
axes from the header. See :meth:`~astropy.wcs.Wcsprm.sub` for
more details about this parameter.
keysel : sequence of str, optional
A sequence of flags used to select the keyword types
considered by wcslib. When ``None``, only the standard image
header keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following
strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
colsel : sequence of int, optional
A sequence of table column numbers used to restrict the WCS
transformations considered to only those pertaining to the
specified columns. If `None`, there is no restriction.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting object to fix any non-standard uses in the
header. `FITSFixedWarning` Warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid key.
KeyError
Key not found in FITS header.
ValueError
Lookup table distortion present in the header but *fobj* was
not provided.
Notes
-----
1. astropy.wcs supports arbitrary *n* dimensions for the core WCS
(the transformations handled by WCSLIB). However, the
`distortion paper`_ lookup table and `SIP`_ distortions must be
two dimensional. Therefore, if you try to create a WCS object
where the core WCS has a different number of dimensions than 2
and that object also contains a `distortion paper`_ lookup
table or `SIP`_ distortion, a `ValueError`
exception will be raised. To avoid this, consider using the
*naxis* kwarg to select two dimensions from the core WCS.
2. The number of coordinate axes in the transformation is not
determined directly from the ``NAXIS`` keyword but instead from
the highest of:
- ``NAXIS`` keyword
- ``WCSAXESa`` keyword
- The highest axis number in any parameterized WCS keyword.
The keyvalue, as well as the keyword, must be
syntactically valid otherwise it will not be considered.
If none of these keyword types is present, i.e. if the header
only contains auxiliary WCS keywords for a particular
coordinate representation, then no coordinate description is
constructed for it.
The number of axes, which is set as the ``naxis`` member, may
differ for different coordinate representations of the same
image.
3. When the header includes duplicate keywords, in most cases the
last encountered is used.
4. `~astropy.wcs.Wcsprm.set` is called immediately after
construction, so any invalid keywords or transformations will
be raised by the constructor, not when subsequently calling a
transformation method.
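Examples
--------
A hedged sketch of common construction patterns (``"image.fits"`` is a
placeholder filename)::

    >>> from astropy.io import fits
    >>> from astropy.wcs import WCS
    >>> with fits.open("image.fits") as hdulist:  # doctest: +SKIP
    ...     w = WCS(hdulist[0].header, fobj=hdulist)
    >>> w = WCS(naxis=2)  # a 2-dimensional WCS initialized to default values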
"""
def __init__(
self,
header=None,
fobj=None,
key=" ",
minerr=0.0,
relax=True,
naxis=None,
keysel=None,
colsel=None,
fix=True,
translate_units="",
_do_set=True,
):
close_fds = []
# these parameters are stored to be used when unpickling a WCS object:
self._init_kwargs = {
"keysel": copy.copy(keysel),
"colsel": copy.copy(colsel),
}
if header is None:
if naxis is None:
naxis = 2
wcsprm = _wcs.Wcsprm(header=None, key=key, relax=relax, naxis=naxis)
self.naxis = wcsprm.naxis
# Set some reasonable defaults.
det2im = (None, None)
cpdis = (None, None)
sip = None
else:
keysel_flags = _parse_keysel(keysel)
if isinstance(header, (str, bytes)):
try:
is_path = os.path.exists(header)
except (OSError, ValueError):
is_path = False
if is_path:
if fobj is not None:
raise ValueError(
"Can not provide both a FITS filename to "
"argument 1 and a FITS file object to argument 2"
)
fobj = fits.open(header)
close_fds.append(fobj)
header = fobj[0].header
elif isinstance(header, fits.hdu.image._ImageBaseHDU):
header = header.header
elif not isinstance(header, fits.Header):
try:
# Accept any dict-like object
orig_header = header
header = fits.Header()
for dict_key in orig_header.keys():
header[dict_key] = orig_header[dict_key]
except TypeError:
raise TypeError(
"header must be a string, an astropy.io.fits.Header "
"object, or a dict-like object"
)
if isinstance(header, fits.Header):
header_string = header.tostring().rstrip()
else:
header_string = header
# Importantly, header is a *copy* of the passed-in header
# because we will be modifying it
if isinstance(header_string, str):
header_bytes = header_string.encode("ascii")
header_string = header_string
else:
header_bytes = header_string
header_string = header_string.decode("ascii")
if not (fobj is None or isinstance(fobj, fits.HDUList)):
raise AssertionError(
"'fobj' must be either None or an astropy.io.fits.HDUList object."
)
est_naxis = 2
try:
tmp_header = fits.Header.fromstring(header_string)
self._remove_sip_kw(tmp_header)
tmp_header_bytes = tmp_header.tostring().rstrip()
if isinstance(tmp_header_bytes, str):
tmp_header_bytes = tmp_header_bytes.encode("ascii")
tmp_wcsprm = _wcs.Wcsprm(
header=tmp_header_bytes,
key=key,
relax=relax,
keysel=keysel_flags,
colsel=colsel,
warnings=False,
hdulist=fobj,
)
if naxis is not None:
try:
tmp_wcsprm = tmp_wcsprm.sub(naxis)
except ValueError:
pass
est_naxis = tmp_wcsprm.naxis if tmp_wcsprm.naxis else 2
except _wcs.NoWcsKeywordsFoundError:
pass
self.naxis = est_naxis
header = fits.Header.fromstring(header_string)
det2im = self._read_det2im_kw(header, fobj, err=minerr)
cpdis = self._read_distortion_kw(header, fobj, dist="CPDIS", err=minerr)
self._fix_pre2012_scamp_tpv(header)
sip = self._read_sip_kw(header, wcskey=key)
self._remove_sip_kw(header)
header_string = header.tostring()
header_string = header_string.replace("END" + " " * 77, "")
if isinstance(header_string, str):
header_bytes = header_string.encode("ascii")
header_string = header_string
else:
header_bytes = header_string
header_string = header_string.decode("ascii")
try:
wcsprm = _wcs.Wcsprm(
header=header_bytes,
key=key,
relax=relax,
keysel=keysel_flags,
colsel=colsel,
hdulist=fobj,
)
except _wcs.NoWcsKeywordsFoundError:
# The header may have SIP or distortions, but no core
# WCS. That isn't an error -- we want a "default"
# (identity) core Wcs transformation in that case.
if colsel is None:
wcsprm = _wcs.Wcsprm(
header=None,
key=key,
relax=relax,
keysel=keysel_flags,
colsel=colsel,
hdulist=fobj,
)
else:
raise
if naxis is not None:
wcsprm = wcsprm.sub(naxis)
self.naxis = wcsprm.naxis
if wcsprm.naxis != 2 and (
det2im[0] or det2im[1] or cpdis[0] or cpdis[1] or sip
):
raise ValueError(
f"""
FITS WCS distortion paper lookup tables and SIP distortions only work
in 2 dimensions. However, WCSLIB has detected {wcsprm.naxis} dimensions in the
core WCS keywords. To use core WCS in conjunction with FITS WCS
distortion paper lookup tables or SIP distortion, you must select or
reduce these to 2 dimensions using the naxis kwarg.
"""
)
header_naxis = header.get("NAXIS", None)
if header_naxis is not None and header_naxis < wcsprm.naxis:
warnings.warn(
f"The WCS transformation has more axes ({wcsprm.naxis:d}) than the "
f"image it is associated with ({header_naxis:d})",
FITSFixedWarning,
)
self._get_naxis(header)
WCSBase.__init__(self, sip, cpdis, wcsprm, det2im)
if fix:
if header is None:
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
self.fix(translate_units=translate_units)
else:
self.fix(translate_units=translate_units)
if _do_set:
self.wcs.set()
for fd in close_fds:
fd.close()
self._pixel_bounds = None
def __copy__(self):
new_copy = self.__class__()
WCSBase.__init__(
new_copy,
self.sip,
(self.cpdis1, self.cpdis2),
self.wcs,
(self.det2im1, self.det2im2),
)
new_copy.__dict__.update(self.__dict__)
return new_copy
def __deepcopy__(self, memo):
from copy import deepcopy
new_copy = self.__class__()
new_copy.naxis = deepcopy(self.naxis, memo)
WCSBase.__init__(
new_copy,
deepcopy(self.sip, memo),
(deepcopy(self.cpdis1, memo), deepcopy(self.cpdis2, memo)),
deepcopy(self.wcs, memo),
(deepcopy(self.det2im1, memo), deepcopy(self.det2im2, memo)),
)
for key, val in self.__dict__.items():
new_copy.__dict__[key] = deepcopy(val, memo)
return new_copy
def copy(self):
"""
Return a shallow copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
.. warning::
Use `deepcopy` instead of `copy` unless you know why you need a
shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Return a deep copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
"""
return copy.deepcopy(self)
def sub(self, axes=None):
copy = self.deepcopy()
# We need to know which axes have been dropped, but there is no easy
# way to do this with the .sub function, so instead we assign UUIDs to
# the CNAME parameters in copy.wcs. We can later access the original
# CNAME properties from self.wcs.
cname_uuid = [str(uuid.uuid4()) for i in range(copy.wcs.naxis)]
copy.wcs.cname = cname_uuid
# Subset the WCS
copy.wcs = copy.wcs.sub(axes)
copy.naxis = copy.wcs.naxis
# Construct a list of dimensions from the original WCS in the order
# in which they appear in the final WCS.
keep = [
cname_uuid.index(cname) if cname in cname_uuid else None
for cname in copy.wcs.cname
]
# Restore the original CNAMEs
copy.wcs.cname = ["" if i is None else self.wcs.cname[i] for i in keep]
# Subset pixel_shape and pixel_bounds
if self.pixel_shape:
copy.pixel_shape = tuple(
None if i is None else self.pixel_shape[i] for i in keep
)
if self.pixel_bounds:
copy.pixel_bounds = [
None if i is None else self.pixel_bounds[i] for i in keep
]
return copy
if _wcs is not None:
sub.__doc__ = _wcs.Wcsprm.sub.__doc__
def _fix_scamp(self):
"""
Remove SCAMP's PVi_m distortion parameters if SIP distortion parameters
are also present. Some projects (e.g., Palomar Transient Factory)
convert SCAMP's distortion parameters (which abuse the PVi_m cards) to
SIP. However, wcslib gets confused by the presence of both SCAMP and
SIP distortion parameters.
See https://github.com/astropy/astropy/issues/299.
SCAMP uses TAN projection exclusively. The case of CTYPE ending
in -TAN should have been handled by ``_fix_pre2012_scamp_tpv()`` before
calling this function.
"""
if self.wcs is None:
return
# Delete SIP if CTYPE explicitly has '-TPV' code:
ctype = [ct.strip().upper() for ct in self.wcs.ctype]
if sum(ct.endswith("-TPV") for ct in ctype) == 2:
if self.sip is not None:
self.sip = None
warnings.warn(
"Removed redundant SIP distortion parameters "
+ "because CTYPE explicitly specifies TPV distortions",
FITSFixedWarning,
)
return
# Nothing to be done if no PV parameters attached since SCAMP
# encodes distortion coefficients using PV keywords
pv = self.wcs.get_pv()
if not pv:
return
# Nothing to be done if axes don't use SIP distortion parameters
if self.sip is None:
return
# Loop over distinct values of `i' index
has_scamp = False
for i in {v[0] for v in pv}:
# Get all values of `j' index for this value of `i' index
js = tuple(v[1] for v in pv if v[0] == i)
if "-TAN" in self.wcs.ctype[i - 1].upper() and js and max(js) >= 5:
# TAN projection *may* use PVi_j with j up to 4 - see
# Sections 2.5, 2.6, and Table 13
# in https://doi.org/10.1051/0004-6361:20021327
has_scamp = True
break
if has_scamp and all(ct.endswith("-SIP") for ct in ctype):
# Prefer SIP - see recommendations in Section 7 in
# http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf
self.wcs.set_pv([])
warnings.warn(
"Removed redundant SCAMP distortion parameters "
+ "because SIP parameters are also present",
FITSFixedWarning,
)
return
def fix(self, translate_units="", naxis=None):
"""
Perform the fix operations from wcslib, and warn about any
changes it has made.
Parameters
----------
translate_units : str, optional
Specify which potentially unsafe translations of
non-standard unit strings to perform. By default,
performs none.
Although ``"S"`` is commonly used to represent seconds,
its translation to ``"s"`` is potentially unsafe since the
standard recognizes ``"S"`` formally as Siemens, however
rarely that may be used. The same applies to ``"H"`` for
hours (Henry), and ``"D"`` for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``"s"``, translate ``"S"`` to
``"s"``.
- If the string contains ``"h"``, translate ``"H"`` to
``"h"``.
- If the string contains ``"d"``, translate ``"D"`` to
``"d"``.
Thus ``''`` doesn't do any unsafe translations, whereas
``'shd'`` does all of them.
naxis : int array, optional
Image axis lengths. If this array is set to zero or
``None``, then `~astropy.wcs.Wcsprm.cylfix` will not be
invoked.
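Examples
--------
A hedged sketch; ``header`` is assumed to be a FITS header containing
non-standard (but fixable) WCS keywords::

    >>> w = WCS(header, fix=False)  # doctest: +SKIP
    >>> w.fix(translate_units="shd")  # doctest: +SKIP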
"""
if self.wcs is not None:
self._fix_scamp()
fixes = self.wcs.fix(translate_units, naxis)
for key, val in fixes.items():
if val != "No change":
if (
key == "datfix"
and "1858-11-17" in val
and not np.count_nonzero(self.wcs.mjdref)
):
continue
warnings.warn(
f"'{key}' made the change '{val}'.",
FITSFixedWarning,
)
def calc_footprint(self, header=None, undistort=True, axes=None, center=True):
"""
Calculates the footprint of the image on the sky.
A footprint is defined as the positions of the corners of the
image on the sky after all available distortions have been
applied.
Parameters
----------
header : `~astropy.io.fits.Header` object, optional
Used to get ``NAXIS1`` and ``NAXIS2``
header and axes are mutually exclusive, alternative ways
to provide the same information.
undistort : bool, optional
If `True`, take SIP and distortion lookup table into
account
axes : (int, int), optional
If provided, use the given sequence as the shape of the
image. Otherwise, use the ``NAXIS1`` and ``NAXIS2``
keywords from the header that was used to create this
`WCS` object.
center : bool, optional
If `True` use the center of the pixel, otherwise use the corner.
Returns
-------
coord : (4, 2) array of (*x*, *y*) coordinates.
The order is clockwise starting with the bottom left corner.
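Examples
--------
A minimal sketch; ``w`` is assumed to be a 2-dimensional celestial `WCS`
with ``pixel_shape`` (or a header containing ``NAXIS1``/``NAXIS2``)
available::

    >>> footprint = w.calc_footprint()  # doctest: +SKIP
    >>> footprint.shape  # doctest: +SKIP
    (4, 2)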
"""
if axes is not None:
naxis1, naxis2 = axes
else:
if header is None:
try:
# classes that inherit from WCS and define naxis1/2
# do not require a header parameter
naxis1, naxis2 = self.pixel_shape
except (AttributeError, TypeError):
warnings.warn(
"Need a valid header in order to calculate footprint\n",
AstropyUserWarning,
)
return None
else:
naxis1 = header.get("NAXIS1", None)
naxis2 = header.get("NAXIS2", None)
if naxis1 is None or naxis2 is None:
raise ValueError("Image size could not be determined.")
if center:
corners = np.array(
[[1, 1], [1, naxis2], [naxis1, naxis2], [naxis1, 1]], dtype=np.float64
)
else:
corners = np.array(
[
[0.5, 0.5],
[0.5, naxis2 + 0.5],
[naxis1 + 0.5, naxis2 + 0.5],
[naxis1 + 0.5, 0.5],
],
dtype=np.float64,
)
if undistort:
return self.all_pix2world(corners, 1)
else:
return self.wcs_pix2world(corners, 1)
def _read_det2im_kw(self, header, fobj, err=0.0):
"""
Create a `distortion paper`_ type lookup table for detector to
image plane correction.
"""
if fobj is None:
return (None, None)
if not isinstance(fobj, fits.HDUList):
return (None, None)
try:
axiscorr = header["AXISCORR"]
d2imdis = self._read_d2im_old_format(header, fobj, axiscorr)
return d2imdis
except KeyError:
pass
dist = "D2IMDIS"
d_kw = "D2IM"
err_kw = "D2IMERR"
tables = {}
for i in range(1, self.naxis + 1):
d_error = header.get(err_kw + str(i), 0.0)
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
if dis == "lookup":
del header[distortion]
assert isinstance(fobj, fits.HDUList), (
"An astropy.io.fits.HDUList"
"is required for Lookup table distortion."
)
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + ".EXTVER"
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f".AXIS.{i:d}"
if i == header[dp_axis_key]:
d_data = fobj["D2IMARR", d_extver].data
else:
d_data = (fobj["D2IMARR", d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj["D2IMARR", d_extver].header
d_crpix = (d_header.get("CRPIX1", 0.0), d_header.get("CRPIX2", 0.0))
d_crval = (d_header.get("CRVAL1", 0.0), d_header.get("CRVAL2", 0.0))
d_cdelt = (d_header.get("CDELT1", 1.0), d_header.get("CDELT2", 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
tables[i] = d_lookup
else:
warnings.warn(
"Polynomial distortion is not implemented.\n",
AstropyUserWarning,
)
for key in set(header):
if key.startswith(dp + "."):
del header[key]
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _read_d2im_old_format(self, header, fobj, axiscorr):
warnings.warn(
"The use of ``AXISCORR`` for D2IM correction has been"
" deprecated.`~astropy.wcs` will read in files with ``AXISCORR`` but"
" ``to_fits()`` will write out files without it.",
AstropyDeprecationWarning,
)
cpdis = [None, None]
crpix = [0.0, 0.0]
crval = [0.0, 0.0]
cdelt = [1.0, 1.0]
try:
d2im_data = fobj[("D2IMARR", 1)].data
except KeyError:
return (None, None)
except AttributeError:
return (None, None)
d2im_data = np.array([d2im_data])
d2im_hdr = fobj[("D2IMARR", 1)].header
naxis = d2im_hdr["NAXIS"]
for i in range(1, naxis + 1):
crpix[i - 1] = d2im_hdr.get("CRPIX" + str(i), 0.0)
crval[i - 1] = d2im_hdr.get("CRVAL" + str(i), 0.0)
cdelt[i - 1] = d2im_hdr.get("CDELT" + str(i), 1.0)
cpdis = DistortionLookupTable(d2im_data, crpix, crval, cdelt)
if axiscorr == 1:
return (cpdis, None)
elif axiscorr == 2:
return (None, cpdis)
else:
warnings.warn("Expected AXISCORR to be 1 or 2", AstropyUserWarning)
return (None, None)
def _write_det2im(self, hdulist):
"""
Writes a `distortion paper`_ type lookup table to the given
`~astropy.io.fits.HDUList`.
"""
if self.det2im1 is None and self.det2im2 is None:
return
dist = "D2IMDIS"
d_kw = "D2IM"
def write_d2i(num, det2im):
if det2im is None:
return
hdulist[0].header[f"{dist}{num:d}"] = (
"LOOKUP",
"Detector to image correction type",
)
hdulist[0].header[f"{d_kw}{num:d}.EXTVER"] = (
num,
"Version number of WCSDVARR extension",
)
hdulist[0].header[f"{d_kw}{num:d}.NAXES"] = (
len(det2im.data.shape),
"Number of independent variables in D2IM function",
)
for i in range(det2im.data.ndim):
jth = {1: "1st", 2: "2nd", 3: "3rd"}.get(i + 1, f"{i + 1}th")
hdulist[0].header[f"{d_kw}{num:d}.AXIS.{i + 1:d}"] = (
i + 1,
f"Axis number of the {jth} variable in a D2IM function",
)
image = fits.ImageHDU(det2im.data, name="D2IMARR")
header = image.header
header["CRPIX1"] = (det2im.crpix[0], "Coordinate system reference pixel")
header["CRPIX2"] = (det2im.crpix[1], "Coordinate system reference pixel")
header["CRVAL1"] = (
det2im.crval[0],
"Coordinate system value at reference pixel",
)
header["CRVAL2"] = (
det2im.crval[1],
"Coordinate system value at reference pixel",
)
header["CDELT1"] = (det2im.cdelt[0], "Coordinate increment along axis")
header["CDELT2"] = (det2im.cdelt[1], "Coordinate increment along axis")
image.ver = int(hdulist[0].header[f"{d_kw}{num:d}.EXTVER"])
hdulist.append(image)
write_d2i(1, self.det2im1)
write_d2i(2, self.det2im2)
def _read_distortion_kw(self, header, fobj, dist="CPDIS", err=0.0):
"""
Reads `distortion paper`_ table-lookup keywords and data, and
returns a 2-tuple of `~astropy.wcs.DistortionLookupTable`
objects.
If no `distortion paper`_ keywords are found, ``(None, None)``
is returned.
"""
if isinstance(header, (str, bytes)):
return (None, None)
if dist == "CPDIS":
d_kw = "DP"
err_kw = "CPERR"
else:
d_kw = "DQ"
err_kw = "CQERR"
tables = {}
for i in range(1, self.naxis + 1):
d_error_key = err_kw + str(i)
if d_error_key in header:
d_error = header[d_error_key]
del header[d_error_key]
else:
d_error = 0.0
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
del header[distortion]
if dis == "lookup":
if not isinstance(fobj, fits.HDUList):
raise ValueError(
"an astropy.io.fits.HDUList is "
"required for Lookup table distortion."
)
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + ".EXTVER"
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f".AXIS.{i:d}"
if i == header[dp_axis_key]:
d_data = fobj["WCSDVARR", d_extver].data
else:
d_data = (fobj["WCSDVARR", d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj["WCSDVARR", d_extver].header
d_crpix = (d_header.get("CRPIX1", 0.0), d_header.get("CRPIX2", 0.0))
d_crval = (d_header.get("CRVAL1", 0.0), d_header.get("CRVAL2", 0.0))
d_cdelt = (d_header.get("CDELT1", 1.0), d_header.get("CDELT2", 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
tables[i] = d_lookup
for key in set(header):
if key.startswith(dp + "."):
del header[key]
else:
warnings.warn(
"Polynomial distortion is not implemented.\n",
AstropyUserWarning,
)
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _write_distortion_kw(self, hdulist, dist="CPDIS"):
"""
Write out `distortion paper`_ keywords to the given
`~astropy.io.fits.HDUList`.
"""
if self.cpdis1 is None and self.cpdis2 is None:
return
if dist == "CPDIS":
d_kw = "DP"
else:
d_kw = "DQ"
def write_dist(num, cpdis):
if cpdis is None:
return
hdulist[0].header[f"{dist}{num:d}"] = (
"LOOKUP",
"Prior distortion function type",
)
hdulist[0].header[f"{d_kw}{num:d}.EXTVER"] = (
num,
"Version number of WCSDVARR extension",
)
hdulist[0].header[f"{d_kw}{num:d}.NAXES"] = (
len(cpdis.data.shape),
f"Number of independent variables in {dist} function",
)
for i in range(cpdis.data.ndim):
jth = {1: "1st", 2: "2nd", 3: "3rd"}.get(i + 1, f"{i + 1}th")
hdulist[0].header[f"{d_kw}{num:d}.AXIS.{i + 1:d}"] = (
i + 1,
f"Axis number of the {jth} variable in a {dist} function",
)
image = fits.ImageHDU(cpdis.data, name="WCSDVARR")
header = image.header
header["CRPIX1"] = (cpdis.crpix[0], "Coordinate system reference pixel")
header["CRPIX2"] = (cpdis.crpix[1], "Coordinate system reference pixel")
header["CRVAL1"] = (
cpdis.crval[0],
"Coordinate system value at reference pixel",
)
header["CRVAL2"] = (
cpdis.crval[1],
"Coordinate system value at reference pixel",
)
header["CDELT1"] = (cpdis.cdelt[0], "Coordinate increment along axis")
header["CDELT2"] = (cpdis.cdelt[1], "Coordinate increment along axis")
image.ver = int(hdulist[0].header[f"{d_kw}{num:d}.EXTVER"])
hdulist.append(image)
write_dist(1, self.cpdis1)
write_dist(2, self.cpdis2)
def _fix_pre2012_scamp_tpv(self, header, wcskey=""):
"""
Replace -TAN with TPV (for pre-2012 SCAMP headers that use -TAN
in CTYPE). Ignore SIP if present. This follows recommendations in
Section 7 in
http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf.
This is to deal with pre-2012 headers that may contain TPV with a
CTYPE that ends in '-TAN' (post-2012 they should end in '-TPV' when
SCAMP has adopted the new TPV convention).
"""
if isinstance(header, (str, bytes)):
return
wcskey = wcskey.strip().upper()
cntype = [
(nax, header.get(f"CTYPE{nax}{wcskey}", "").strip())
for nax in range(1, self.naxis + 1)
]
tan_axes = [ct[0] for ct in cntype if ct[1].endswith("-TAN")]
if len(tan_axes) == 2:
# check if PVi_j with j >= 5 is present and if so, do not load SIP
tan_to_tpv = False
for nax in tan_axes:
js = []
for p in header[f"PV{nax}_*{wcskey}"].keys():
prefix = f"PV{nax}_"
if p.startswith(prefix):
p = p[len(prefix) :]
p = p.rstrip(wcskey)
try:
p = int(p)
except ValueError:
continue
js.append(p)
if js and max(js) >= 5:
tan_to_tpv = True
break
if tan_to_tpv:
warnings.warn(
"Removed redundant SIP distortion parameters "
+ "because SCAMP' PV distortions are also present",
FITSFixedWarning,
)
self._remove_sip_kw(header, del_order=True)
for i in tan_axes:
kwd = f"CTYPE{i:d}{wcskey}"
if kwd in header:
header[kwd] = (
header[kwd].strip().upper().replace("-TAN", "-TPV")
)
@staticmethod
def _remove_sip_kw(header, del_order=False):
"""
Remove SIP information from a header.
"""
# Never pass SIP coefficients to wcslib
# CTYPE must be passed with -SIP to wcslib
for key in {
m.group() for m in map(SIP_KW.match, list(header)) if m is not None
}:
del header[key]
if del_order:
for kwd in ["A_ORDER", "B_ORDER", "AP_ORDER", "BP_ORDER"]:
if kwd in header:
del header[kwd]
def _read_sip_kw(self, header, wcskey=""):
"""
Reads `SIP`_ header keywords and returns a `~astropy.wcs.Sip`
object.
If no `SIP`_ header keywords are found, ``None`` is returned.
"""
if isinstance(header, (str, bytes)):
# TODO: Parse SIP from a string without pyfits around
return None
if "A_ORDER" in header and header["A_ORDER"] > 1:
if "B_ORDER" not in header:
raise ValueError(
"A_ORDER provided without corresponding B_ORDER "
"keyword for SIP distortion"
)
m = int(header["A_ORDER"])
a = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"A_{i}_{j}"
if key in header:
a[i, j] = header[key]
del header[key]
m = int(header["B_ORDER"])
if m > 1:
b = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"B_{i}_{j}"
if key in header:
b[i, j] = header[key]
del header[key]
else:
a = None
b = None
del header["A_ORDER"]
del header["B_ORDER"]
ctype = [header[f"CTYPE{nax}{wcskey}"] for nax in range(1, self.naxis + 1)]
if any(not ctyp.endswith("-SIP") for ctyp in ctype):
message = """
Inconsistent SIP distortion information is present in the FITS header and the WCS object:
SIP coefficients were detected, but CTYPE is missing a "-SIP" suffix.
astropy.wcs is using the SIP distortion coefficients,
therefore the coordinates calculated here might be incorrect.
If you do not want to apply the SIP distortion coefficients,
please remove the SIP coefficients from the FITS header or the
WCS object. As an example, if the image is already distortion-corrected
(e.g., drizzled) then distortion components should not apply and the SIP
coefficients should be removed.
While the SIP distortion coefficients are being applied here, if that was indeed the intent,
for consistency please append "-SIP" to the CTYPE in the FITS header or the WCS object.
"""
log.info(message)
elif "B_ORDER" in header and header["B_ORDER"] > 1:
raise ValueError(
"B_ORDER provided without corresponding A_ORDER "
+ "keyword for SIP distortion"
)
else:
a = None
b = None
if "AP_ORDER" in header and header["AP_ORDER"] > 1:
if "BP_ORDER" not in header:
raise ValueError(
"AP_ORDER provided without corresponding BP_ORDER "
"keyword for SIP distortion"
)
m = int(header["AP_ORDER"])
ap = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"AP_{i}_{j}"
if key in header:
ap[i, j] = header[key]
del header[key]
m = int(header["BP_ORDER"])
if m > 1:
bp = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"BP_{i}_{j}"
if key in header:
bp[i, j] = header[key]
del header[key]
else:
ap = None
bp = None
del header["AP_ORDER"]
del header["BP_ORDER"]
elif "BP_ORDER" in header and header["BP_ORDER"] > 1:
raise ValueError(
"BP_ORDER provided without corresponding AP_ORDER "
"keyword for SIP distortion"
)
else:
ap = None
bp = None
if a is None and b is None and ap is None and bp is None:
return None
if f"CRPIX1{wcskey}" not in header or f"CRPIX2{wcskey}" not in header:
raise ValueError("Header has SIP keywords without CRPIX keywords")
crpix1 = header.get(f"CRPIX1{wcskey}")
crpix2 = header.get(f"CRPIX2{wcskey}")
return Sip(a, b, ap, bp, (crpix1, crpix2))
def _write_sip_kw(self):
"""
Write out SIP keywords. Returns a dictionary of key-value
pairs.
"""
if self.sip is None:
return {}
keywords = {}
def write_array(name, a):
if a is None:
return
size = a.shape[0]
trdir = "sky to detector" if name[-1] == "P" else "detector to sky"
comment = (
f'SIP polynomial order, axis {ord(name[0]) - ord("A"):d}, {trdir:s}'
)
keywords[f"{name}_ORDER"] = size - 1, comment
comment = "SIP distortion coefficient"
for i in range(size):
for j in range(size - i):
if a[i, j] != 0.0:
keywords[f"{name}_{i:d}_{j:d}"] = a[i, j], comment
write_array("A", self.sip.a)
write_array("B", self.sip.b)
write_array("AP", self.sip.ap)
write_array("BP", self.sip.bp)
return keywords
def _denormalize_sky(self, sky):
if self.wcs.lngtyp != "RA":
raise ValueError(
"WCS does not have longitude type of 'RA', therefore "
"(ra, dec) data can not be used as input"
)
if self.wcs.lattyp != "DEC":
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore "
"(ra, dec) data can not be used as input"
)
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be used as input"
)
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude "
"celestial axes, therefore (ra, dec) data can not be "
"used as input"
)
out = np.zeros((sky.shape[0], self.wcs.naxis))
out[:, self.wcs.lng] = sky[:, 0]
out[:, self.wcs.lat] = sky[:, 1]
return out
def _normalize_sky(self, sky):
if self.wcs.lngtyp != "RA":
raise ValueError(
"WCS does not have longitude type of 'RA', therefore "
"(ra, dec) data can not be returned"
)
if self.wcs.lattyp != "DEC":
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore "
"(ra, dec) data can not be returned"
)
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned"
)
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned"
)
out = np.empty((sky.shape[0], 2))
out[:, 0] = sky[:, self.wcs.lng]
out[:, 1] = sky[:, self.wcs.lat]
return out
def _array_converter(self, func, sky, *args, ra_dec_order=False):
"""
A helper function to support reading either a pair of arrays
or a single Nx2 array.
"""
def _return_list_of_arrays(axes, origin):
if any(x.size == 0 for x in axes):
return axes
try:
axes = np.broadcast_arrays(*axes)
except ValueError:
raise ValueError(
"Coordinate arrays are not broadcastable to each other"
)
xy = np.hstack([x.reshape((x.size, 1)) for x in axes])
if ra_dec_order and sky == "input":
xy = self._denormalize_sky(xy)
output = func(xy, origin)
if ra_dec_order and sky == "output":
output = self._normalize_sky(output)
return (
output[:, 0].reshape(axes[0].shape),
output[:, 1].reshape(axes[0].shape),
)
return [output[:, i].reshape(axes[0].shape) for i in range(output.shape[1])]
def _return_single_array(xy, origin):
if xy.shape[-1] != self.naxis:
raise ValueError(
"When providing two arguments, the array must be "
f"of shape (N, {self.naxis})"
)
if 0 in xy.shape:
return xy
if ra_dec_order and sky == "input":
xy = self._denormalize_sky(xy)
result = func(xy, origin)
if ra_dec_order and sky == "output":
result = self._normalize_sky(result)
return result
if len(args) == 2:
try:
xy, origin = args
xy = np.asarray(xy)
origin = int(origin)
except Exception:
raise TypeError(
"When providing two arguments, they must be "
f"(coords[N][{self.naxis}], origin)"
)
if xy.shape == () or len(xy.shape) == 1:
return _return_list_of_arrays([xy], origin)
return _return_single_array(xy, origin)
elif len(args) == self.naxis + 1:
axes = args[:-1]
origin = args[-1]
try:
axes = [np.asarray(x) for x in axes]
origin = int(origin)
except Exception:
raise TypeError(
"When providing more than two arguments, they must be "
+ "a 1-D array for each axis, followed by an origin."
)
return _return_list_of_arrays(axes, origin)
raise TypeError(
f"WCS projection has {self.naxis} dimensions, so expected 2 (an Nx{self.naxis} array "
f"and the origin argument) or {self.naxis + 1} arguments (the position in each "
f"dimension, and the origin argument). Instead, {len(args)} arguments were "
"given."
)
def all_pix2world(self, *args, **kwargs):
return self._array_converter(self._all_pix2world, "output", *args, **kwargs)
all_pix2world.__doc__ = f"""
Transforms pixel coordinates to world coordinates.
Performs all of the following in series:
- Detector to image plane correction (if present in the
FITS file)
- `SIP`_ distortion correction (if present in the FITS
file)
- `distortion paper`_ table-lookup correction (if present
in the FITS file)
- `wcslib`_ "core" WCS transformation
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('naxis', 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
Returns
-------
{docstrings.RETURNS('sky coordinates, in degrees', 8)}
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
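Examples
--------
A minimal sketch; ``w`` is assumed to be a 2-dimensional celestial `WCS`::

    >>> ra, dec = w.all_pix2world([10.0, 20.0], [30.0, 40.0], 0)  # doctest: +SKIP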
"""
def wcs_pix2world(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.p2s(xy, o)["world"], "output", *args, **kwargs
)
wcs_pix2world.__doc__ = f"""
Transforms pixel coordinates to world coordinates by doing
only the basic `wcslib`_ transformation.
No `SIP`_ or `distortion paper`_ table lookup correction is
applied. To perform distortion correction, see
`~astropy.wcs.WCS.all_pix2world`,
`~astropy.wcs.WCS.sip_pix2foc`, `~astropy.wcs.WCS.p4_pix2foc`,
or `~astropy.wcs.WCS.pix2foc`.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('naxis', 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
Returns
-------
{docstrings.RETURNS('world coordinates, in degrees', 8)}
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
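Examples
--------
A minimal, illustrative sketch with a synthetic TAN projection (the
reference values below are arbitrary, not from a real observation):
>>> from astropy.wcs import WCS
>>> w = WCS(naxis=2)
>>> w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
>>> w.wcs.crval = [83.0, -5.0]
>>> w.wcs.crpix = [1.0, 1.0]
>>> w.wcs.cdelt = [-0.0005, 0.0005]
>>> w.wcs_pix2world([[1.0, 1.0]], 1)  # doctest: +SKIP
array([[83., -5.]])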
"""
def _all_world2pix(
self, world, origin, tolerance, maxiter, adaptive, detect_divergence, quiet
):
# ############################################################
# # DESCRIPTION OF THE NUMERICAL METHOD ##
# ############################################################
# In this section I will outline the method of solving
# the inverse problem of converting world coordinates to
# pixel coordinates (*inverse* of the direct transformation
# `all_pix2world`) and I will summarize some of the aspects
# of the method proposed here and some of the issues of the
# original `all_world2pix` (in relation to this method)
# discussed in https://github.com/astropy/astropy/issues/1977
# A more detailed discussion can be found here:
# https://github.com/astropy/astropy/pull/2373
#
#
# ### Background ###
#
#
# I will refer here to the [SIP Paper]
# (http://fits.gsfc.nasa.gov/registry/sip/SIP_distortion_v1_0.pdf).
# According to this paper, the effect of distortions as
# described in *their* equation (1) is:
#
# (1) x = CD*(u+f(u)),
#
# where `x` is a *vector* of "intermediate spherical
# coordinates" (equivalent to (x,y) in the paper) and `u`
# is a *vector* of "pixel coordinates", and `f` is a vector
# function describing geometrical distortions
# (see equations 2 and 3 in the SIP Paper).
# However, I prefer to use `w` for "intermediate world
# coordinates", `x` for pixel coordinates, and assume that
# transformation `W` performs the **linear**
# (CD matrix + projection onto celestial sphere) part of the
# conversion from pixel coordinates to world coordinates.
# Then we can re-write (1) as:
#
# (2) w = W*(x+f(x)) = T(x)
#
# In `astropy.wcs.WCS` transformation `W` is represented by
# the `wcs_pix2world` member, while the combined ("total")
# transformation (linear part + distortions) is performed by
# `all_pix2world`. Below I summarize the notations and their
# equivalents in `astropy.wcs.WCS`:
#
# | Equation term | astropy.WCS/meaning |
# | ------------- | ---------------------------- |
# | `x` | pixel coordinates |
# | `w` | world coordinates |
# | `W` | `wcs_pix2world()` |
# | `W^{-1}` | `wcs_world2pix()` |
# | `T` | `all_pix2world()` |
# | `x+f(x)` | `pix2foc()` |
#
#
# ### Direct Solving of Equation (2) ###
#
#
# In order to find the pixel coordinates that correspond to
# given world coordinates `w`, it is necessary to invert
# equation (2): `x=T^{-1}(w)`, or solve equation `w==T(x)`
# for `x`. However, this approach has the following
# disadvantages:
# 1. It requires unnecessary transformations (see next
# section).
# 2. It is prone to "RA wrapping" issues as described in
# https://github.com/astropy/astropy/issues/1977
# (essentially because `all_pix2world` may return points with
# a different phase than the user's input `w`).
#
#
# ### Description of the Method Used here ###
#
#
# By applying inverse linear WCS transformation (`W^{-1}`)
# to both sides of equation (2) and introducing notation `x'`
# (prime) for the pixel coordinates obtained from the world
# coordinates by applying inverse *linear* WCS transformation
# ("focal plane coordinates"):
#
# (3) x' = W^{-1}(w)
#
# we obtain the following equation:
#
# (4) x' = x+f(x),
#
# or,
#
# (5) x = x'-f(x)
#
# This equation is well suited for solving using the method
# of fixed-point iterations
# (http://en.wikipedia.org/wiki/Fixed-point_iteration):
#
# (6) x_{i+1} = x'-f(x_i)
#
# As an initial value of the pixel coordinate `x_0` we take
# "focal plane coordinate" `x'=W^{-1}(w)=wcs_world2pix(w)`.
# We stop iterations when `|x_{i+1}-x_i|<tolerance`. We also
# consider the process to be diverging if
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|`
# **when** `|x_{i+1}-x_i|>=tolerance` (when current
# approximation is close to the true solution,
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|` may be due to rounding errors
# and we ignore such "divergences" when
# `|x_{i+1}-x_i|<tolerance`). It may appear that checking for
# `|x_{i+1}-x_i|<tolerance` in order to ignore divergence is
# unnecessary since the iterative process should stop anyway,
# however, the proposed implementation of this iterative
# process is completely vectorized and, therefore, we may
# continue iterating over *some* points even though they have
# converged to within a specified tolerance (while iterating
# over other points that have not yet converged to
# a solution).
#
# In order to efficiently implement iterative process (6)
# using available methods in `astropy.wcs.WCS`, we add and
# subtract `x_i` from the right side of equation (6):
#
# (7) x_{i+1} = x'-(x_i+f(x_i))+x_i = x'-pix2foc(x_i)+x_i,
#
# where `x'=wcs_world2pix(w)` and it is computed only *once*
# before the beginning of the iterative process (and we also
# set `x_0=x'`). By using `pix2foc` at each iteration instead
# of `all_pix2world` we get about 25% increase in performance
# (by not performing the linear `W` transformation at each
# step) and we also avoid the "RA wrapping" issue described
# above (by working in focal plane coordinates and avoiding
# pix->world transformations).
#
# As an added benefit, the process converges to the correct
# solution in just one iteration when distortions are not
# present (compare to
# https://github.com/astropy/astropy/issues/1977 and
# https://github.com/astropy/astropy/pull/2294): in this case
# `pix2foc` is the identity transformation
# `x_i=pix2foc(x_i)` and from equation (7) we get:
#
# x' = x_0 = wcs_world2pix(w)
# x_1 = x' - pix2foc(x_0) + x_0 = x' - pix2foc(x') + x' = x'
# = wcs_world2pix(w) = x_0
# =>
# |x_1-x_0| = 0 < tolerance (with tolerance > 0)
#
# However, for performance reasons, it is still better to
# avoid iterations altogether and return the exact linear
# solution (`wcs_world2pix`) right-away when non-linear
# distortions are not present by checking that attributes
# `sip`, `cpdis1`, `cpdis2`, `det2im1`, and `det2im2` are
# *all* `None`.
#
#
# ### Outline of the Algorithm ###
#
#
# While the proposed code is relatively long (considering
# the simplicity of the algorithm), this is due to: 1)
# checking if an iterative solution is necessary at all; 2)
# checking for divergence; 3) re-implementation of the
# completely vectorized algorithm as an "adaptive" vectorized
# algorithm (for cases when some points diverge, for which we
# want to stop iterations). In my tests, the adaptive version
# of the algorithm is about 50% slower than the non-adaptive
# version for all HST images.
#
# The essential part of the vectorized non-adaptive algorithm
# (without divergence and other checks) can be described
# as follows:
#
# pix0 = self.wcs_world2pix(world, origin)
# pix = pix0.copy() # 0-order solution
#
# for k in range(maxiter):
# # find correction to the previous solution:
# dpix = self.pix2foc(pix, origin) - pix0
#
# # compute norm (L2) of the correction:
# dn = np.linalg.norm(dpix, axis=1)
#
# # apply correction:
# pix -= dpix
#
# # check convergence:
# if np.max(dn) < tolerance:
# break
#
# return pix
#
# Here, the input parameter `world` can be a `MxN` array
# where `M` is the number of coordinate axes in WCS and `N`
# is the number of points to be converted simultaneously to
# image coordinates.
#
#
# ### IMPORTANT NOTE: ###
#
# If, in future releases of `~astropy.wcs`, `pix2foc` no
# longer applies all the required distortion corrections,
# then in the code below, calls to `pix2foc` will
# have to be replaced with
# wcs_world2pix(all_pix2world(pix_list, origin), origin)
#
# ############################################################
# # INITIALIZE ITERATIVE PROCESS: ##
# ############################################################
# initial approximation (linear WCS based only)
pix0 = self.wcs_world2pix(world, origin)
# Check that an iterative solution is required at all
# (when any of the non-CD-matrix-based corrections are
# present). If not required return the initial
# approximation (pix0).
if not self.has_distortion:
# No non-WCS corrections detected so
# simply return initial approximation:
return pix0
pix = pix0.copy() # 0-order solution
# initial correction:
dpix = self.pix2foc(pix, origin) - pix0
# Update initial solution:
pix -= dpix
# Norm (L2) squared of the correction:
dn = np.sum(dpix * dpix, axis=1)
dnprev = dn.copy() # if adaptive else dn
tol2 = tolerance**2
# Prepare for iterative process
k = 1
ind = None
inddiv = None
# Turn off numpy runtime warnings for 'invalid' and 'over':
old_invalid = np.geterr()["invalid"]
old_over = np.geterr()["over"]
np.seterr(invalid="ignore", over="ignore")
# ############################################################
# # NON-ADAPTIVE ITERATIONS: ##
# ############################################################
if not adaptive:
# Fixed-point iterations:
while np.nanmax(dn) >= tol2 and k < maxiter:
# Find correction to the previous solution:
dpix = self.pix2foc(pix, origin) - pix0
# Compute norm (L2) squared of the correction:
dn = np.sum(dpix * dpix, axis=1)
# Check for divergence (we do this in two stages
# to optimize performance for the most common
# scenario when successive approximations converge):
if detect_divergence:
divergent = dn >= dnprev
if np.any(divergent):
# Find solutions that have not yet converged:
slowconv = dn >= tol2
(inddiv,) = np.where(divergent & slowconv)
if inddiv.shape[0] > 0:
# Update indices of elements that
# still need correction:
conv = dn < dnprev
iconv = np.where(conv)
# Apply correction:
dpixgood = dpix[iconv]
pix[iconv] -= dpixgood
dpix[iconv] = dpixgood
# For the next iteration choose
# non-divergent points that have not yet
# converged to the requested accuracy:
(ind,) = np.where(slowconv & conv)
pix0 = pix0[ind]
dnprev[ind] = dn[ind]
k += 1
# Switch to adaptive iterations:
adaptive = True
break
# Save current correction magnitudes for later:
dnprev = dn
# Apply correction:
pix -= dpix
k += 1
# ############################################################
# # ADAPTIVE ITERATIONS: ##
# ############################################################
if adaptive:
if ind is None:
(ind,) = np.where(np.isfinite(pix).all(axis=1))
pix0 = pix0[ind]
# "Adaptive" fixed-point iterations:
while ind.shape[0] > 0 and k < maxiter:
# Find correction to the previous solution:
dpixnew = self.pix2foc(pix[ind], origin) - pix0
# Compute norm (L2) of the correction:
dnnew = np.sum(np.square(dpixnew), axis=1)
# Bookkeeping of corrections:
dnprev[ind] = dn[ind].copy()
dn[ind] = dnnew
if detect_divergence:
# Find indices of pixels that are converging:
conv = dnnew < dnprev[ind]
iconv = np.where(conv)
iiconv = ind[iconv]
# Apply correction:
dpixgood = dpixnew[iconv]
pix[iiconv] -= dpixgood
dpix[iiconv] = dpixgood
# Find indices of solutions that have not yet
# converged to the requested accuracy
# AND that do not diverge:
(subind,) = np.where((dnnew >= tol2) & conv)
else:
# Apply correction:
pix[ind] -= dpixnew
dpix[ind] = dpixnew
# Find indices of solutions that have not yet
# converged to the requested accuracy:
(subind,) = np.where(dnnew >= tol2)
# Choose solutions that need more iterations:
ind = ind[subind]
pix0 = pix0[subind]
k += 1
# ############################################################
# # FINAL DETECTION OF INVALID, DIVERGING, ##
# # AND FAILED-TO-CONVERGE POINTS ##
# ############################################################
# Identify diverging and/or invalid points:
invalid = (~np.all(np.isfinite(pix), axis=1)) & (
np.all(np.isfinite(world), axis=1)
)
# When detect_divergence==False, dnprev is outdated
# (it is the norm of the very first correction).
# Still better than nothing...
(inddiv,) = np.where(((dn >= tol2) & (dn >= dnprev)) | invalid)
if inddiv.shape[0] == 0:
inddiv = None
# Identify points that did not converge within 'maxiter'
# iterations:
if k >= maxiter:
(ind,) = np.where((dn >= tol2) & (dn < dnprev) & (~invalid))
if ind.shape[0] == 0:
ind = None
else:
ind = None
# Restore previous numpy error settings:
np.seterr(invalid=old_invalid, over=old_over)
# ############################################################
# # RAISE EXCEPTION IF DIVERGING OR TOO SLOWLY CONVERGING ##
# # DATA POINTS HAVE BEEN DETECTED: ##
# ############################################################
if (ind is not None or inddiv is not None) and not quiet:
if inddiv is None:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
f"converge to the requested accuracy after {k:d} "
"iterations.",
best_solution=pix,
accuracy=np.abs(dpix),
niter=k,
slow_conv=ind,
divergent=None,
)
else:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy.\n"
f"After {k:d} iterations, the solution is diverging "
"at least for one input point.",
best_solution=pix,
accuracy=np.abs(dpix),
niter=k,
slow_conv=ind,
divergent=inddiv,
)
return pix
@deprecated_renamed_argument("accuracy", "tolerance", "4.3")
def all_world2pix(
self,
*args,
tolerance=1e-4,
maxiter=20,
adaptive=False,
detect_divergence=True,
quiet=False,
**kwargs,
):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda *args, **kwargs: self._all_world2pix(
*args,
tolerance=tolerance,
maxiter=maxiter,
adaptive=adaptive,
detect_divergence=detect_divergence,
quiet=quiet,
),
"input",
*args,
**kwargs,
)
all_world2pix.__doc__ = f"""
all_world2pix(*arg, tolerance=1.0e-4, maxiter=20,
adaptive=False, detect_divergence=True, quiet=False)
Transforms world coordinates to pixel coordinates, using
numerical iteration to invert the full forward transformation
`~astropy.wcs.WCS.all_pix2world` with complete
distortion model.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('naxis', 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
tolerance : float, optional (default = 1.0e-4)
Tolerance of solution. Iteration terminates when the
iterative solver estimates that the "true solution" is
within this many pixels of the current estimate; more
specifically, when the correction to the solution found
during the previous iteration is smaller
(in the sense of the L2 norm) than ``tolerance``.
maxiter : int, optional (default = 20)
Maximum number of iterations allowed to reach a solution.
quiet : bool, optional (default = False)
Do not throw :py:class:`NoConvergence` exceptions when
the method does not converge to a solution with the
required accuracy within a specified number of maximum
iterations set by ``maxiter`` parameter. Instead,
simply return the found solution.
Other Parameters
----------------
adaptive : bool, optional (default = False)
Specifies whether to adaptively select only points that
did not converge to a solution within the required
accuracy for the next iteration. Default is recommended
for HST as well as most other instruments.
.. note::
The :py:meth:`all_world2pix` uses a vectorized
implementation of the method of consecutive
approximations (see ``Notes`` section below) in which it
iterates over *all* input points *regardless* until
the required accuracy has been reached for *all* input
points. In some cases it may be possible that
*almost all* points have reached the required accuracy
but there are only a few input data points for
which additional iterations may be needed (this
depends mostly on the characteristics of the geometric
distortions for a given instrument). In this situation
it may be advantageous to set ``adaptive`` = `True` in
which case :py:meth:`all_world2pix` will continue
iterating *only* over the points that have not yet
converged to the required accuracy. However, for the
HST's ACS/WFC detector, which has the strongest
distortions of all HST instruments, testing has
shown that enabling this option would lead to about a
50-100% penalty in computational time (depending on
specifics of the image, geometric distortions, and
number of input points to be converted). Therefore,
for HST and possibly other instruments, it is recommended
to set ``adaptive`` = `False`. The only danger in
getting this setting wrong will be a performance
penalty.
.. note::
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will automatically switch
to the adaptive algorithm once divergence has been
detected.
detect_divergence : bool, optional (default = True)
Specifies whether to perform a more detailed analysis
of the convergence to a solution. Normally
:py:meth:`all_world2pix` may not achieve the required
accuracy if either the ``tolerance`` or ``maxiter`` arguments
are too low. However, it may happen that for some
geometric distortions the conditions of convergence for
the method of consecutive approximations used by
:py:meth:`all_world2pix` may not be satisfied, in which
case consecutive approximations to the solution will
diverge regardless of the ``tolerance`` or ``maxiter``
settings.
When ``detect_divergence`` is `False`, these divergent
points will be detected as not having achieved the
required accuracy (without further details). In addition,
if ``adaptive`` is `False` then the algorithm will not
know that the solution (for specific points) is diverging
and will continue iterating and trying to "improve"
diverging solutions. This may result in ``NaN`` or
``Inf`` values in the returned results (in addition to
performance penalties). Even when ``detect_divergence``
is `False`, :py:meth:`all_world2pix`, at the end of the
iterative process, will identify invalid results
(``NaN`` or ``Inf``) as "diverging" solutions and will
raise :py:class:`NoConvergence` unless the ``quiet``
parameter is set to `True`.
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will detect points for which
current correction to the coordinates is larger than
the correction applied during the previous iteration
**if** the requested accuracy **has not yet been
achieved**. In this case, if ``adaptive`` is `True`,
these points will be excluded from further iterations and
if ``adaptive`` is `False`, :py:meth:`all_world2pix` will
automatically switch to the adaptive algorithm. Thus, the
reported divergent solution will be the latest converging
solution computed immediately *before* divergence
has been detected.
.. note::
When accuracy has been achieved, small increases in
current corrections may be possible due to rounding
errors (when ``adaptive`` is `False`) and such
increases will be ignored.
.. note::
Based on our testing using HST ACS/WFC images, setting
``detect_divergence`` to `True` will incur about 5-20%
performance penalty with the larger penalty
corresponding to ``adaptive`` set to `True`.
Because the benefits of enabling this
feature outweigh the small performance penalty,
especially when ``adaptive`` = `False`, it is
recommended to set ``detect_divergence`` to `True`,
unless extensive testing of the distortion models for
images from specific instruments show a good stability
of the numerical method for a wide range of
coordinates (even outside the image itself).
.. note::
Indices of the diverging inverse solutions will be
reported in the ``divergent`` attribute of the
raised :py:class:`NoConvergence` exception object.
Returns
-------
{docstrings.RETURNS('pixel coordinates', 8)}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp`, and
`~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Using the method of fixed-point iterations, we iterate
starting with the initial approximation, which is
computed using the non-distortion-aware
:py:meth:`wcs_world2pix` (or equivalent).
The :py:meth:`all_world2pix` function uses a vectorized
implementation of the method of consecutive approximations and
therefore it is highly efficient (>30x) when *all* data points
that need to be converted from sky coordinates to image
coordinates are passed at *once*. Therefore, it is advisable,
whenever possible, to pass as input a long array of all points
that need to be converted to :py:meth:`all_world2pix` instead
of calling :py:meth:`all_world2pix` for each data point. Also
see the note to the ``adaptive`` parameter.
Raises
------
NoConvergence
The method did not converge to a
solution to the required accuracy within a specified
number of maximum iterations set by the ``maxiter``
parameter. To turn off this exception, set ``quiet`` to
`True`. Indices of the points for which the requested
accuracy was not achieved (if any) will be listed in the
``slow_conv`` attribute of the
raised :py:class:`NoConvergence` exception object.
See :py:class:`NoConvergence` documentation for
more details.
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Examples
--------
>>> import astropy.io.fits as fits
>>> import astropy.wcs as wcs
>>> import numpy as np
>>> import os
>>> filename = os.path.join(wcs.__path__[0], 'tests/data/j94f05bgq_flt.fits')
>>> hdulist = fits.open(filename)
>>> w = wcs.WCS(hdulist[('sci',1)].header, hdulist)
>>> hdulist.close()
>>> ra, dec = w.all_pix2world([1,2,3], [1,1,1], 1)
>>> print(ra) # doctest: +FLOAT_CMP
[ 5.52645627 5.52649663 5.52653698]
>>> print(dec) # doctest: +FLOAT_CMP
[-72.05171757 -72.05171276 -72.05170795]
>>> radec = w.all_pix2world([[1,1], [2,1], [3,1]], 1)
>>> print(radec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 5.52649663 -72.05171276]
[ 5.52653698 -72.05170795]]
>>> x, y = w.all_world2pix(ra, dec, 1)
>>> print(x) # doctest: +FLOAT_CMP
[ 1.00000238 2.00000237 3.00000236]
>>> print(y) # doctest: +FLOAT_CMP
[ 0.99999996 0.99999997 0.99999997]
>>> xy = w.all_world2pix(radec, 1)
>>> print(xy) # doctest: +FLOAT_CMP
[[ 1.00000238 0.99999996]
[ 2.00000237 0.99999997]
[ 3.00000236 0.99999997]]
>>> xy = w.all_world2pix(radec, 1, maxiter=3,
... tolerance=1.0e-10, quiet=False)
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 3 iterations, the solution is
diverging at least for one input point.
>>> # Now try to use some diverging data:
>>> divradec = w.all_pix2world([[1.0, 1.0],
... [10000.0, 50000.0],
... [3.0, 1.0]], 1)
>>> print(divradec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 7.15976932 -70.8140779 ]
[ 5.52653698 -72.05170795]]
>>> # First, turn detect_divergence on:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=True,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000238e+00 9.99999965e-01]
[ -1.99441636e+06 1.44309097e+06]
[ 3.00000236e+00 9.99999966e-01]]
Achieved accuracy:
[[ 6.13968380e-05 8.59638593e-07]
[ 8.59526812e+11 6.61713548e+11]
[ 6.09398446e-05 8.38759724e-07]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 5 iterations, the solution is
diverging at least for one input point.
>>> # This time turn detect_divergence off:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=False,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000009 1. ]
[ nan nan]
[ 3.00000009 1. ]]
Achieved accuracy:
[[ 2.29417358e-06 3.21222995e-08]
[ nan nan]
[ 2.27407877e-06 3.13005639e-08]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 6 iterations, the solution is
diverging at least for one input point.
"""
def wcs_world2pix(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.s2p(xy, o)["pixcrd"], "input", *args, **kwargs
)
wcs_world2pix.__doc__ = f"""
Transforms world coordinates to pixel coordinates, using only
the basic `wcslib`_ WCS transformation. No `SIP`_ or
`distortion paper`_ table lookup transformation is applied.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('naxis', 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
Returns
-------
{docstrings.RETURNS('pixel coordinates', 8)}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
"""
def pix2foc(self, *args):
return self._array_converter(self._pix2foc, None, *args)
pix2foc.__doc__ = f"""
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention and `distortion
paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('focal coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def p4_pix2foc(self, *args):
return self._array_converter(self._p4_pix2foc, None, *args)
p4_pix2foc.__doc__ = f"""
Convert pixel coordinates to focal plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('focal coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def det2im(self, *args):
return self._array_converter(self._det2im, None, *args)
det2im.__doc__ = f"""
Convert detector coordinates to image plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('pixel coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def sip_pix2foc(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.pix2foc, None, *args)
sip_pix2foc.__doc__ = f"""
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention.
The output is in pixel coordinates, relative to ``CRPIX``.
FITS WCS `distortion paper`_ table lookup correction is not
applied, even if that information existed in the FITS file
that initialized this :class:`~astropy.wcs.WCS` object. To
correct for that, use `~astropy.wcs.WCS.pix2foc` or
`~astropy.wcs.WCS.p4_pix2foc`.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('focal coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def sip_foc2pix(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.foc2pix, None, *args)
sip_foc2pix.__doc__ = f"""
Convert focal plane coordinates to pixel coordinates using the
`SIP`_ polynomial distortion convention.
FITS WCS `distortion paper`_ table lookup distortion
correction is not applied, even if that information existed in
the FITS file that initialized this `~astropy.wcs.WCS` object.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('pixel coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def proj_plane_pixel_scales(self):
"""
Calculate pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This method is concerned **only** with the transformation
"image plane"->"projection plane" and **not** with the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to the
non-linear nature of most projections.
.. note::
This method only returns sensible answers if the WCS contains
celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object.
Returns
-------
scale : list of `~astropy.units.Quantity`
A vector of projection plane increments corresponding to each
pixel side (axis).
See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
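Examples
--------
An illustrative sketch with a synthetic celestial WCS (the pixel scale
of 0.0005 deg is arbitrary):
>>> from astropy.wcs import WCS
>>> w = WCS(naxis=2)
>>> w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
>>> w.wcs.cdelt = [-0.0005, 0.0005]
>>> w.wcs.cunit = ["deg", "deg"]
>>> w.proj_plane_pixel_scales()  # doctest: +SKIP
[<Quantity 0.0005 deg>, <Quantity 0.0005 deg>]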
"""
from astropy.wcs.utils import proj_plane_pixel_scales # Avoid circular import
values = proj_plane_pixel_scales(self)
units = [u.Unit(x) for x in self.wcs.cunit]
return [
value * unit for (value, unit) in zip(values, units)
] # Can have different units
def proj_plane_pixel_area(self):
"""
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`), returns pixel
area of the image pixel at the ``CRPIX`` location once it is projected
onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** with the transformation
"image plane"->"projection plane" and **not** with the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to the
non-linear nature of most projections.
.. note::
This method only returns sensible answers if the WCS contains
celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object.
Returns
-------
area : `~astropy.units.Quantity`
Area (in the projection plane) of the pixel at ``CRPIX`` location.
Raises
------
ValueError
Pixel area is defined only for 2D pixels. Most likely the
`~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
WCS is not a square matrix of second order.
Notes
-----
Depending on the application, the square root of the pixel area can be
used to represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
"""
from astropy.wcs.utils import proj_plane_pixel_area # Avoid circular import
value = proj_plane_pixel_area(self)
unit = u.Unit(self.wcs.cunit[0]) * u.Unit(self.wcs.cunit[1]) # 2D only
return value * unit
def to_fits(self, relax=False, key=None):
"""
Generate an `~astropy.io.fits.HDUList` object with all of the
information stored in this object. This should be logically identical
to the input FITS file, but it will be normalized in a number of ways.
See `to_header` for some warnings about the output produced.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`astropy:relaxwrite` for details.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
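Examples
--------
A short sketch writing the WCS (including any distortion lookup tables)
to its own FITS file; the output file name is a placeholder:
>>> from astropy.wcs import WCS
>>> w = WCS(naxis=2)
>>> w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
>>> w.to_fits().writeto("wcs_only.fits", overwrite=True)  # doctest: +SKIP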
"""
header = self.to_header(relax=relax, key=key)
hdu = fits.PrimaryHDU(header=header)
hdulist = fits.HDUList(hdu)
self._write_det2im(hdulist)
self._write_distortion_kw(hdulist)
return hdulist
def to_header(self, relax=None, key=None):
"""Generate an `astropy.io.fits.Header` object with the basic WCS
and SIP information stored in this object. This should be
logically identical to the input FITS file, but it will be
normalized in a number of ways.
.. warning::
This function does not write out FITS WCS `distortion
paper`_ information, since that requires multiple FITS
header data units. To get a full representation of
everything in this object, use `to_fits`.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`astropy:relaxwrite` for details.
If the ``relax`` keyword argument is not given and any
keywords were omitted from the output, an
`~astropy.utils.exceptions.AstropyWarning` is displayed.
To override this, explicitly pass a value to ``relax``.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
header : `astropy.io.fits.Header`
Notes
-----
The output header will almost certainly differ from the input in a
number of respects:
1. The output header only contains WCS-related keywords. In
particular, it does not contain syntactically-required
keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or
``END``.
2. Deprecated (e.g. ``CROTAn``) or non-standard usage will
be translated to standard (this is partially dependent on
whether ``fix`` was applied).
3. Quantities will be converted to the units used internally,
basically SI with the addition of degrees.
4. Floating-point quantities may be given to a different decimal
precision.
5. Elements of the ``PCi_j`` matrix will be written if and
only if they differ from the unit matrix. Thus, if the
matrix is unity then no elements will be written.
6. Additional keywords such as ``WCSAXES``, ``CUNITia``,
``LONPOLEa`` and ``LATPOLEa`` may appear.
7. The original keycomments will be lost, although
`to_header` tries hard to write meaningful comments.
8. Keyword order may be changed.
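Examples
--------
A minimal sketch building a header from a synthetic WCS (the values
are illustrative only):
>>> from astropy.wcs import WCS
>>> w = WCS(naxis=2)
>>> w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
>>> header = w.to_header()  # doctest: +SKIP
>>> header["CTYPE1"]  # doctest: +SKIP
'RA---TAN'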
"""
# default precision for numerical WCS keywords
precision = WCSHDO_P14 # Defined by C-ext
display_warning = False
if relax is None:
display_warning = True
relax = False
if relax not in (True, False):
do_sip = relax & WCSHDO_SIP
relax &= ~WCSHDO_SIP
else:
do_sip = relax
relax = WCSHDO_all if relax is True else WCSHDO_safe # Defined by C-ext
relax = precision | relax
if self.wcs is not None:
if key is not None:
orig_key = self.wcs.alt
self.wcs.alt = key
header_string = self.wcs.to_header(relax)
header = fits.Header.fromstring(header_string)
keys_to_remove = ["", " ", "COMMENT"]
for kw in keys_to_remove:
if kw in header:
del header[kw]
# Check if we can handle TPD distortion correctly
if _WCS_TPD_WARN_LT71:
for kw, val in header.items():
if kw[:5] in ("CPDIS", "CQDIS") and val == "TPD":
warnings.warn(
f"WCS contains a TPD distortion model in {kw}. WCSLIB"
f" {_wcs.__version__} is writing this in a format"
" incompatible with current versions - please update to"
" 7.4 or use the bundled WCSLIB.",
AstropyWarning,
)
elif _WCS_TPD_WARN_LT74:
for kw, val in header.items():
if kw[:5] in ("CPDIS", "CQDIS") and val == "TPD":
warnings.warn(
f"WCS contains a TPD distortion model in {kw}, which"
" requires WCSLIB 7.4 or later to store in a FITS header"
f" (having {_wcs.__version__}).",
AstropyWarning,
)
else:
header = fits.Header()
if do_sip and self.sip is not None:
if self.wcs is not None and any(
not ctyp.endswith("-SIP") for ctyp in self.wcs.ctype
):
self._fix_ctype(header, add_sip=True)
for kw, val in self._write_sip_kw().items():
header[kw] = val
if (
not do_sip
and self.wcs is not None
and any(self.wcs.ctype)
and self.sip is not None
):
# This is called when relax is not False or WCSHDO_SIP
# The default case of ``relax=None`` is handled further in the code.
header = self._fix_ctype(header, add_sip=False)
if display_warning:
full_header = self.to_header(relax=True, key=key)
missing_keys = []
for kw, val in full_header.items():
if kw not in header:
missing_keys.append(kw)
if len(missing_keys):
warnings.warn(
"Some non-standard WCS keywords were excluded:"
f" {', '.join(missing_keys)} Use the ``relax`` kwarg to control"
" this.",
AstropyWarning,
)
# called when ``relax=None``
# This is different from the case of ``relax=False``.
if any(self.wcs.ctype) and self.sip is not None:
header = self._fix_ctype(header, add_sip=False, log_message=False)
# Finally reset the key. This must be called after ``_fix_ctype``.
if key is not None:
self.wcs.alt = orig_key
return header
def _fix_ctype(self, header, add_sip=True, log_message=True):
"""
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header.
add_sip : bool
Flag indicating whether "-SIP" should be added or removed from CTYPE keywords.
Remove "-SIP" from CTYPE when writing out a header with relax=False.
This needs to be done outside ``to_header`` because ``to_header`` runs
twice when ``relax=False`` and the second time ``relax`` is set to ``True``
to display the missing keywords.
If the user requested SIP distortion to be written out add "-SIP" to
CTYPE if it is missing.
"""
_add_sip_to_ctype = """
Inconsistent SIP distortion information is present in the current WCS:
SIP coefficients were detected, but CTYPE is missing the "-SIP" suffix,
therefore the current WCS is internally inconsistent.
Because relax has been set to True, the resulting output WCS will have
"-SIP" appended to CTYPE in order to make the header internally consistent.
However, this may produce incorrect astrometry in the output WCS, if
in fact the current WCS is already distortion-corrected.
Therefore, if the current WCS is already distortion-corrected (e.g., drizzled)
then SIP distortion components should not apply. In that case, for a WCS
that is already distortion-corrected, please remove the SIP coefficients
from the header.
"""
if log_message:
if add_sip:
log.info(_add_sip_to_ctype)
for i in range(1, self.naxis + 1):
# strip() must be called here to cover the case of alt key= " "
kw = f"CTYPE{i}{self.wcs.alt}".strip()
if kw in header:
if add_sip:
val = header[kw].strip("-SIP") + "-SIP"
else:
val = header[kw].strip("-SIP")
header[kw] = val
else:
continue
return header
def to_header_string(self, relax=None):
"""
Identical to `to_header`, but returns a string containing the
header cards.
"""
return str(self.to_header(relax))
def footprint_to_file(
self, filename="footprint.reg", color="green", width=2, coordsys=None
):
"""
Writes out a `ds9`_ style regions file. It can be loaded
directly by `ds9`_.
Parameters
----------
filename : str, optional
Output file name - default is ``'footprint.reg'``
color : str, optional
Color to use when plotting the line.
width : int, optional
Width of the region line.
coordsys : str, optional
Coordinate system. If not specified (default), the ``radesys``
value is used. For all possible values, see
http://ds9.si.edu/doc/ref/region.html#RegionFileFormat
"""
comments = (
"# Region file format: DS9 version 4.0 \n"
'# global color=green font="helvetica 12 bold '
"select=1 highlite=1 edit=1 move=1 delete=1 "
"include=1 fixed=0 source\n"
)
coordsys = coordsys or self.wcs.radesys
if coordsys not in (
"PHYSICAL",
"IMAGE",
"FK4",
"B1950",
"FK5",
"J2000",
"GALACTIC",
"ECLIPTIC",
"ICRS",
"LINEAR",
"AMPLIFIER",
"DETECTOR",
):
raise ValueError(
f"Coordinate system '{coordsys}' is not supported. A valid"
" one can be given with the 'coordsys' argument."
)
with open(filename, mode="w") as f:
f.write(comments)
f.write(f"{coordsys}\n")
f.write("polygon(")
ftpr = self.calc_footprint()
if ftpr is not None:
ftpr.tofile(f, sep=",")
f.write(f") # color={color}, width={width:d} \n")
def _get_naxis(self, header=None):
_naxis = []
if header is not None and not isinstance(header, (str, bytes)):
for naxis in itertools.count(1):
try:
_naxis.append(header[f"NAXIS{naxis}"])
except KeyError:
break
if len(_naxis) == 0:
_naxis = [0, 0]
elif len(_naxis) == 1:
_naxis.append(0)
self._naxis = _naxis
def printwcs(self):
print(repr(self))
def __repr__(self):
"""
Return a short description. Simply porting the behavior from
the `printwcs()` method.
"""
description = ["WCS Keywords\n", f"Number of WCS axes: {self.naxis!r}"]
sfmt = " : " + "".join(["{" + f"{i}" + "!r} " for i in range(self.naxis)])
keywords = ["CTYPE", "CRVAL", "CRPIX"]
values = [self.wcs.ctype, self.wcs.crval, self.wcs.crpix]
for keyword, value in zip(keywords, values):
description.append(keyword + sfmt.format(*value))
if hasattr(self.wcs, "pc"):
for i in range(self.naxis):
s = ""
for j in range(self.naxis):
s += "".join(["PC", str(i + 1), "_", str(j + 1), " "])
s += sfmt
description.append(s.format(*self.wcs.pc[i]))
s = "CDELT" + sfmt
description.append(s.format(*self.wcs.cdelt))
elif hasattr(self.wcs, "cd"):
for i in range(self.naxis):
s = ""
for j in range(self.naxis):
s += "".join(["CD", str(i + 1), "_", str(j + 1), " "])
s += sfmt
description.append(s.format(*self.wcs.cd[i]))
description.append(f"NAXIS : {' '.join(map(str, self._naxis))}")
return "\n".join(description)
def get_axis_types(self):
"""
Similar to `self.wcsprm.axis_types <astropy.wcs.Wcsprm.axis_types>`
but provides the information in a more Python-friendly format.
Returns
-------
result : list of dict
Returns a list of dictionaries, one for each axis, each
containing attributes about the type of that axis.
Each dictionary has the following keys:
- 'coordinate_type':
- None: Non-specific coordinate type.
- 'stokes': Stokes coordinate.
- 'celestial': Celestial coordinate (including ``CUBEFACE``).
- 'spectral': Spectral coordinate.
- 'scale':
- 'linear': Linear axis.
- 'quantized': Quantized axis (``STOKES``, ``CUBEFACE``).
- 'non-linear celestial': Non-linear celestial axis.
- 'non-linear spectral': Non-linear spectral axis.
- 'logarithmic': Logarithmic axis.
- 'tabular': Tabular axis.
- 'group'
- Group number, e.g. lookup table number
- 'number'
- For celestial axes:
- 0: Longitude coordinate.
- 1: Latitude coordinate.
- 2: ``CUBEFACE`` number.
- For lookup tables:
- the axis number in a multidimensional table.
``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
generate an error.
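Examples
--------
A sketch for a simple synthetic celestial WCS (the dictionary contents
shown are illustrative):
>>> from astropy.wcs import WCS
>>> w = WCS(naxis=2)
>>> w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
>>> w.wcs.set()
>>> w.get_axis_types()  # doctest: +SKIP
[{'coordinate_type': 'celestial', 'scale': 'non-linear celestial', 'group': 0, 'number': 0},
 {'coordinate_type': 'celestial', 'scale': 'non-linear celestial', 'group': 0, 'number': 1}]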
"""
if self.wcs is None:
raise AttributeError("This WCS object does not have a wcsprm object.")
coordinate_type_map = {0: None, 1: "stokes", 2: "celestial", 3: "spectral"}
scale_map = {
0: "linear",
1: "quantized",
2: "non-linear celestial",
3: "non-linear spectral",
4: "logarithmic",
5: "tabular",
}
result = []
for axis_type in self.wcs.axis_types:
subresult = {}
coordinate_type = (axis_type // 1000) % 10
subresult["coordinate_type"] = coordinate_type_map[coordinate_type]
scale = (axis_type // 100) % 10
subresult["scale"] = scale_map[scale]
group = (axis_type // 10) % 10
subresult["group"] = group
number = axis_type % 10
subresult["number"] = number
result.append(subresult)
return result
def __reduce__(self):
"""
Support pickling of WCS objects. This is done by serializing
to an in-memory FITS file and dumping that as a string.
"""
hdulist = self.to_fits(relax=True)
buffer = io.BytesIO()
hdulist.writeto(buffer)
dct = self.__dict__.copy()
dct["_alt_wcskey"] = self.wcs.alt
return (
__WCS_unpickle__,
(
self.__class__,
dct,
buffer.getvalue(),
),
)
def dropaxis(self, dropax):
"""
Remove an axis from the WCS.
Parameters
----------
dropax : int
The index of the WCS axis to drop, counting from 0 (i.e., python
convention, not FITS convention)
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with one axis fewer
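Examples
--------
An illustrative sketch dropping the spectral axis of a synthetic 3-D
WCS (the axis types are arbitrary):
>>> from astropy.wcs import WCS
>>> w = WCS(naxis=3)
>>> w.wcs.ctype = ["RA---TAN", "DEC--TAN", "FREQ"]
>>> w2 = w.dropaxis(2)
>>> w2.naxis
2
>>> list(w2.wcs.ctype)  # doctest: +SKIP
['RA---TAN', 'DEC--TAN']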
"""
inds = list(range(self.wcs.naxis))
inds.pop(dropax)
# axis 0 has special meaning to sub
# if wcs.wcs.ctype == ['RA','DEC','VLSR'], you want
# wcs.sub([1,2]) to get 'RA','DEC' back
return self.sub([i + 1 for i in inds])
def swapaxes(self, ax0, ax1):
"""
Swap axes in a WCS.
Parameters
----------
ax0 : int
ax1 : int
The indices of the two WCS axes to swap, counting from 0 (i.e., python
convention, not FITS convention)
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with the same number of axes,
but two swapped
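Examples
--------
An illustrative sketch swapping the two celestial axes of a synthetic
WCS:
>>> from astropy.wcs import WCS
>>> w = WCS(naxis=2)
>>> w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
>>> wswap = w.swapaxes(0, 1)
>>> list(wswap.wcs.ctype)  # doctest: +SKIP
['DEC--TAN', 'RA---TAN']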
"""
inds = list(range(self.wcs.naxis))
inds[ax0], inds[ax1] = inds[ax1], inds[ax0]
return self.sub([i + 1 for i in inds])
def reorient_celestial_first(self):
"""
Reorient the WCS such that the celestial axes are first, followed by
the spectral axis, followed by any others.
Assumes at least celestial axes are present.
"""
return self.sub(
[WCSSUB_CELESTIAL, WCSSUB_SPECTRAL, WCSSUB_STOKES, WCSSUB_TIME]
) # Defined by C-ext
def slice(self, view, numpy_order=True):
"""
Slice a WCS instance using a Numpy slice. The order of the slice should
be reversed (as for the data) compared to the natural WCS order.
Parameters
----------
view : tuple
A tuple containing the same number of slices as the WCS system.
The ``step`` attribute of a slice (the third argument) is supported
only for positive values; negative steps (reversing an axis) are
not presently supported.
numpy_order : bool
Use numpy order, i.e. slice the WCS so that an identical slice
applied to a numpy array will slice the array and WCS in the same
way. If set to `False`, the WCS will be sliced in FITS order,
meaning the first slice will be applied to the *last* numpy index
but the *first* WCS axis.
Returns
-------
wcs_new : `~astropy.wcs.WCS`
A new sliced (and possibly resampled) WCS instance.
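Examples
--------
A hedged sketch slicing a synthetic 2-D WCS with the same (numpy-order)
slices one would apply to the data array (the values are arbitrary):
>>> from astropy.wcs import WCS
>>> w = WCS(naxis=2)
>>> w.wcs.crpix = [50.0, 50.0]
>>> w_cut = w.slice((slice(10, 30), slice(20, 40)))
>>> w_cut.wcs.crpix  # doctest: +SKIP
array([30., 40.])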
"""
if hasattr(view, "__len__") and len(view) > self.wcs.naxis:
raise ValueError("Must have # of slices <= # of WCS axes")
elif not hasattr(view, "__len__"): # view MUST be an iterable
view = [view]
if not all(isinstance(x, slice) for x in view):
# We need to drop some dimensions, but this may not always be
# possible with .sub due to correlated axes, so instead we use the
# generalized slicing infrastructure from astropy.wcs.wcsapi.
return SlicedFITSWCS(self, view)
# NOTE: we could in principle use SlicedFITSWCS as above for all slicing,
# but in the simple case where there are no axes dropped, we can just
# create a full WCS object with updated WCS parameters which is faster
# for this specific case and also backward-compatible.
wcs_new = self.deepcopy()
if wcs_new.sip is not None:
sip_crpix = wcs_new.sip.crpix.tolist()
for i, iview in enumerate(view):
if iview.step is not None and iview.step < 0:
raise NotImplementedError("Reversing an axis is not implemented.")
if numpy_order:
wcs_index = self.wcs.naxis - 1 - i
else:
wcs_index = i
if iview.step is not None and iview.start is None:
# Slice from "None" is equivalent to slice from 0 (but one
# might want to downsample, so allow slices with
# None,None,step or None,stop,step)
iview = slice(0, iview.stop, iview.step)
if iview.start is not None:
if iview.step not in (None, 1):
crpix = self.wcs.crpix[wcs_index]
cdelt = self.wcs.cdelt[wcs_index]
# equivalently (keep this comment so you can compare eqns):
# wcs_new.wcs.crpix[wcs_index] =
# (crpix - iview.start)*iview.step + 0.5 - iview.step/2.
crp = (
(crpix - iview.start - 1.0) / iview.step
+ 0.5
+ 1.0 / iview.step / 2.0
)
wcs_new.wcs.crpix[wcs_index] = crp
if wcs_new.sip is not None:
sip_crpix[wcs_index] = crp
wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step
else:
wcs_new.wcs.crpix[wcs_index] -= iview.start
if wcs_new.sip is not None:
sip_crpix[wcs_index] -= iview.start
try:
# range requires integers but the other attributes can also
# handle arbitrary values, so this needs to be in a try/except.
nitems = len(builtins.range(self._naxis[wcs_index])[iview])
except TypeError as exc:
if "indices must be integers" not in str(exc):
raise
warnings.warn(
f"NAXIS{wcs_index} attribute is not updated because at "
f"least one index ('{iview}') is no integer.",
AstropyUserWarning,
)
else:
wcs_new._naxis[wcs_index] = nitems
if wcs_new.sip is not None:
wcs_new.sip = Sip(
self.sip.a, self.sip.b, self.sip.ap, self.sip.bp, sip_crpix
)
return wcs_new
def __getitem__(self, item):
# "getitem" is a shortcut for self.slice; it is very limited
# there is no obvious and unambiguous interpretation of wcs[1,2,3]
# We COULD allow wcs[1] to link to wcs.sub([2])
# (wcs[i] -> wcs.sub([i+1])
return self.slice(item)
def __iter__(self):
# Having __getitem__ makes Python think WCS is iterable. However,
# Python first checks whether __iter__ is present, so we can raise an
# exception here.
raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
@property
def axis_type_names(self):
"""
World names for each coordinate axis.
Returns
-------
list of str
A list of names along each axis.
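Examples
--------
A short illustrative example with a synthetic WCS:
>>> from astropy.wcs import WCS
>>> w = WCS(naxis=2)
>>> w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
>>> w.axis_type_names
['RA', 'DEC']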
"""
names = list(self.wcs.cname)
types = self.wcs.ctype
for i in range(len(names)):
if len(names[i]) > 0:
continue
names[i] = types[i].split("-")[0]
return names
@property
def celestial(self):
"""
A copy of the current WCS with only the celestial axes included.
"""
return self.sub([WCSSUB_CELESTIAL]) # Defined by C-ext
@property
def is_celestial(self):
return self.has_celestial and self.naxis == 2
@property
def has_celestial(self):
try:
return self.wcs.lng >= 0 and self.wcs.lat >= 0
except InconsistentAxisTypesError:
return False
@property
def spectral(self):
"""
A copy of the current WCS with only the spectral axes included.
"""
return self.sub([WCSSUB_SPECTRAL]) # Defined by C-ext
@property
def is_spectral(self):
return self.has_spectral and self.naxis == 1
@property
def has_spectral(self):
try:
return self.wcs.spec >= 0
except InconsistentAxisTypesError:
return False
@property
def temporal(self):
"""
A copy of the current WCS with only the time axes included.
"""
if not _WCSSUB_TIME_SUPPORT:
raise NotImplementedError(
"Support for 'temporal' axis requires WCSLIB version 7.8 or "
f"greater but linked WCSLIB version is {_wcs.__version__}"
)
return self.sub([WCSSUB_TIME]) # Defined by C-ext
@property
def is_temporal(self):
return self.has_temporal and self.naxis == 1
@property
def has_temporal(self):
return any(t // 1000 == 4 for t in self.wcs.axis_types)
@property
def has_distortion(self):
"""
Returns `True` if any distortion terms are present.
"""
return (
self.sip is not None
or self.cpdis1 is not None
or self.cpdis2 is not None
or self.det2im1 is not None
or self.det2im2 is not None
)
@property
def pixel_scale_matrix(self):
try:
cdelt = np.diag(self.wcs.get_cdelt())
pc = self.wcs.get_pc()
except InconsistentAxisTypesError:
try:
# for non-celestial axes, get_cdelt doesn't work
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
"cdelt will be ignored since cd is present",
RuntimeWarning,
)
cdelt = np.dot(self.wcs.cd, np.diag(self.wcs.cdelt))
except AttributeError:
cdelt = np.diag(self.wcs.cdelt)
try:
pc = self.wcs.pc
except AttributeError:
pc = 1
pccd = np.dot(cdelt, pc)
return pccd
def footprint_contains(self, coord, **kwargs):
"""
Determines if a given SkyCoord is contained in the wcs footprint.
Parameters
----------
coord : `~astropy.coordinates.SkyCoord`
The coordinate to check for containment within the WCS footprint.
**kwargs :
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
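Examples
--------
An illustrative check (this assumes ``w`` is a celestial
`~astropy.wcs.WCS` with image dimensions defined, e.g. created from a
real FITS header; the coordinate below is a placeholder):
>>> from astropy.coordinates import SkyCoord
>>> coord = SkyCoord(5.53, -72.05, unit="deg")
>>> w.footprint_contains(coord)  # doctest: +SKIP
True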
"""
return coord.contained_by(self, **kwargs)
def __WCS_unpickle__(cls, dct, fits_data):
"""
Unpickles a WCS object from a serialized FITS string.
"""
self = cls.__new__(cls)
buffer = io.BytesIO(fits_data)
hdulist = fits.open(buffer)
naxis = dct.pop("naxis", None)
if naxis:
hdulist[0].header["naxis"] = naxis
naxes = dct.pop("_naxis", [])
for k, na in enumerate(naxes):
hdulist[0].header[f"naxis{k + 1:d}"] = na
kwargs = dct.pop("_init_kwargs", {})
self.__dict__.update(dct)
wcskey = dct.pop("_alt_wcskey", " ")
WCS.__init__(self, hdulist[0].header, hdulist, key=wcskey, **kwargs)
self.pixel_bounds = dct.get("_pixel_bounds", None)
return self
def find_all_wcs(
header, relax=True, keysel=None, fix=True, translate_units="", _do_set=True
):
"""
Find all the WCS transformations in the given header.
Parameters
----------
header : str or `~astropy.io.fits.Header` object.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions of the
WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`astropy:relaxread` for details.
keysel : sequence of str, optional
A list of flags used to select the keyword types considered by
wcslib. When ``None``, only the standard image header
keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting objects to fix any non-standard uses in the
header. `FITSFixedWarning` warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Returns
-------
wcses : list of `WCS`
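Examples
--------
A hedged sketch collecting every WCS (primary and alternates) from a
FITS header; ``'image.fits'`` is a placeholder file name:
>>> from astropy.io import fits
>>> from astropy.wcs import find_all_wcs
>>> header = fits.getheader("image.fits")  # doctest: +SKIP
>>> wcses = find_all_wcs(header)  # doctest: +SKIP
>>> [w.wcs.alt for w in wcses]  # doctest: +SKIP
[' ', 'A']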
"""
if isinstance(header, (str, bytes)):
header_string = header
elif isinstance(header, fits.Header):
header_string = header.tostring()
else:
raise TypeError("header must be a string or astropy.io.fits.Header object")
keysel_flags = _parse_keysel(keysel)
if isinstance(header_string, str):
header_bytes = header_string.encode("ascii")
else:
header_bytes = header_string
wcsprms = _wcs.find_all_wcs(header_bytes, relax, keysel_flags)
result = []
for wcsprm in wcsprms:
subresult = WCS(fix=False, _do_set=False)
subresult.wcs = wcsprm
result.append(subresult)
if fix:
subresult.fix(translate_units)
if _do_set:
subresult.wcs.set()
return result
def validate(source):
"""
Produces a WCS validation report for the given FITS file.
Parameters
----------
source : str or file-like or `~astropy.io.fits.HDUList`
The FITS file to validate.
Returns
-------
results : list subclass instance
The result is returned as nested lists. The first level
corresponds to the HDUs in the given file. The next level has
an entry for each WCS found in that header. The special
subclass of list will pretty-print the results as a table when
printed.
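Examples
--------
Typical usage returns a nested-list report that pretty-prints as a
table; the file name here is a placeholder:
>>> from astropy import wcs
>>> results = wcs.validate("image.fits")  # doctest: +SKIP
>>> print(results)  # doctest: +SKIP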
"""
class _WcsValidateWcsResult(list):
def __init__(self, key):
self._key = key
def __repr__(self):
result = [f" WCS key '{self._key or ' '}':"]
if len(self):
for entry in self:
for i, line in enumerate(entry.splitlines()):
if i == 0:
initial_indent = " - "
else:
initial_indent = " "
result.extend(
textwrap.wrap(
line,
initial_indent=initial_indent,
subsequent_indent=" ",
)
)
else:
result.append(" No issues.")
return "\n".join(result)
class _WcsValidateHduResult(list):
def __init__(self, hdu_index, hdu_name):
self._hdu_index = hdu_index
self._hdu_name = hdu_name
list.__init__(self)
def __repr__(self):
if len(self):
if self._hdu_name:
hdu_name = f" ({self._hdu_name})"
else:
hdu_name = ""
result = [f"HDU {self._hdu_index}{hdu_name}:"]
for wcs in self:
result.append(repr(wcs))
return "\n".join(result)
return ""
class _WcsValidateResults(list):
def __repr__(self):
result = []
for hdu in self:
content = repr(hdu)
if len(content):
result.append(content)
return "\n\n".join(result)
global __warningregistry__
if isinstance(source, fits.HDUList):
hdulist = source
else:
hdulist = fits.open(source)
results = _WcsValidateResults()
for i, hdu in enumerate(hdulist):
hdu_results = _WcsValidateHduResult(i, hdu.name)
results.append(hdu_results)
with warnings.catch_warnings(record=True) as warning_lines:
wcses = find_all_wcs(
hdu.header, relax=_wcs.WCSHDR_reject, fix=False, _do_set=False
)
for wcs in wcses:
wcs_results = _WcsValidateWcsResult(wcs.wcs.alt)
hdu_results.append(wcs_results)
try:
del __warningregistry__
except NameError:
pass
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter("always", FITSFixedWarning, append=True)
try:
WCS(
hdu.header,
hdulist,
key=wcs.wcs.alt or " ",
relax=_wcs.WCSHDR_reject,
fix=True,
_do_set=False,
)
except WcsError as e:
wcs_results.append(str(e))
wcs_results.extend([str(x.message) for x in warning_lines])
return results
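# Editor's illustrative sketch (not part of astropy): running the validator on
# a file and printing the nested report; "myfile.fits" is a hypothetical path.
#
#     >>> from astropy.wcs import validate
#     >>> results = validate("myfile.fits")
#     >>> print(results)    # one block per HDU, one sub-block per WCS key found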
|
60ae849657f93cdc875f84e6de309e48abd12823076ec2121918307967af4f79 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.time package provides functionality for manipulating times and
dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI,
UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in
astronomy.
"""
import copy
import enum
import operator
import os
import threading
from datetime import date, datetime, timedelta
from time import strftime
from warnings import warn
import erfa
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.extern import _strptime
from astropy.units import UnitConversionError
from astropy.utils import ShapedLikeNDArray
from astropy.utils.data_info import MixinInfo, data_info_factory
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
# Import TimeFromEpoch to avoid breaking code that followed the old example of
# making a custom timescale in the documentation.
from .formats import TimeFromEpoch # noqa: F401
from .formats import (
TIME_DELTA_FORMATS,
TIME_FORMATS,
TimeAstropyTime,
TimeDatetime,
TimeJD,
TimeUnique,
)
from .time_helper.function_helpers import CUSTOM_FUNCTIONS, UNSUPPORTED_FUNCTIONS
from .utils import day_frac
__all__ = [
"TimeBase",
"Time",
"TimeDelta",
"TimeInfo",
"TimeInfoBase",
"update_leap_seconds",
"TIME_SCALES",
"STANDARD_TIME_SCALES",
"TIME_DELTA_SCALES",
"ScaleValueError",
"OperandTypeError",
"TimeDeltaMissingUnitWarning",
]
STANDARD_TIME_SCALES = ("tai", "tcb", "tcg", "tdb", "tt", "ut1", "utc")
LOCAL_SCALES = ("local",)
TIME_TYPES = {
scale: scales for scales in (STANDARD_TIME_SCALES, LOCAL_SCALES) for scale in scales
}
TIME_SCALES = STANDARD_TIME_SCALES + LOCAL_SCALES
MULTI_HOPS = {
("tai", "tcb"): ("tt", "tdb"),
("tai", "tcg"): ("tt",),
("tai", "ut1"): ("utc",),
("tai", "tdb"): ("tt",),
("tcb", "tcg"): ("tdb", "tt"),
("tcb", "tt"): ("tdb",),
("tcb", "ut1"): ("tdb", "tt", "tai", "utc"),
("tcb", "utc"): ("tdb", "tt", "tai"),
("tcg", "tdb"): ("tt",),
("tcg", "ut1"): ("tt", "tai", "utc"),
("tcg", "utc"): ("tt", "tai"),
("tdb", "ut1"): ("tt", "tai", "utc"),
("tdb", "utc"): ("tt", "tai"),
("tt", "ut1"): ("tai", "utc"),
("tt", "utc"): ("tai",),
}
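# Editor's illustrative note: MULTI_HOPS is keyed by the alphabetically sorted
# pair of scales and lists only the intermediate scales; _set_scale() (defined
# below) builds the full chain and reverses it when the request runs the other
# way, e.g.:
#
#     >>> xform = ("ut1", "tcb")                      # requested conversion
#     >>> xform_sort = tuple(sorted(xform))           # ('tcb', 'ut1')
#     >>> MULTI_HOPS[xform_sort]                      # ('tdb', 'tt', 'tai', 'utc')
#     >>> chain = xform_sort[:1] + MULTI_HOPS[xform_sort] + xform_sort[-1:]
#     >>> tuple(reversed(chain))        # ('ut1', 'utc', 'tai', 'tt', 'tdb', 'tcb')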
GEOCENTRIC_SCALES = ("tai", "tt", "tcg")
BARYCENTRIC_SCALES = ("tcb", "tdb")
ROTATIONAL_SCALES = ("ut1",)
TIME_DELTA_TYPES = {
scale: scales
for scales in (
GEOCENTRIC_SCALES,
BARYCENTRIC_SCALES,
ROTATIONAL_SCALES,
LOCAL_SCALES,
)
for scale in scales
}
TIME_DELTA_SCALES = (
GEOCENTRIC_SCALES + BARYCENTRIC_SCALES + ROTATIONAL_SCALES + LOCAL_SCALES
)
# For time scale changes, we need L_G and L_B, which are stored in erfam.h as
# /* L_G = 1 - d(TT)/d(TCG) */
# define ERFA_ELG (6.969290134e-10)
# /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */
# define ERFA_ELB (1.550519768e-8)
# These are exposed in erfa as erfa.ELG and erfa.ELB.
# Implied: d(TT)/d(TCG) = 1-L_G
# and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G)
# scale offsets as second = first + first * scale_offset[(first,second)]
SCALE_OFFSETS = {
("tt", "tai"): None,
("tai", "tt"): None,
("tcg", "tt"): -erfa.ELG,
("tt", "tcg"): erfa.ELG / (1.0 - erfa.ELG),
("tcg", "tai"): -erfa.ELG,
("tai", "tcg"): erfa.ELG / (1.0 - erfa.ELG),
("tcb", "tdb"): -erfa.ELB,
("tdb", "tcb"): erfa.ELB / (1.0 - erfa.ELB),
}
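# Editor's illustrative note: with the convention
#     second = first + first * SCALE_OFFSETS[(first, second)]
# an interval dt_tt on the TT scale maps to TCG as
#     dt_tcg = dt_tt * (1 + L_G / (1 - L_G)) = dt_tt / (1 - L_G)
# which matches d(TCG)/d(TT) = 1/(1 - L_G) derived in the comments above.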
# triple-level dictionary, yay!
SIDEREAL_TIME_MODELS = {
"mean": {
"IAU2006": {"function": erfa.gmst06, "scales": ("ut1", "tt")},
"IAU2000": {"function": erfa.gmst00, "scales": ("ut1", "tt")},
"IAU1982": {"function": erfa.gmst82, "scales": ("ut1",), "include_tio": False},
},
"apparent": {
"IAU2006A": {"function": erfa.gst06a, "scales": ("ut1", "tt")},
"IAU2000A": {"function": erfa.gst00a, "scales": ("ut1", "tt")},
"IAU2000B": {"function": erfa.gst00b, "scales": ("ut1",)},
"IAU1994": {"function": erfa.gst94, "scales": ("ut1",), "include_tio": False},
},
}
class _LeapSecondsCheck(enum.Enum):
NOT_STARTED = 0 # No thread has reached the check
RUNNING = 1 # A thread is running update_leap_seconds (_LEAP_SECONDS_LOCK is held)
DONE = 2 # update_leap_seconds has completed
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.NOT_STARTED
_LEAP_SECONDS_LOCK = threading.RLock()
def _compress_array_dims(arr):
"""Compress array by allowing at most 2 * edgeitems + 1 in each dimension.
Parameters
----------
arr : array-like
Array to compress.
Returns
-------
out : array-like
Compressed array.
"""
idxs = []
edgeitems = np.get_printoptions()["edgeitems"]
# Build up a list of index arrays for each dimension, allowing no more than
# 2 * edgeitems + 1 elements in each dimension.
for dim in range(arr.ndim):
if arr.shape[dim] > 2 * edgeitems:
# The middle [edgeitems] value does not matter as it gets replaced
# by ... in the output.
idxs.append(
np.concatenate(
[np.arange(edgeitems), [edgeitems], np.arange(-edgeitems, 0)]
)
)
else:
idxs.append(np.arange(arr.shape[dim]))
# Use the magic np.ix_ function to effectively treat each index array as a
# slicing operator.
idxs_ix = np.ix_(*idxs)
out = arr[idxs_ix]
return out
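# Editor's illustrative sketch (not part of astropy): with numpy's default
# ``edgeitems=3``, a long axis is cut down to 2*3 + 1 = 7 entries (3 leading,
# one placeholder that later prints as "...", 3 trailing).
#
#     >>> arr = np.arange(100)
#     >>> _compress_array_dims(arr).shape    # (7,) under default printoptions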
class TimeInfoBase(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
This base class is common between TimeInfo and TimeDeltaInfo.
"""
attr_names = MixinInfo.attr_names | {"serialize_method"}
_supports_indexing = True
# The usual tuple of attributes needed for serialization is replaced
# by a property, since Time can be serialized different ways.
_represent_as_dict_extra_attrs = (
"format",
"scale",
"precision",
"in_subfmt",
"out_subfmt",
"location",
"_delta_ut1_utc",
"_delta_tdb_tt",
)
# When serializing, write out the `value` attribute using the column name.
_represent_as_dict_primary_data = "value"
mask_val = np.ma.masked
@property
def _represent_as_dict_attrs(self):
method = self.serialize_method[self._serialize_context]
if method == "formatted_value":
out = ("value",)
elif method == "jd1_jd2":
out = ("jd1", "jd2")
else:
raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'")
return out + self._represent_as_dict_extra_attrs
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
# If ``True`` for a context, then use formatted ``value`` attribute
# (e.g. the ISO time string). If ``False`` then use float jd1 and jd2.
self.serialize_method = {
"fits": "jd1_jd2",
"ecsv": "formatted_value",
"hdf5": "jd1_jd2",
"yaml": "jd1_jd2",
"parquet": "jd1_jd2",
None: "jd1_jd2",
}
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
Returns
-------
arrays : list of ndarray
"""
parent = self._parent
jd_approx = parent.jd
jd_remainder = (parent - parent.__class__(jd_approx, format="jd")).jd
return [jd_approx, jd_remainder]
@property
def unit(self):
return None
info_summary_stats = staticmethod(
data_info_factory(
names=MixinInfo._stats,
funcs=[getattr(np, stat) for stat in MixinInfo._stats],
)
)
# When Time has mean, std, min, max methods:
# funcs = [lambda x: getattr(x, stat)() for stat_name in MixinInfo._stats])
def _construct_from_dict(self, map):
if "jd1" in map and "jd2" in map:
# Initialize as JD but revert to desired format and out_subfmt (if needed)
format = map.pop("format")
out_subfmt = map.pop("out_subfmt", None)
map["format"] = "jd"
map["val"] = map.pop("jd1")
map["val2"] = map.pop("jd2")
out = self._parent_cls(**map)
out.format = format
if out_subfmt is not None:
out.out_subfmt = out_subfmt
else:
map["val"] = map.pop("value")
out = self._parent_cls(**map)
return out
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Time instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "description")
)
attrs.pop("dtype") # Not relevant for Time
col0 = cols[0]
# Check that location is consistent for all Time objects
for col in cols[1:]:
# This is the method used by __setitem__ to ensure that the right side
# has a consistent location (and coerce data if necessary, but that does
# not happen in this case since `col` is already a Time object). If this
# passes then any subsequent table operations via setitem will work.
try:
col0._make_value_equivalent(slice(None), col)
except ValueError:
raise ValueError("input columns have inconsistent locations")
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop("shape")
jd2000 = 2451544.5 # Arbitrary JD value J2000.0 that will work with ERFA
jd1 = np.full(shape, jd2000, dtype="f8")
jd2 = np.zeros(shape, dtype="f8")
tm_attrs = {
attr: getattr(col0, attr) for attr in ("scale", "location", "precision")
}
out = self._parent_cls(jd1, jd2, format="jd", **tm_attrs)
out.format = col0.format
out.out_subfmt = col0.out_subfmt
out.in_subfmt = col0.in_subfmt
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
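# Editor's illustrative sketch (not part of astropy): ``new_like`` is what the
# table machinery (e.g. ``vstack``/``join``) calls to pre-allocate a Time
# column which is then filled in-place; roughly
#
#     >>> from astropy.time import Time
#     >>> t1 = Time(["2020-01-01", "2020-01-02"])
#     >>> out = Time.info.new_like([t1], length=5)   # 5 rows of J2000.0 placeholders
#     >>> out[:2] = t1                               # set rows via __setitem__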
class TimeInfo(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
def _represent_as_dict(self, attrs=None):
"""Get the values for the parent ``attrs`` and return as a dict.
By default, uses '_represent_as_dict_attrs'.
"""
map = super()._represent_as_dict(attrs=attrs)
# TODO: refactor these special cases into the TimeFormat classes?
# The datetime64 format requires special handling for ECSV (see #12840).
# The `value` has numpy dtype datetime64 but this is not an allowed
# datatype for ECSV. Instead convert to a string representation.
if (
self._serialize_context == "ecsv"
and map["format"] == "datetime64"
and "value" in map
):
map["value"] = map["value"].astype("U")
# The datetime format is serialized as ISO with no loss of precision.
if map["format"] == "datetime" and "value" in map:
map["value"] = np.vectorize(lambda x: x.isoformat())(map["value"])
return map
def _construct_from_dict(self, map):
# See comment above. May need to convert string back to datetime64.
# Note that _serialize_context is not set here so we just look for the
# string value directly.
if (
map["format"] == "datetime64"
and "value" in map
and map["value"].dtype.kind == "U"
):
map["value"] = map["value"].astype("datetime64")
# Convert back to datetime objects for datetime format.
if map["format"] == "datetime" and "value" in map:
from datetime import datetime
map["value"] = np.vectorize(datetime.fromisoformat)(map["value"])
delta_ut1_utc = map.pop("_delta_ut1_utc", None)
delta_tdb_tt = map.pop("_delta_tdb_tt", None)
out = super()._construct_from_dict(map)
if delta_ut1_utc is not None:
out._delta_ut1_utc = delta_ut1_utc
if delta_tdb_tt is not None:
out._delta_tdb_tt = delta_tdb_tt
return out
class TimeDeltaInfo(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_extra_attrs = ("format", "scale")
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new TimeDelta instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "description")
)
attrs.pop("dtype") # Not relevant for Time
col0 = cols[0]
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop("shape")
jd1 = np.zeros(shape, dtype="f8")
jd2 = np.zeros(shape, dtype="f8")
out = self._parent_cls(jd1, jd2, format="jd", scale=col0.scale)
out.format = col0.format
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeBase(ShapedLikeNDArray):
"""Base time class from which Time and TimeDelta inherit."""
# Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__)
# gets called over the __mul__ of Numpy arrays.
__array_priority__ = 20000
# Declare that Time can be used as a Table column by defining the
# attribute where column attributes will be stored.
_astropy_column_attrs = None
def __getnewargs__(self):
return (self._time,)
def _init_from_vals(
self,
val,
val2,
format,
scale,
copy,
precision=None,
in_subfmt=None,
out_subfmt=None,
):
"""
Set the internal _format, scale, and _time attrs from user
inputs. This handles coercion into the correct shapes and
some basic input validation.
"""
if precision is None:
precision = 3
if in_subfmt is None:
in_subfmt = "*"
if out_subfmt is None:
out_subfmt = "*"
# Coerce val into an array
val = _make_array(val, copy)
# If val2 is not None, ensure consistency
if val2 is not None:
val2 = _make_array(val2, copy)
try:
np.broadcast(val, val2)
except ValueError:
raise ValueError(
"Input val and val2 have inconsistent shape; "
"they cannot be broadcast together."
)
if scale is not None:
if not (isinstance(scale, str) and scale.lower() in self.SCALES):
raise ScaleValueError(
f"Scale {scale!r} is not in the allowed scales "
f"{sorted(self.SCALES)}"
)
# If either of the input val, val2 are masked arrays then
# find the masked elements and fill them.
mask, val, val2 = _check_for_masked_and_fill(val, val2)
# Parse / convert input values into internal jd1, jd2 based on format
self._time = self._get_time_fmt(
val, val2, format, scale, precision, in_subfmt, out_subfmt
)
self._format = self._time.name
# Hack from #9969 to allow passing the location value that has been
# collected by the TimeAstropyTime format class up to the Time level.
# TODO: find a nicer way.
if hasattr(self._time, "_location"):
self.location = self._time._location
del self._time._location
# If any inputs were masked then mask jd2 accordingly. From the above
# routine ``mask`` must be either Python bool False or a bool ndarray
# with shape broadcastable to jd2.
if mask is not False:
mask = np.broadcast_to(mask, self._time.jd2.shape)
self._time.jd1[mask] = 2451544.5 # Set to JD for 2000-01-01
self._time.jd2[mask] = np.nan
def _get_time_fmt(self, val, val2, format, scale, precision, in_subfmt, out_subfmt):
"""
Given the supplied val, val2, format and scale try to instantiate
the corresponding TimeFormat class to convert the input values into
the internal jd1 and jd2.
If format is `None` and the input is a string-type or object array then
guess available formats and stop when one matches.
"""
if format is None and (
val.dtype.kind in ("S", "U", "O", "M") or val.dtype.names
):
# Input is a string, object, datetime, or a table-like ndarray
# (structured array, recarray). These input types can be
# uniquely identified by the format classes.
formats = [
(name, cls)
for name, cls in self.FORMATS.items()
if issubclass(cls, TimeUnique)
]
# AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry,
# but try to guess it at the end.
formats.append(("astropy_time", TimeAstropyTime))
elif not (isinstance(format, str) and format.lower() in self.FORMATS):
if format is None:
raise ValueError(
"No time format was given, and the input is not unique"
)
else:
raise ValueError(
f"Format {format!r} is not one of the allowed formats "
f"{sorted(self.FORMATS)}"
)
else:
formats = [(format, self.FORMATS[format])]
assert formats
problems = {}
for name, cls in formats:
try:
return cls(val, val2, scale, precision, in_subfmt, out_subfmt)
except UnitConversionError:
raise
except (ValueError, TypeError) as err:
# If ``format`` specified then there is only one possibility, so raise
# immediately and include the upstream exception message to make it
# easier for user to see what is wrong.
if len(formats) == 1:
raise ValueError(
f"Input values did not match the format class {format}:"
+ os.linesep
+ f"{err.__class__.__name__}: {err}"
) from err
else:
problems[name] = err
else:
raise ValueError(
"Input values did not match any of the formats where the format "
f"keyword is optional: {problems}"
) from problems[formats[0][0]]
@property
def writeable(self):
return self._time.jd1.flags.writeable & self._time.jd2.flags.writeable
@writeable.setter
def writeable(self, value):
self._time.jd1.flags.writeable = value
self._time.jd2.flags.writeable = value
@property
def format(self):
"""
Get or set time format.
The format defines the way times are represented when accessed via the
``.value`` attribute. By default it is the same as the format used for
initializing the `Time` instance, but it can be set to any other value
that could be used for initialization. These can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']
"""
return self._format
@format.setter
def format(self, format):
"""Set time format."""
if format not in self.FORMATS:
raise ValueError(f"format must be one of {list(self.FORMATS)}")
format_cls = self.FORMATS[format]
# Get the new TimeFormat object to contain time in new format. Possibly
# coerce in/out_subfmt to '*' (default) if existing subfmt values are
# not valid in the new format.
self._time = format_cls(
self._time.jd1,
self._time.jd2,
self._time._scale,
self.precision,
in_subfmt=format_cls._get_allowed_subfmt(self.in_subfmt),
out_subfmt=format_cls._get_allowed_subfmt(self.out_subfmt),
from_jd=True,
)
self._format = format
def to_string(self):
"""Output a string representation of the Time or TimeDelta object.
Similar to ``str(self.value)`` (which uses numpy array formatting) but
array values are evaluated only for the items that actually are output.
For large arrays this can be a substantial performance improvement.
Returns
-------
out : str
String representation of the time values.
"""
npo = np.get_printoptions()
if self.size < npo["threshold"]:
out = str(self.value)
else:
# Compress time object by allowing at most 2 * npo["edgeitems"] + 1
# in each dimension. Then force numpy to use "summary mode" of
# showing only the edge items by setting the size threshold to 0.
# TODO: use np.core.arrayprint._leading_trailing if we have support for
# np.concatenate. See #8610.
tm = _compress_array_dims(self)
with np.printoptions(threshold=0):
out = str(tm.value)
return out
def __repr__(self):
return "<{} object: scale='{}' format='{}' value={}>".format(
self.__class__.__name__, self.scale, self.format, self.to_string()
)
def __str__(self):
return self.to_string()
def __hash__(self):
try:
loc = getattr(self, "location", None)
if loc is not None:
loc = loc.x.to_value(u.m), loc.y.to_value(u.m), loc.z.to_value(u.m)
return hash((self.jd1, self.jd2, self.scale, loc))
except TypeError:
if self.ndim != 0:
reason = "(must be scalar)"
elif self.masked:
reason = "(value is masked)"
else:
raise
raise TypeError(f"unhashable type: '{self.__class__.__name__}' {reason}")
@property
def scale(self):
"""Time scale."""
return self._time.scale
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError(
f"Scale {scale!r} is not in the allowed scales {sorted(self.SCALES)}"
)
if scale == "utc" or self.scale == "utc":
# If doing a transform involving UTC then check that the leap
# seconds table is up to date.
_check_leapsec()
# Determine the chain of scale transformations to get from the current
# scale to the new scale. MULTI_HOPS contains a dict of all
# transformations (xforms) that require intermediate xforms.
# The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order.
xform = (self.scale, scale)
xform_sort = tuple(sorted(xform))
multi = MULTI_HOPS.get(xform_sort, ())
xforms = xform_sort[:1] + multi + xform_sort[-1:]
# If we made the reverse xform then reverse it now.
if xform_sort != xform:
xforms = tuple(reversed(xforms))
# Transform the jd1,2 pairs through the chain of scale xforms.
jd1, jd2 = self._time.jd1, self._time.jd2_filled
for sys1, sys2 in zip(xforms[:-1], xforms[1:]):
# Some xforms require an additional delta_ argument that is
# provided through Time methods. These values may be supplied by
# the user or computed based on available approximations. The
# get_delta_ methods are available for only one combination of
# sys1, sys2 though the property applies for both xform directions.
args = [jd1, jd2]
for sys12 in ((sys1, sys2), (sys2, sys1)):
dt_method = "_get_delta_{}_{}".format(*sys12)
try:
get_dt = getattr(self, dt_method)
except AttributeError:
pass
else:
args.append(get_dt(jd1, jd2))
break
conv_func = getattr(erfa, sys1 + sys2)
jd1, jd2 = conv_func(*args)
jd1, jd2 = day_frac(jd1, jd2)
if self.masked:
jd2[self.mask] = np.nan
self._time = self.FORMATS[self.format](
jd1,
jd2,
scale,
self.precision,
self.in_subfmt,
self.out_subfmt,
from_jd=True,
)
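# Editor's illustrative sketch (not part of astropy): users normally reach this
# machinery through scale attributes on a Time instance (see __getattr__ below),
# e.g. for a UTC time
#
#     >>> from astropy.time import Time
#     >>> t = Time("2020-01-01T00:00:00", scale="utc")
#     >>> t.tt     # chains utc -> tai -> tt using MULTI_HOPS
#     >>> t.tdb    # chains utc -> tai -> tt -> tdb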
@property
def precision(self):
"""
Decimal precision when outputting seconds as floating point (int
value between 0 and 9 inclusive).
"""
return self._time.precision
@precision.setter
def precision(self, val):
del self.cache
self._time.precision = val
@property
def in_subfmt(self):
"""
Unix wildcard pattern to select subformats for parsing string input
times.
"""
return self._time.in_subfmt
@in_subfmt.setter
def in_subfmt(self, val):
self._time.in_subfmt = val
del self.cache
@property
def out_subfmt(self):
"""
Unix wildcard pattern to select subformats for outputting times.
"""
return self._time.out_subfmt
@out_subfmt.setter
def out_subfmt(self, val):
# Setting the out_subfmt property here does validation of ``val``
self._time.out_subfmt = val
del self.cache
@property
def shape(self):
"""The shape of the time instances.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
ValueError
If the new shape has the wrong total number of elements.
AttributeError
If the shape of the ``jd1``, ``jd2``, ``location``,
``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed
without the arrays being copied. For these cases, use the
`Time.reshape` method (which copies any arrays that cannot be
reshaped in-place).
"""
return self._time.jd1.shape
@shape.setter
def shape(self, shape):
del self.cache
# We have to keep track of arrays that were already reshaped,
# since we may have to return those to their original shape if a later
# shape-setting fails.
reshaped = []
oldshape = self.shape
# In-place reshape of data/attributes. Need to access _time.jd1/2 not
# self.jd1/2 because the latter are not guaranteed to be the actual
# data, and in fact should not be directly changeable from the public
# API.
for obj, attr in (
(self._time, "jd1"),
(self._time, "jd2"),
(self, "_delta_ut1_utc"),
(self, "_delta_tdb_tt"),
(self, "location"),
):
val = getattr(obj, attr, None)
if val is not None and val.size > 1:
try:
val.shape = shape
except Exception:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
def _shaped_like_input(self, value):
if self._time.jd1.shape:
if isinstance(value, np.ndarray):
return value
else:
raise TypeError(
f"JD is an array ({self._time.jd1!r}) but value is not ({value!r})"
)
else:
# zero-dimensional array, is it safe to unbox?
if (
isinstance(value, np.ndarray)
and not value.shape
and not np.ma.is_masked(value)
):
if value.dtype.kind == "M":
# existing test doesn't want datetime64 converted
return value[()]
elif value.dtype.fields:
# Unpack but keep field names; .item() doesn't
# Still don't get python types in the fields
return value[()]
else:
return value.item()
else:
return value
@property
def jd1(self):
"""
First of the two doubles that internally store time value(s) in JD.
"""
jd1 = self._time.mask_if_needed(self._time.jd1)
return self._shaped_like_input(jd1)
@property
def jd2(self):
"""
Second of the two doubles that internally store time value(s) in JD.
"""
jd2 = self._time.mask_if_needed(self._time.jd2)
return self._shaped_like_input(jd2)
def to_value(self, format, subfmt="*"):
"""Get time values expressed in specified output format.
This method allows representing the ``Time`` object in the desired
output ``format`` and optional sub-format ``subfmt``. Available
built-in formats include ``jd``, ``mjd``, ``iso``, and so forth. Each
format can have its own sub-formats.
For built-in numerical formats like ``jd`` or ``unix``, ``subfmt`` can
be one of 'float', 'long', 'decimal', 'str', or 'bytes'. Here, 'long'
uses ``numpy.longdouble`` for somewhat enhanced precision (with
the enhancement depending on platform), and 'decimal'
:class:`decimal.Decimal` for full precision. For 'str' and 'bytes', the
number of digits is also chosen such that time values are represented
accurately.
For built-in date-like string formats, one of 'date_hms', 'date_hm', or
'date' (or 'longdate_hms', etc., for 5-digit years in
`~astropy.time.TimeFITS`). For sub-formats including seconds, the
number of digits used for the fractional seconds is as set by
`~astropy.time.Time.precision`.
Parameters
----------
format : str
The format in which one wants the time values. Default: the current
format.
subfmt : str or None, optional
Value or wildcard pattern to select the sub-format in which the
values should be given. The default of '*' picks the first
available for a given format, i.e., 'float' or 'date_hms'.
If `None`, use the instance's ``out_subfmt``.
"""
# TODO: add a precision argument (but ensure it is keyword argument
# only, to make life easier for TimeDelta.to_value()).
if format not in self.FORMATS:
raise ValueError(f"format must be one of {list(self.FORMATS)}")
cache = self.cache["format"]
# Try to keep cache behaviour like it was in astropy < 4.0.
key = format if subfmt is None else (format, subfmt)
if key not in cache:
if format == self.format:
tm = self
else:
tm = self.replicate(format=format)
# Some TimeFormat subclasses may not be able to handle being passed
# an out_subfmt. This includes some core classes like
# TimeBesselianEpochString that do not have any allowed subfmts. But
# those do deal with `self.out_subfmt` internally, so if subfmt is
# the same, we do not pass it on.
kwargs = {}
if subfmt is not None and subfmt != tm.out_subfmt:
kwargs["out_subfmt"] = subfmt
try:
value = tm._time.to_value(parent=tm, **kwargs)
except TypeError as exc:
# Try validating subfmt, e.g. for formats like 'jyear_str' that
# do not implement out_subfmt in to_value() (because there are
# no allowed subformats). If subfmt is not valid this gives the
# same exception as would have occurred if the call to
# `to_value()` had succeeded.
tm._time._select_subfmts(subfmt)
# Subfmt was valid, so fall back to the original exception to see
# if it was lack of support for out_subfmt as a call arg.
if "unexpected keyword argument 'out_subfmt'" in str(exc):
raise ValueError(
f"to_value() method for format {format!r} does not "
"support passing a 'subfmt' argument"
) from None
else:
# Some unforeseen exception so raise.
raise
value = tm._shaped_like_input(value)
cache[key] = value
return cache[key]
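# Editor's illustrative sketch (not part of astropy): requesting other formats
# and sub-formats without changing the instance itself.
#
#     >>> t = Time("2010-01-01T00:00:00", scale="utc")
#     >>> t.to_value("mjd")                   # float MJD
#     >>> t.to_value("mjd", subfmt="str")     # decimal string at full precision
#     >>> t.to_value("iso", subfmt="date")    # '2010-01-01'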
@property
def value(self):
"""Time value(s) in current format."""
return self.to_value(self.format, None)
@property
def masked(self):
return self._time.masked
@property
def mask(self):
return self._time.mask
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.time.Time` or `~astropy.time.TimeDelta` object.
The values to be inserted must conform to the rules for in-place setting
of ``Time`` objects (see ``Get and set values`` in the ``Time``
documentation).
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple row insertion before the
index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.time.Time` subclass
New time object with inserted value(s)
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError("obj arg must be an integer")
if axis != 0:
raise ValueError("axis must be 0")
if not self.shape:
raise TypeError(
f"cannot insert into scalar {self.__class__.__name__} object"
)
if abs(idx0) > len(self):
raise IndexError(
f"index {idx0} is out of bounds for axis 0 with size {len(self)}"
)
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
# For non-Time object, use numpy to help figure out the length. (Note annoying
# case of a string input that has a length which is not the length we want).
if not isinstance(values, self.__class__):
values = np.asarray(values)
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like(
[self], len(self) + n_values, name=self.info.name
)
out._time.jd1[:idx0] = self._time.jd1[:idx0]
out._time.jd2[:idx0] = self._time.jd2[:idx0]
# This uses the Time setting machinery to coerce and validate as necessary.
out[idx0 : idx0 + n_values] = values
out._time.jd1[idx0 + n_values :] = self._time.jd1[idx0:]
out._time.jd2[idx0 + n_values :] = self._time.jd2[idx0:]
return out
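# Editor's illustrative sketch (not part of astropy): inserting a single row
# before index 1 of a two-element Time array.
#
#     >>> t = Time(["2020-01-01", "2020-01-03"])
#     >>> t.insert(1, Time("2020-01-02"))    # three rows, new value in the middle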
def __setitem__(self, item, value):
if not self.writeable:
if self.shape:
raise ValueError(
f"{self.__class__.__name__} object is read-only. Make a "
'copy() or set "writeable" attribute to True.'
)
else:
raise ValueError(
f"scalar {self.__class__.__name__} object is read-only."
)
# Any use of setitem results in immediate cache invalidation
del self.cache
# Setting invalidates transform deltas
for attr in ("_delta_tdb_tt", "_delta_ut1_utc"):
if hasattr(self, attr):
delattr(self, attr)
if value is np.ma.masked or value is np.nan:
self._time.jd2[item] = np.nan
return
value = self._make_value_equivalent(item, value)
# Finally directly set the jd1/2 values. Locations are known to match.
if self.scale is not None:
value = getattr(value, self.scale)
self._time.jd1[item] = value._time.jd1
self._time.jd2[item] = value._time.jd2
def isclose(self, other, atol=None):
"""Returns a boolean or boolean array where two Time objects are
element-wise equal within a time tolerance.
This evaluates the expression below::
abs(self - other) <= atol
Parameters
----------
other : `~astropy.time.Time`
Time object for comparison.
atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Absolute tolerance for equality with units of time (e.g. ``u.s`` or
``u.day``). Default is two bits in the 128-bit JD time representation,
equivalent to about 40 picoseconds.
"""
if atol is None:
# Note: use 2 bits instead of 1 bit based on experience in precision
# tests, since taking the difference with a UTC time means one has
# to do a scale change.
atol = 2 * np.finfo(float).eps * u.day
if not isinstance(atol, (u.Quantity, TimeDelta)):
raise TypeError(
"'atol' argument must be a Quantity or TimeDelta instance, got "
f"{atol.__class__.__name__} instead"
)
try:
# Separate these out so user sees where the problem is
dt = self - other
dt = abs(dt)
out = dt <= atol
except Exception as err:
raise TypeError(
"'other' argument must support subtraction with Time "
"and return a value that supports comparison with "
f"{atol.__class__.__name__}: {err}"
)
return out
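# Editor's illustrative sketch (not part of astropy): comparing two times with
# the default and an explicit tolerance.
#
#     >>> import astropy.units as u
#     >>> t1 = Time("2020-01-01T00:00:00")
#     >>> t2 = t1 + 0.4 * u.s
#     >>> t1.isclose(t2)                 # False with the ~40 ps default tolerance
#     >>> t1.isclose(t2, atol=1 * u.s)   # True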
def copy(self, format=None):
"""
Return a fully independent copy of the Time object, optionally changing
the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
In this method a full copy of the internal time arrays will be made.
The internal time arrays are normally not changeable by the user so in
most cases the ``replicate()`` method should be used.
Parameters
----------
format : str, optional
Time format of the copy.
Returns
-------
tm : Time object
Copy of this object
"""
return self._apply("copy", format=format)
def replicate(self, format=None, copy=False, cls=None):
"""
Return a replica of the Time object, optionally changing the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
If ``copy`` is set to `True` then a full copy of the internal time arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory. The internal time arrays
are normally not changeable by the user so in most cases it should not
be necessary to set ``copy`` to `True`.
The convenience method copy() is available in which ``copy`` is `True`
by default.
Parameters
----------
format : str, optional
Time format of the replica.
copy : bool, optional
Return a true copy instead of using references where possible.
Returns
-------
tm : Time object
Replica of this object
"""
return self._apply("copy" if copy else "replicate", format=format, cls=cls)
def _apply(self, method, *args, format=None, cls=None, **kwargs):
"""Create a new time object, possibly applying a method to the arrays.
Parameters
----------
method : str or callable
If string, can be 'replicate' or the name of a relevant
`~numpy.ndarray` method. In the former case, a new time instance
with unchanged internal data is created, while in the latter the
method is applied to the internal ``jd1`` and ``jd2`` arrays, as
well as to possible ``location``, ``_delta_ut1_utc``, and
``_delta_tdb_tt`` arrays.
If a callable, it is directly applied to the above arrays.
Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``. If the ``format`` keyword
argument is present, this will be used as the Time format of the
replica.
Examples
--------
Some ways this is used internally::
copy : ``_apply('copy')``
replicate : ``_apply('replicate')``
reshape : ``_apply('reshape', new_shape)``
index or slice : ``_apply('__getitem__', item)``
broadcast : ``_apply(np.broadcast, shape=new_shape)``
"""
new_format = self.format if format is None else format
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
if method == "replicate":
apply_method = None
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
jd1, jd2 = self._time.jd1, self._time.jd2
if apply_method:
jd1 = apply_method(jd1)
jd2 = apply_method(jd2)
# Get a new instance of our class and set its attributes directly.
tm = super().__new__(cls or self.__class__)
tm._time = TimeJD(
jd1,
jd2,
self.scale,
precision=0,
in_subfmt="*",
out_subfmt="*",
from_jd=True,
)
# Optional ndarray attributes.
for attr in ("_delta_ut1_utc", "_delta_tdb_tt", "location"):
try:
val = getattr(self, attr)
except AttributeError:
continue
if apply_method:
# Apply the method to any value arrays (though skip if there is
# only an array scalar and the method would return a view,
# since in that case nothing would change).
if getattr(val, "shape", ()):
val = apply_method(val)
elif method == "copy" or method == "flatten":
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
val = copy.copy(val)
setattr(tm, attr, val)
# Copy other 'info' attr only if it has actually been defined and the
# time object is not a scalar (issue #10688).
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
tm.info = self.info
# Make the new internal _time object corresponding to the format
# in the copy. If the format is unchanged this process is lightweight
# and does not create any new arrays.
if new_format not in tm.FORMATS:
raise ValueError(f"format must be one of {list(tm.FORMATS)}")
NewFormat = tm.FORMATS[new_format]
tm._time = NewFormat(
tm._time.jd1,
tm._time.jd2,
tm._time._scale,
precision=self.precision,
in_subfmt=NewFormat._get_allowed_subfmt(self.in_subfmt),
out_subfmt=NewFormat._get_allowed_subfmt(self.out_subfmt),
from_jd=True,
)
tm._format = new_format
tm.SCALES = self.SCALES
return tm
def __copy__(self):
"""
Overrides the default behavior of the `copy.copy` function in
the python stdlib to behave like `Time.copy`. Does *not* make a
copy of the JD arrays - only copies by reference.
"""
return self.replicate()
def __deepcopy__(self, memo):
"""
Overrides the default behavior of the `copy.deepcopy` function
in the python stdlib to behave like `Time.copy`. Does make a
copy of the JD arrays.
"""
return self.copy()
def _advanced_index(self, indices, axis=None, keepdims=False):
"""Turn argmin, argmax output into an advanced index.
Argmin, argmax output contains indices along a given axis in an array
shaped like the other dimensions. To use this to get values at the
correct location, a list is constructed in which the other axes are
indexed sequentially. When ``keepdims`` is ``True``, the net result is
the same as constructing an index grid with ``np.ogrid`` and then
replacing the ``axis`` item with ``indices`` with its shape expanded
at ``axis``. When ``keepdims`` is ``False``, the result is the same but
with the ``axis`` dimension removed from all list entries.
When ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`.
Parameters
----------
indices : array
Output of argmin or argmax.
axis : int or None
axis along which argmin or argmax was used.
keepdims : bool
Whether to construct indices that keep or remove the axis along
which argmin or argmax was used. Default: ``False``.
Returns
-------
advanced_index : list of arrays
Suitable for use as an advanced index.
"""
if axis is None:
return np.unravel_index(indices, self.shape)
ndim = self.ndim
if axis < 0:
axis = axis + ndim
if keepdims and indices.ndim < self.ndim:
indices = np.expand_dims(indices, axis)
index = [
indices
if i == axis
else np.arange(s).reshape(
(1,) * (i if keepdims or i < axis else i - 1)
+ (s,)
+ (1,) * (ndim - i - (1 if keepdims or i > axis else 2))
)
for i, s in enumerate(self.shape)
]
return tuple(index)
def argmin(self, axis=None, out=None):
"""Return indices of the minimum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmin` for detailed documentation.
"""
# First get the minimum at normal precision.
jd1, jd2 = self.jd1, self.jd2
approx = np.min(jd1 + jd2, axis, keepdims=True)
# Approx is very close to the true minimum, and by subtracting it at
# full precision, all numbers near 0 can be represented correctly,
# so we can be sure we get the true minimum.
# The below is effectively what would be done for
# dt = (self - self.__class__(approx, format='jd')).jd
# which translates to:
# approx_jd1, approx_jd2 = day_frac(approx, 0.)
# dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2)
dt = (jd1 - approx) + jd2
return dt.argmin(axis, out)
def argmax(self, axis=None, out=None):
"""Return indices of the maximum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmax` for detailed documentation.
"""
# For procedure, see comment on argmin.
jd1, jd2 = self.jd1, self.jd2
approx = np.max(jd1 + jd2, axis, keepdims=True)
dt = (jd1 - approx) + jd2
return dt.argmax(axis, out)
def argsort(self, axis=-1):
"""Returns the indices that would sort the time array.
This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied. Internally,
it uses :func:`~numpy.lexsort`, and hence no sort method can be chosen.
"""
# For procedure, see comment on argmin.
jd1, jd2 = self.jd1, self.jd2
approx = jd1 + jd2
remainder = (jd1 - approx) + jd2
if axis is None:
return np.lexsort((remainder.ravel(), approx.ravel()))
else:
return np.lexsort(keys=(remainder, approx), axis=axis)
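# Editor's illustrative sketch (not part of astropy): why the (approx, remainder)
# lexsort matters; 20 ns is far below the ~50 microsecond resolution of a single
# float64 JD near J2020, yet the two-double split still orders the values.
#
#     >>> import astropy.units as u
#     >>> t = Time("2020-01-01T00:00:00") + [20, 0] * u.ns
#     >>> t.argsort()     # expected array([1, 0]); jd1 + jd2 alone cannot resolve it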
def min(self, axis=None, out=None, keepdims=False):
"""Minimum along a given axis.
This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.min``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self[self._advanced_index(self.argmin(axis), axis, keepdims)]
def max(self, axis=None, out=None, keepdims=False):
"""Maximum along a given axis.
This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.max``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self[self._advanced_index(self.argmax(axis), axis, keepdims)]
def ptp(self, axis=None, out=None, keepdims=False):
"""Peak to peak (maximum - minimum) along a given axis.
This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used.
Note that the ``out`` argument is present only for compatibility with
`~numpy.ptp`; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self.max(axis, keepdims=keepdims) - self.min(axis, keepdims=keepdims)
def sort(self, axis=-1):
"""Return a copy sorted along the specified axis.
This is similar to :meth:`~numpy.ndarray.sort`, but internally uses
indexing with :func:`~numpy.lexsort` to ensure that the full precision
given by the two doubles ``jd1`` and ``jd2`` is kept, and that
corresponding attributes are properly sorted and copied as well.
Parameters
----------
axis : int or None
Axis to be sorted. If ``None``, the flattened array is sorted.
By default, sort over the last axis.
"""
return self[self._advanced_index(self.argsort(axis), axis, keepdims=True)]
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
"""Mean along a given axis.
This is similar to :meth:`~numpy.ndarray.mean`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2`` is
used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.mean``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
Similarly, the ``dtype`` argument is also present for compatibility
only; it has no meaning for `Time`.
Parameters
----------
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
dtype : None
Only present for compatibility with :meth:`~numpy.ndarray.mean`,
must be `None`.
out : None
Only present for compatibility with :meth:`~numpy.ndarray.mean`,
must be `None`.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
where : array_like of bool, optional
Elements to include in the mean. See `~numpy.ufunc.reduce` for
details.
Returns
-------
m : Time
A new Time instance containing the mean values
"""
if dtype is not None:
raise ValueError("Cannot set ``dtype`` on `Time` instances")
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
where = where & ~self.mask
where_broadcasted = np.broadcast_to(where, self.shape)
kwargs = dict(
axis=axis,
keepdims=keepdims,
where=where,
)
divisor = np.sum(where_broadcasted, axis=axis, keepdims=keepdims)
if np.any(divisor == 0):
raise ValueError(
"Mean over zero elements is not supported as it would give an undefined"
" time;see issue https://github.com/astropy/astropy/issues/6509"
)
jd1, jd2 = day_frac(
val1=np.sum(np.ma.getdata(self.jd1), **kwargs),
val2=np.sum(np.ma.getdata(self.jd2), **kwargs),
divisor=divisor,
)
result = type(self)(
val=jd1,
val2=jd2,
format="jd",
scale=self.scale,
copy=False,
)
result.format = self.format
return result
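# Editor's illustrative sketch (not part of astropy): the two-double day_frac
# accumulation keeps the mean exact at full Time precision.
#
#     >>> t = Time(["2021-01-01", "2021-01-03"])
#     >>> t.mean().iso    # expected '2021-01-02 00:00:00.000'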
@property
def cache(self):
"""
Return the cache associated with this instance.
"""
return self._time.cache
@cache.deleter
def cache(self):
del self._time.cache
def __getattr__(self, attr):
"""
Get dynamic attributes to output format or do timescale conversion.
"""
if attr in self.SCALES and self.scale is not None:
cache = self.cache["scale"]
if attr not in cache:
if attr == self.scale:
tm = self
else:
tm = self.replicate()
tm._set_scale(attr)
if tm.shape:
# Prevent future modification of cached array-like object
tm.writeable = False
cache[attr] = tm
return cache[attr]
elif attr in self.FORMATS:
return self.to_value(attr, subfmt=None)
elif attr in TIME_SCALES: # allowed ones done above (self.SCALES)
if self.scale is None:
raise ScaleValueError(
"Cannot convert TimeDelta with "
"undefined scale to any defined scale."
)
else:
raise ScaleValueError(
f"Cannot convert {self.__class__.__name__} with scale "
f"'{self.scale}' to scale '{attr}'"
)
else:
# Should raise AttributeError
return self.__getattribute__(attr)
def __dir__(self):
return sorted(set(super().__dir__()) | set(self.SCALES) | set(self.FORMATS))
def _match_shape(self, val):
"""
Ensure that `val` is matched to length of self. If val has length 1
then broadcast, otherwise cast to double and make sure shape matches.
"""
val = _make_array(val, copy=True) # be conservative and copy
if val.size > 1 and val.shape != self.shape:
try:
# check the value can be broadcast to the shape of self.
val = np.broadcast_to(val, self.shape, subok=True)
except Exception:
raise ValueError(
"Attribute shape must match or be broadcastable to that of "
"Time object. Typically, give either a single value or "
"one for each time."
)
return val
def _time_comparison(self, other, op):
"""If other is of same class as self, compare difference in self.scale.
Otherwise, return NotImplemented.
"""
if other.__class__ is not self.__class__:
try:
other = self.__class__(other, scale=self.scale)
except Exception:
# Let other have a go.
return NotImplemented
if (
self.scale is not None
and self.scale not in other.SCALES
or other.scale is not None
and other.scale not in self.SCALES
):
# Other will also not be able to do it, so raise a TypeError
# immediately, allowing us to explain why it doesn't work.
raise TypeError(
f"Cannot compare {self.__class__.__name__} instances with "
f"scales '{self.scale}' and '{other.scale}'"
)
if self.scale is not None and other.scale is not None:
other = getattr(other, self.scale)
return op((self.jd1 - other.jd1) + (self.jd2 - other.jd2), 0.0)
def __lt__(self, other):
return self._time_comparison(other, operator.lt)
def __le__(self, other):
return self._time_comparison(other, operator.le)
def __eq__(self, other):
"""
If other is an incompatible object for comparison, return `False`.
Otherwise, return `True` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.eq)
def __ne__(self, other):
"""
If other is an incompatible object for comparison, return `True`.
Otherwise, return `False` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.ne)
def __gt__(self, other):
return self._time_comparison(other, operator.gt)
def __ge__(self, other):
return self._time_comparison(other, operator.ge)
class Time(TimeBase):
"""
Represent and manipulate times and dates for astronomy.
A `Time` object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format`` and must correspond to the specified time ``scale``. The
optional ``val2`` time input should be supplied only for numeric input
formats (e.g. JD) where very high precision (better than 64-bit precision)
is required.
The allowed values for ``format`` can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']
See also: http://docs.astropy.org/en/stable/time/
Parameters
----------
val : sequence, ndarray, number, str, bytes, or `~astropy.time.Time` object
Value(s) to initialize the time or times. Bytes are decoded as ascii.
val2 : sequence, ndarray, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following:
('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
precision : int, optional
Digits of precision in string representation of time
in_subfmt : str, optional
Unix glob to select subformats for parsing input times
out_subfmt : str, optional
Unix glob to select subformat for outputting times
location : `~astropy.coordinates.EarthLocation` or tuple, optional
If given as a tuple, it should be able to initialize an
EarthLocation instance, i.e., either contain 3 items with units of
length for geocentric coordinates, or contain a longitude, latitude,
and an optional height for geodetic coordinates.
Can be a single location, or one for each input time.
If not given, assumed to be the center of the Earth for time scale
transformations to and from the solar-system barycenter.
copy : bool, optional
Make a copy of the input values
"""
SCALES = TIME_SCALES
"""List of time scales"""
FORMATS = TIME_FORMATS
"""Dict of time formats"""
def __new__(
cls,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if isinstance(val, Time):
self = val.replicate(format=format, copy=copy, cls=cls)
else:
self = super().__new__(cls)
return self
def __init__(
self,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if location is not None:
from astropy.coordinates import EarthLocation
if isinstance(location, EarthLocation):
self.location = location
else:
self.location = EarthLocation(*location)
if self.location.size == 1:
self.location = self.location.squeeze()
else:
if not hasattr(self, "location"):
self.location = None
if isinstance(val, Time):
# Update _time formatting parameters if explicitly specified
if precision is not None:
self._time.precision = precision
if in_subfmt is not None:
self._time.in_subfmt = in_subfmt
if out_subfmt is not None:
self._time.out_subfmt = out_subfmt
self.SCALES = TIME_TYPES[self.scale]
if scale is not None:
self._set_scale(scale)
else:
self._init_from_vals(
val, val2, format, scale, copy, precision, in_subfmt, out_subfmt
)
self.SCALES = TIME_TYPES[self.scale]
if self.location is not None and (
self.location.size > 1 and self.location.shape != self.shape
):
try:
# check the location can be broadcast to self's shape.
self.location = np.broadcast_to(self.location, self.shape, subok=True)
except Exception as err:
raise ValueError(
f"The location with shape {self.location.shape} cannot be "
f"broadcast against time with shape {self.shape}. "
"Typically, either give a single location or one for each time."
) from err
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent Time object."""
# If there is a vector location then broadcast to the Time shape
# and then select with ``item``
if self.location is not None and self.location.shape:
self_location = np.broadcast_to(self.location, self.shape, subok=True)[item]
else:
self_location = self.location
if isinstance(value, Time):
# Make sure locations are compatible. Location can be either None or
# a Location object.
if self_location is None and value.location is None:
match = True
elif (self_location is None and value.location is not None) or (
self_location is not None and value.location is None
):
match = False
else:
match = np.all(self_location == value.location)
if not match:
raise ValueError(
"cannot set to Time with different location: expected "
f"location={self_location} and got location={value.location}"
)
else:
try:
value = self.__class__(value, scale=self.scale, location=self_location)
except Exception:
try:
value = self.__class__(
value,
scale=self.scale,
format=self.format,
location=self_location,
)
except Exception as err:
raise ValueError(
f"cannot convert value to a compatible Time object: {err}"
)
return value
@classmethod
def now(cls):
"""
Creates a new object corresponding to the instant in time this
method is called.
.. note::
"Now" is determined using the `~datetime.datetime.utcnow`
function, so its accuracy and precision are determined by that
function. Generally that means it is set by the accuracy of
your system clock.
Returns
-------
nowtime : :class:`~astropy.time.Time`
A new `Time` object (or a subclass of `Time` if this is called from
such a subclass) at the current time.
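Examples
--------
A brief sketch; the value itself depends on when this is run, so only
static attributes are shown::
>>> from astropy.time import Time
>>> t = Time.now()
>>> t.scale, t.format
('utc', 'datetime')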
"""
# call `utcnow` immediately to be sure it's ASAP
dtnow = datetime.utcnow()
return cls(val=dtnow, format="datetime", scale="utc")
info = TimeInfo()
@classmethod
def strptime(cls, time_string, format_string, **kwargs):
"""
Parse a string to a Time according to a format specification.
See `time.strptime` documentation for format specification.
>>> Time.strptime('2012-Jun-30 23:59:60', '%Y-%b-%d %H:%M:%S')
<Time object: scale='utc' format='isot' value=2012-06-30T23:59:60.000>
Parameters
----------
time_string : str, sequence, or ndarray
Objects containing time data of type string
format_string : str
String specifying format of time_string.
kwargs : dict
Any keyword arguments for ``Time``. If the ``format`` keyword
argument is present, this will be used as the Time format.
Returns
-------
time_obj : `~astropy.time.Time`
A new `~astropy.time.Time` object corresponding to the input
``time_string``.
"""
time_array = np.asarray(time_string)
if time_array.dtype.kind not in ("U", "S"):
raise TypeError(
"Expected type is string, a bytes-like object or a sequence "
f"of these. Got dtype '{time_array.dtype.kind}'"
)
to_string = (
str
if time_array.dtype.kind == "U"
else lambda x: str(x.item(), encoding="ascii")
)
iterator = np.nditer([time_array, None], op_dtypes=[time_array.dtype, "U30"])
for time, formatted in iterator:
tt, fraction = _strptime._strptime(to_string(time), format_string)
time_tuple = tt[:6] + (fraction,)
formatted[...] = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:06}".format(
*time_tuple
)
format = kwargs.pop("format", None)
out = cls(*iterator.operands[1:], format="isot", **kwargs)
if format is not None:
out.format = format
return out
def strftime(self, format_spec):
"""
Convert Time to a string or a numpy.array of strings according to a
format specification.
See `time.strftime` documentation for format specification.
Parameters
----------
format_spec : str
Format definition of return string.
Returns
-------
formatted : str or numpy.array
String or numpy.array of strings formatted according to the given
format string.
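Examples
--------
A brief, illustrative sketch (the time value is arbitrary)::
>>> from astropy.time import Time
>>> t = Time('2018-01-01T10:30:00', scale='utc')
>>> t.strftime('%Y-%m-%d %H:%M:%S')
'2018-01-01 10:30:00'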
"""
formatted_strings = []
for sk in self.replicate("iso")._time.str_kwargs():
date_tuple = date(sk["year"], sk["mon"], sk["day"]).timetuple()
datetime_tuple = (
sk["year"],
sk["mon"],
sk["day"],
sk["hour"],
sk["min"],
sk["sec"],
date_tuple[6],
date_tuple[7],
-1,
)
fmtd_str = format_spec
if "%f" in fmtd_str:
fmtd_str = fmtd_str.replace(
"%f",
"{frac:0{precision}}".format(
frac=sk["fracsec"], precision=self.precision
),
)
fmtd_str = strftime(fmtd_str, datetime_tuple)
formatted_strings.append(fmtd_str)
if self.isscalar:
return formatted_strings[0]
else:
return np.array(formatted_strings).reshape(self.shape)
def light_travel_time(
self, skycoord, kind="barycentric", location=None, ephemeris=None
):
"""Light travel time correction to the barycentre or heliocentre.
The frame transformations used to calculate the location of the solar
system barycentre and the heliocentre rely on the erfa routine epv00,
which is consistent with the JPL DE405 ephemeris to an accuracy of
11.2 km, corresponding to a light travel time of 4 microseconds.
The routine assumes the source(s) are at large distance, i.e., neglects
finite-distance effects.
Parameters
----------
skycoord : `~astropy.coordinates.SkyCoord`
The sky location to calculate the correction for.
kind : str, optional
``'barycentric'`` (default) or ``'heliocentric'``
location : `~astropy.coordinates.EarthLocation`, optional
The location of the observatory to calculate the correction for.
If no location is given, the ``location`` attribute of the Time
object is used
ephemeris : str, optional
Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default,
use the one set with ``astropy.coordinates.solar_system_ephemeris.set``.
For more information, see `~astropy.coordinates.solar_system_ephemeris`.
Returns
-------
time_offset : `~astropy.time.TimeDelta`
The time offset between the barycentre or Heliocentre and Earth,
in TDB seconds. Should be added to the original time to get the
time in the Solar system barycentre or the Heliocentre.
The time conversion to BJD will then also include the relativistic correction.
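Examples
--------
A brief, illustrative sketch (the target, observatory location and time
below are arbitrary; the transformation may require IERS data, so the
result is not checked here)::
>>> from astropy.coordinates import EarthLocation, SkyCoord
>>> from astropy.time import Time
>>> import astropy.units as u
>>> loc = EarthLocation.from_geodetic(0 * u.deg, 51.5 * u.deg)
>>> t = Time('2020-01-01T00:00:00', scale='utc', location=loc)
>>> target = SkyCoord(ra=10 * u.deg, dec=20 * u.deg)
>>> ltt_bary = t.light_travel_time(target)  # doctest: +SKIP
>>> t_bary = t.tdb + ltt_bary  # doctest: +SKIP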
"""
if kind.lower() not in ("barycentric", "heliocentric"):
raise ValueError(
"'kind' parameter must be one of 'heliocentric' or 'barycentric'"
)
if location is None:
if self.location is None:
raise ValueError(
"An EarthLocation needs to be set or passed in to calculate bary- "
"or heliocentric corrections"
)
location = self.location
from astropy.coordinates import (
GCRS,
HCRS,
ICRS,
CartesianRepresentation,
UnitSphericalRepresentation,
solar_system_ephemeris,
)
# ensure sky location is ICRS compatible
if not skycoord.is_transformable_to(ICRS()):
raise ValueError("Given skycoord is not transformable to the ICRS")
# get location of observatory in ITRS coordinates at this Time
try:
itrs = location.get_itrs(obstime=self)
except Exception:
raise ValueError(
"Supplied location does not have a valid `get_itrs` method"
)
with solar_system_ephemeris.set(ephemeris):
if kind.lower() == "heliocentric":
# convert to heliocentric coordinates, aligned with ICRS
cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz
else:
# first we need to convert to GCRS coordinates with the correct
# obstime, since ICRS coordinates have no frame time
gcrs_coo = itrs.transform_to(GCRS(obstime=self))
# convert to barycentric (BCRS) coordinates, aligned with ICRS
cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz
# get unit ICRS vector to star
spos = (
skycoord.icrs.represent_as(UnitSphericalRepresentation)
.represent_as(CartesianRepresentation)
.xyz
)
# Move X,Y,Z to last dimension, to enable possible broadcasting below.
cpos = np.rollaxis(cpos, 0, cpos.ndim)
spos = np.rollaxis(spos, 0, spos.ndim)
# calculate light travel time correction
tcor_val = (spos * cpos).sum(axis=-1) / const.c
return TimeDelta(tcor_val, scale="tdb")
def earth_rotation_angle(self, longitude=None):
"""Calculate local Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'tio', the result will be relative to the Terrestrial
Intermediate Origin (TIO) (i.e., the output of `~erfa.era00`).
Returns
-------
`~astropy.coordinates.Longitude`
Local Earth rotation angle with units of hourangle.
See Also
--------
astropy.time.Time.sidereal_time
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
The result includes the TIO locator (s'), which positions the Terrestrial
Intermediate Origin on the equator of the Celestial Intermediate Pole (CIP)
and is rigorously corrected for polar motion (except when ``longitude='tio'``).
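Examples
--------
A brief, illustrative sketch (the time and longitude are arbitrary; polar
motion data from the IERS may be required, so the result is not checked
here)::
>>> from astropy.time import Time
>>> import astropy.units as u
>>> t = Time('2021-03-20T12:00:00', scale='ut1')
>>> t.earth_rotation_angle(longitude=0 * u.deg)  # doctest: +SKIP
<Longitude ... hourangle>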
"""
if isinstance(longitude, str) and longitude == "tio":
longitude = 0
include_tio = False
else:
include_tio = True
return self._sid_time_or_earth_rot_ang(
longitude=longitude,
function=erfa.era00,
scales=("ut1",),
include_tio=include_tio,
)
def sidereal_time(self, kind, longitude=None, model=None):
"""Calculate sidereal time.
Parameters
----------
kind : str
``'mean'`` or ``'apparent'``, i.e., accounting for precession
only, or also for nutation.
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'greenwich' or 'tio', the result will be relative to longitude
0 for models before 2000, and relative to the Terrestrial Intermediate
Origin (TIO) for later ones (i.e., the output of the relevant ERFA
function that calculates greenwich sidereal time).
model : str or None; optional
Precession (and nutation) model to use. The available ones are:
- {0}: {1}
- {2}: {3}
If `None` (default), the last (most recent) one from the appropriate
list above is used.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time, with units of hourangle.
See Also
--------
astropy.time.Time.earth_rotation_angle
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
For the IAU precession models from 2000 onwards, the result includes the
TIO locator (s'), which positions the Terrestrial Intermediate Origin on
the equator of the Celestial Intermediate Pole (CIP) and is rigorously
corrected for polar motion (except when ``longitude='tio'`` or ``'greenwich'``).
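Examples
--------
A brief, illustrative sketch (the time and longitude are arbitrary; the
conversion to UT1 may require IERS data, so the result is not checked
here)::
>>> from astropy.time import Time
>>> import astropy.units as u
>>> t = Time('2021-03-20T12:00:00', scale='utc')
>>> t.sidereal_time('apparent', longitude=0 * u.deg)  # doctest: +SKIP
<Longitude ... hourangle>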
""" # (docstring is formatted below)
if kind.lower() not in SIDEREAL_TIME_MODELS:
raise ValueError(
"The kind of sidereal time has to be "
+ " or ".join(sorted(SIDEREAL_TIME_MODELS))
)
available_models = SIDEREAL_TIME_MODELS[kind.lower()]
if model is None:
model = sorted(available_models)[-1]
elif model.upper() not in available_models:
raise ValueError(
f"Model {model} not implemented for {kind} sidereal time; "
f"available models are {sorted(available_models)}"
)
model_kwargs = available_models[model.upper()]
if isinstance(longitude, str) and longitude in ("tio", "greenwich"):
longitude = 0
model_kwargs = model_kwargs.copy()
model_kwargs["include_tio"] = False
return self._sid_time_or_earth_rot_ang(longitude=longitude, **model_kwargs)
if isinstance(sidereal_time.__doc__, str):
sidereal_time.__doc__ = sidereal_time.__doc__.format(
"apparent",
sorted(SIDEREAL_TIME_MODELS["apparent"]),
"mean",
sorted(SIDEREAL_TIME_MODELS["mean"]),
)
def _sid_time_or_earth_rot_ang(self, longitude, function, scales, include_tio=True):
"""Calculate a local sidereal time or Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance.
function : callable
The ERFA function to use.
scales : tuple of str
The time scales that the function requires on input.
include_tio : bool, optional
Whether to include the TIO locator corrected for polar motion.
Should be `False` for pre-2000 IAU models. Default: `True`.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time or Earth rotation angle, with units of hourangle.
"""
from astropy.coordinates import EarthLocation, Longitude
from astropy.coordinates.builtin_frames.utils import get_polar_motion
from astropy.coordinates.matrix_utilities import rotation_matrix
if longitude is None:
if self.location is None:
raise ValueError(
"No longitude is given but the location for "
"the Time object is not set."
)
longitude = self.location.lon
elif isinstance(longitude, EarthLocation):
longitude = longitude.lon
else:
# Sanity check on input; default unit is degree.
longitude = Longitude(longitude, u.degree, copy=False)
theta = self._call_erfa(function, scales)
if include_tio:
# TODO: this duplicates part of coordinates.erfa_astrom.ErfaAstrom.apio;
# maybe possible to factor out to one or the other.
sp = self._call_erfa(erfa.sp00, ("tt",))
xp, yp = get_polar_motion(self)
# Form the rotation matrix, CIRS to apparent [HA,Dec].
r = (
rotation_matrix(longitude, "z")
@ rotation_matrix(-yp, "x", unit=u.radian)
@ rotation_matrix(-xp, "y", unit=u.radian)
@ rotation_matrix(theta + sp, "z", unit=u.radian)
)
# Solve for angle.
angle = np.arctan2(r[..., 0, 1], r[..., 0, 0]) << u.radian
else:
angle = longitude + (theta << u.radian)
return Longitude(angle, u.hourangle)
def _call_erfa(self, function, scales):
# TODO: allow erfa functions to be used on Time with __array_ufunc__.
erfa_parameters = [
getattr(getattr(self, scale)._time, jd_part)
for scale in scales
for jd_part in ("jd1", "jd2_filled")
]
result = function(*erfa_parameters)
if self.masked:
result[self.mask] = np.nan
return result
def get_delta_ut1_utc(self, iers_table=None, return_status=False):
"""Find UT1 - UTC differences by interpolating in IERS Table.
Parameters
----------
iers_table : `~astropy.utils.iers.IERS`, optional
Table containing UT1-UTC differences from IERS Bulletins A
and/or B. Default: `~astropy.utils.iers.earth_orientation_table`
(which in turn defaults to the combined version provided by
`~astropy.utils.iers.IERS_Auto`).
return_status : bool
Whether to return status values. If `False` (default), iers
raises `IndexError` if any time is out of the range
covered by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status=True``)::
``astropy.utils.iers.FROM_IERS_B``
``astropy.utils.iers.FROM_IERS_A``
``astropy.utils.iers.FROM_IERS_A_PREDICTION``
``astropy.utils.iers.TIME_BEFORE_IERS_RANGE``
``astropy.utils.iers.TIME_BEYOND_IERS_RANGE``
Notes
-----
In normal usage, UT1-UTC differences are calculated automatically
on the first instance ut1 is needed.
Examples
--------
To check in code whether any times are before the IERS table range::
>>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE
>>> t = Time(['1961-01-01', '2000-01-01'], scale='utc')
>>> delta, status = t.get_delta_ut1_utc(return_status=True) # doctest: +REMOTE_DATA
>>> status == TIME_BEFORE_IERS_RANGE # doctest: +REMOTE_DATA
array([ True, False]...)
"""
if iers_table is None:
from astropy.utils.iers import earth_orientation_table
iers_table = earth_orientation_table.get()
return iers_table.ut1_utc(self.utc, return_status=return_status)
# Property for ERFA DUT arg = UT1 - UTC
def _get_delta_ut1_utc(self, jd1=None, jd2=None):
"""
Get ERFA DUT arg = UT1 - UTC. This getter takes optional jd1 and
jd2 args because it gets called that way when converting time scales.
If delta_ut1_utc is not yet set, this will interpolate it from the IERS table.
"""
# Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in
# seconds. It is obtained from tables published by the IERS.
if not hasattr(self, "_delta_ut1_utc"):
from astropy.utils.iers import earth_orientation_table
iers_table = earth_orientation_table.get()
# jd1, jd2 are normally set (see above), except if delta_ut1_utc
# is accessed directly; ensure we behave as expected for that case
if jd1 is None:
self_utc = self.utc
jd1, jd2 = self_utc._time.jd1, self_utc._time.jd2_filled
scale = "utc"
else:
scale = self.scale
# interpolate UT1-UTC in IERS table
delta = iers_table.ut1_utc(jd1, jd2)
# if we interpolated using UT1 jds, we may be off by one
# second near leap seconds (and very slightly off elsewhere)
if scale == "ut1":
# calculate UTC using the offset we got; the ERFA routine
# is tolerant of leap seconds, so will do this right
jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta.to_value(u.s))
# calculate a better estimate using the nearly correct UTC
delta = iers_table.ut1_utc(jd1_utc, jd2_utc)
self._set_delta_ut1_utc(delta)
return self._delta_ut1_utc
def _set_delta_ut1_utc(self, val):
del self.cache
if hasattr(val, "to"): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_ut1_utc = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc)
"""UT1 - UTC time scale offset"""
# Property for ERFA DTR arg = TDB - TT
def _get_delta_tdb_tt(self, jd1=None, jd2=None):
if not hasattr(self, "_delta_tdb_tt"):
# If jd1 and jd2 are not provided (which is the case for property
# attribute access) then require that the time scale is TT or TDB.
# Otherwise the computations here are not correct.
if jd1 is None or jd2 is None:
if self.scale not in ("tt", "tdb"):
raise ValueError(
"Accessing the delta_tdb_tt attribute is only "
"possible for TT or TDB time scales"
)
else:
jd1 = self._time.jd1
jd2 = self._time.jd2_filled
# First go from the current input time (which is either
# TDB or TT) to an approximate UT1. Since TT and TDB are
# pretty close (few msec?), assume TT. Similarly, since the
# UT1 terms are very small, use UTC instead of UT1.
njd1, njd2 = erfa.tttai(jd1, jd2)
njd1, njd2 = erfa.taiutc(njd1, njd2)
# subtract 0.5, so UT is fraction of the day from midnight
ut = day_frac(njd1 - 0.5, njd2)[1]
if self.location is None:
# Assume geocentric.
self._delta_tdb_tt = erfa.dtdb(jd1, jd2, ut, 0.0, 0.0, 0.0)
else:
location = self.location
# Geodetic params needed for d_tdb_tt()
lon = location.lon
rxy = np.hypot(location.x, location.y)
z = location.z
self._delta_tdb_tt = erfa.dtdb(
jd1,
jd2,
ut,
lon.to_value(u.radian),
rxy.to_value(u.km),
z.to_value(u.km),
)
return self._delta_tdb_tt
def _set_delta_tdb_tt(self, val):
del self.cache
if hasattr(val, "to"): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_tdb_tt = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt)
"""TDB - TT time scale offset"""
def __sub__(self, other):
# T - Tdelta = T
# T - T = Tdelta
other_is_delta = not isinstance(other, Time)
if other_is_delta: # T - Tdelta
# Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# we need a constant scale to calculate, which is guaranteed for
# TimeDelta, but not for Time (which can be UTC)
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale("tai")
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError(
"Cannot subtract Time and TimeDelta instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ("_delta_ut1_utc", "_delta_tdb_tt"):
if hasattr(out, attr):
delattr(out, attr)
else: # T - T
# the scales should be compatible (e.g., cannot convert TDB to LOCAL)
if other.scale not in self.SCALES:
raise TypeError(
"Cannot subtract Time instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
self_time = (
self._time if self.scale in TIME_DELTA_SCALES else self.tai._time
)
# set up TimeDelta, subtraction to be done shortly
out = TimeDelta(
self_time.jd1, self_time.jd2, format="jd", scale=self_time.scale
)
if other.scale != out.scale:
other = getattr(other, out.scale)
jd1 = out._time.jd1 - other._time.jd1
jd2 = out._time.jd2 - other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
if other_is_delta:
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
def __add__(self, other):
# T + Tdelta = T
# T + T = error
if isinstance(other, Time):
raise OperandTypeError(self, other, "+")
# Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# ideally, we calculate in the scale of the Time item, since that is
# what we want the output in, but this may not be possible, since
# TimeDelta cannot be converted arbitrarily
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale("tai")
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError(
"Cannot add Time and TimeDelta instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ("_delta_ut1_utc", "_delta_tdb_tt"):
if hasattr(out, attr):
delattr(out, attr)
jd1 = out._time.jd1 + other._time.jd1
jd2 = out._time.jd2 + other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
# Reverse addition is possible: <something-Tdelta-ish> + T
# but there is no case of <something> - T, so no __rsub__.
def __radd__(self, other):
return self.__add__(other)
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
scale = self.scale
if scale == "utc":
self = self.tai
result = super().mean(
axis=axis, dtype=dtype, out=out, keepdims=keepdims, where=where
)
if scale == "utc":
result = result.utc
result.out_subfmt = self.out_subfmt
location = self.location
if self.location is not None:
if self.location.shape:
if axis is None:
axis_normalized = tuple(range(self.ndim))
elif isinstance(axis, int):
axis_normalized = (axis,)
else:
axis_normalized = axis
sl = [slice(None)] * self.location.ndim
for a in axis_normalized:
sl[a] = slice(0, 1)
if np.any(self.location != self.location[tuple(sl)]):
raise ValueError(
"`location` must be constant over the reduction axes."
)
if not keepdims:
for a in axis_normalized:
sl[a] = 0
location = self.location[tuple(sl)]
result.location = location
return result
def __array_function__(self, function, types, args, kwargs):
"""
Wrap numpy functions.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
"""
if function in CUSTOM_FUNCTIONS:
f = CUSTOM_FUNCTIONS[function]
return f(*args, **kwargs)
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
return super().__array_function__(function, types, args, kwargs)
def to_datetime(self, timezone=None, leap_second_strict="raise"):
# TODO: this could likely go through to_value, as long as that
# had an **kwargs part that was just passed on to _time.
tm = self.replicate(format="datetime")
return tm._shaped_like_input(
tm._time.to_value(timezone, leap_second_strict=leap_second_strict)
)
to_datetime.__doc__ = TimeDatetime.to_value.__doc__
class TimeDeltaMissingUnitWarning(AstropyDeprecationWarning):
"""Warning for missing unit or format in TimeDelta."""
pass
class TimeDelta(TimeBase):
"""
Represent the time difference between two times.
A TimeDelta object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format``. The optional ``val2`` time input should be supplied only for
numeric input formats (e.g. JD) where very high precision (better than
64-bit precision) is required.
The allowed values for ``format`` can be listed with::
>>> list(TimeDelta.FORMATS)
['sec', 'jd', 'datetime']
Note that for time differences, the scale can be among three groups:
geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational
('ut1'). Within each of these, the scales for time differences are the
same. Conversion between geocentric and barycentric is possible, as there
is only a scale factor change, but one cannot convert to or from 'ut1', as
this requires knowledge of the actual times, not just their difference. For
a similar reason, 'utc' is not a valid scale for a time difference: a UTC
day is not always 86400 seconds.
For more information see:
- https://docs.astropy.org/en/stable/time/
- https://docs.astropy.org/en/stable/time/index.html#time-deltas
Parameters
----------
val : sequence, ndarray, number, `~astropy.units.Quantity` or `~astropy.time.TimeDelta` object
Value(s) to initialize the time difference(s). Any quantities will
be converted appropriately (with care taken to avoid rounding
errors for regular time units).
val2 : sequence, ndarray, number, or `~astropy.units.Quantity`; optional
Additional values, as needed to preserve precision.
format : str, optional
Format of input value(s). For numerical inputs without units,
"jd" is assumed and values are interpreted as days.
A deprecation warning is raised in this case. To avoid the warning,
either specify the format or add units to the input values.
scale : str, optional
Time scale of input value(s), must be one of the following values:
('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or
``None``), the scale is arbitrary; when added or subtracted from a
``Time`` instance, it will be used without conversion.
copy : bool, optional
Make a copy of the input values
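Examples
--------
A brief, illustrative sketch::
>>> from astropy.time import Time, TimeDelta
>>> import astropy.units as u
>>> dt = TimeDelta(1 * u.day)
>>> print(dt.sec)
86400.0
>>> t = Time('2020-01-01T00:00:00', scale='tai')
>>> (t + dt).iso
'2020-01-02 00:00:00.000'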
"""
SCALES = TIME_DELTA_SCALES
"""List of time delta scales."""
FORMATS = TIME_DELTA_FORMATS
"""Dict of time delta formats."""
info = TimeDeltaInfo()
def __new__(
cls,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if isinstance(val, TimeDelta):
self = val.replicate(format=format, copy=copy, cls=cls)
else:
self = super().__new__(cls)
return self
def __init__(self, val, val2=None, format=None, scale=None, copy=False):
if isinstance(val, TimeDelta):
if scale is not None:
self._set_scale(scale)
else:
format = format or self._get_format(val)
self._init_from_vals(val, val2, format, scale, copy)
if scale is not None:
self.SCALES = TIME_DELTA_TYPES[scale]
@staticmethod
def _get_format(val):
if isinstance(val, timedelta):
return "datetime"
if getattr(val, "unit", None) is None:
warn(
"Numerical value without unit or explicit format passed to"
" TimeDelta, assuming days",
TimeDeltaMissingUnitWarning,
)
return "jd"
def replicate(self, *args, **kwargs):
out = super().replicate(*args, **kwargs)
out.SCALES = self.SCALES
return out
def to_datetime(self):
"""
Convert to ``datetime.timedelta`` object.
"""
tm = self.replicate(format="datetime")
return tm._shaped_like_input(tm._time.value)
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError(
"Scale {scale!r} is not in the allowed scales {sorted(self.SCALES)}"
)
# For TimeDelta, there can only be a change in scale factor,
# which is written as time2 - time1 = scale_offset * time1
scale_offset = SCALE_OFFSETS[(self.scale, scale)]
if scale_offset is None:
self._time.scale = scale
else:
jd1, jd2 = self._time.jd1, self._time.jd2
offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset)
self._time = self.FORMATS[self.format](
jd1 + offset1,
jd2 + offset2,
scale,
self.precision,
self.in_subfmt,
self.out_subfmt,
from_jd=True,
)
def _add_sub(self, other, op):
"""Perform common elements of addition / subtraction for two delta times."""
# If not a TimeDelta then see if it can be turned into a TimeDelta.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# the scales should be compatible (e.g., cannot convert TDB to TAI)
if (
self.scale is not None
and self.scale not in other.SCALES
or other.scale is not None
and other.scale not in self.SCALES
):
raise TypeError(
"Cannot add TimeDelta instances with scales '{}' and '{}'".format(
self.scale, other.scale
)
)
# adjust the scale of other if the scale of self is set (or no scales)
if self.scale is not None or other.scale is None:
out = self.replicate()
if other.scale is not None:
other = getattr(other, self.scale)
else:
out = other.replicate()
jd1 = op(self._time.jd1, other._time.jd1)
jd2 = op(self._time.jd2, other._time.jd2)
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
return out
def __add__(self, other):
# If other is a Time then use Time.__add__ to do the calculation.
if isinstance(other, Time):
return other.__add__(self)
return self._add_sub(other, operator.add)
def __sub__(self, other):
# TimeDelta - Time is an error
if isinstance(other, Time):
raise OperandTypeError(self, other, "-")
return self._add_sub(other, operator.sub)
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
out = self.__sub__(other)
return -out
def __neg__(self):
"""Negation of a `TimeDelta` object."""
new = self.copy()
new._time.jd1 = -self._time.jd1
new._time.jd2 = -self._time.jd2
return new
def __abs__(self):
"""Absolute value of a `TimeDelta` object."""
jd1, jd2 = self._time.jd1, self._time.jd2
negative = jd1 + jd2 < 0
new = self.copy()
new._time.jd1 = np.where(negative, -jd1, jd1)
new._time.jd2 = np.where(negative, -jd2, jd2)
return new
def __mul__(self, other):
"""Multiplication of `TimeDelta` objects by numbers/arrays."""
# Check needed since otherwise the self.jd1 * other multiplication
# would enter here again (via __rmul__)
if isinstance(other, Time):
raise OperandTypeError(self, other, "*")
elif (isinstance(other, u.UnitBase) and other == u.dimensionless_unscaled) or (
isinstance(other, str) and other == ""
):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just multiply it in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) * other
except Exception:
# The various ways we could multiply all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other.value)
out = TimeDelta(jd1, jd2, format="jd", scale=self.scale)
if self.format != "jd":
out = out.replicate(format=self.format)
return out
def __rmul__(self, other):
"""Multiplication of numbers/arrays with `TimeDelta` objects."""
return self.__mul__(other)
def __truediv__(self, other):
"""Division of `TimeDelta` objects by numbers/arrays."""
# Cannot do __mul__(1./other) as that loses precision
if (isinstance(other, u.UnitBase) and other == u.dimensionless_unscaled) or (
isinstance(other, str) and other == ""
):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just divide by it.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) / other
except Exception:
# The various ways we could divide all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other.value)
out = TimeDelta(jd1, jd2, format="jd", scale=self.scale)
if self.format != "jd":
out = out.replicate(format=self.format)
return out
def __rtruediv__(self, other):
"""Division by `TimeDelta` objects of numbers/arrays."""
# Here, we do not have to worry about returning NotImplemented,
# since other has already had a chance to look at us.
return other / self.to(u.day)
def to(self, unit, equivalencies=[]):
"""
Convert to a quantity in the specified unit.
Parameters
----------
unit : unit-like
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally
or within a context.
Returns
-------
quantity : `~astropy.units.Quantity`
The quantity in the units specified.
See Also
--------
to_value : get the numerical value in a given unit.
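Examples
--------
A brief, illustrative sketch::
>>> from astropy.time import TimeDelta
>>> import astropy.units as u
>>> dt = TimeDelta(1 * u.day)
>>> dt.to(u.hr)
<Quantity 24. h>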
"""
return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to(
unit, equivalencies=equivalencies
)
def to_value(self, *args, **kwargs):
"""Get time delta values expressed in specified output format or unit.
This method is flexible and handles both conversion to a specified
``TimeDelta`` format / sub-format AND conversion to a specified unit.
If positional argument(s) are provided then the first one is checked
to see if it is a valid ``TimeDelta`` format, and next it is checked
to see if it is a valid unit or unit string.
To convert to a ``TimeDelta`` format and optional sub-format the options
are::
tm = TimeDelta(1.0 * u.s)
tm.to_value('jd') # equivalent of tm.jd
tm.to_value('jd', 'decimal') # convert to 'jd' as a Decimal object
tm.to_value('jd', subfmt='decimal')
tm.to_value(format='jd', subfmt='decimal')
To convert to a unit with optional equivalencies, the options are::
tm.to_value('hr') # convert to u.hr (hours)
tm.to_value('hr', []) # specify equivalencies as a positional arg
tm.to_value('hr', equivalencies=[])
tm.to_value(unit='hr', equivalencies=[])
The built-in `~astropy.time.TimeDelta` options for ``format`` are:
{'jd', 'sec', 'datetime'}.
For the two numerical formats 'jd' and 'sec', the available ``subfmt``
options are: {'float', 'long', 'decimal', 'str', 'bytes'}. Here, 'long'
uses ``numpy.longdouble`` for somewhat enhanced precision (with the
enhancement depending on platform), and 'decimal' instances of
:class:`decimal.Decimal` for full precision. For the 'str' and 'bytes'
sub-formats, the number of digits is also chosen such that time values
are represented accurately. Default: as set by ``out_subfmt`` (which by
default picks the first available for a given format, i.e., 'float').
Parameters
----------
format : str, optional
The format in which one wants the `~astropy.time.TimeDelta` values.
Default: the current format.
subfmt : str, optional
Possible sub-format in which the values should be given. Default: as
set by ``out_subfmt`` (which by default picks the first available
for a given format, i.e., 'float' or 'date_hms').
unit : `~astropy.units.UnitBase` instance or str, optional
The unit in which the value should be given.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally or
within a context.
Returns
-------
value : ndarray or scalar
The value in the format or units specified.
See Also
--------
to : Convert to a `~astropy.units.Quantity` instance in a given unit.
value : The time value in the current format.
"""
if not (args or kwargs):
raise TypeError("to_value() missing required format or unit argument")
# TODO: maybe allow 'subfmt' also for units, keeping full precision
# (effectively, by doing the reverse of quantity_day_frac)?
# This way, only equivalencies could lead to possible precision loss.
if "format" in kwargs or (
args != () and (args[0] is None or args[0] in self.FORMATS)
):
# Super-class will error with duplicate arguments, etc.
return super().to_value(*args, **kwargs)
# With positional arguments, we try parsing the first one as a unit,
# so that on failure we can give a more informative exception.
if args:
try:
unit = u.Unit(args[0])
except ValueError as exc:
raise ValueError(
"first argument is not one of the known "
f"formats ({list(self.FORMATS)}) and failed to parse as a unit."
) from exc
args = (unit,) + args[1:]
return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to_value(
*args, **kwargs
)
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent TimeDelta object."""
if not isinstance(value, TimeDelta):
try:
value = self.__class__(value, scale=self.scale, format=self.format)
except Exception as err:
raise ValueError(
f"cannot convert value to a compatible TimeDelta object: {err}"
)
return value
def isclose(self, other, atol=None, rtol=0.0):
"""Returns a boolean or boolean array where two TimeDelta objects are
element-wise equal within a time tolerance.
This effectively evaluates the expression below::
abs(self - other) <= atol + rtol * abs(other)
Parameters
----------
other : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Quantity or TimeDelta object for comparison.
atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Absolute tolerance for equality with units of time (e.g. ``u.s`` or
``u.day``). Default is one bit in the 128-bit JD time representation,
equivalent to about 20 picoseconds.
rtol : float
Relative tolerance for equality
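Examples
--------
A brief, illustrative sketch::
>>> from astropy.time import TimeDelta
>>> import astropy.units as u
>>> dt = TimeDelta(1.0 * u.s)
>>> bool(dt.isclose(2.0 * u.s, atol=1.5 * u.s))
True
>>> bool(dt.isclose(2.0 * u.s, atol=0.5 * u.s))
False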
"""
try:
other_day = other.to_value(u.day)
except Exception as err:
raise TypeError(f"'other' argument must support conversion to days: {err}")
if atol is None:
atol = np.finfo(float).eps * u.day
if not isinstance(atol, (u.Quantity, TimeDelta)):
raise TypeError(
"'atol' argument must be a Quantity or TimeDelta instance, got "
f"{atol.__class__.__name__} instead"
)
return np.isclose(
self.to_value(u.day), other_day, rtol=rtol, atol=atol.to_value(u.day)
)
class ScaleValueError(Exception):
pass
def _make_array(val, copy=False):
"""
Take ``val`` and convert/reshape to an array. If ``copy`` is `True`
then copy input values.
Returns
-------
val : ndarray
Array version of ``val``.
"""
if isinstance(val, (tuple, list)) and len(val) > 0 and isinstance(val[0], Time):
dtype = object
else:
dtype = None
val = np.array(val, copy=copy, subok=True, dtype=dtype)
# Allow only float64, string or object arrays as input
# (object is for datetime, maybe add more specific test later?)
# This also ensures the right byteorder for float64 (closes #2942).
if val.dtype.kind == "f" and val.dtype.itemsize >= np.dtype(np.float64).itemsize:
pass
elif val.dtype.kind in "OSUMaV":
pass
else:
val = np.asanyarray(val, dtype=np.float64)
return val
def _check_for_masked_and_fill(val, val2):
"""
If ``val`` or ``val2`` are masked arrays then fill them and cast
to ndarray.
Returns a mask corresponding to the logical-or of masked elements
in ``val`` and ``val2``. If neither is masked then the return ``mask``
is ``None``.
If either ``val`` or ``val2`` are masked then they are replaced
with filled versions of themselves.
Parameters
----------
val : ndarray or MaskedArray
Input val
val2 : ndarray or MaskedArray
Input val2
Returns
-------
mask, val, val2: ndarray or None
Mask: (None or bool ndarray), val, val2: ndarray
"""
def get_as_filled_ndarray(mask, val):
"""
Fill the given MaskedArray ``val`` from the first non-masked
element in the array. This ensures that upstream Time initialization
will succeed.
Note that nothing happens if there are no masked elements.
"""
fill_value = None
if np.any(val.mask):
# Final mask is the logical-or of inputs
mask = mask | val.mask
# First unmasked element. If all elements are masked then
# use fill_value=None from above which will use val.fill_value.
# As long as the user has set this appropriately then all will
# be fine.
val_unmasked = val.compressed() # 1-d ndarray of unmasked values
if len(val_unmasked) > 0:
fill_value = val_unmasked[0]
# Fill the input ``val``. If fill_value is None then this just returns
# an ndarray view of val (no copy).
val = val.filled(fill_value)
return mask, val
mask = False
if isinstance(val, np.ma.MaskedArray):
mask, val = get_as_filled_ndarray(mask, val)
if isinstance(val2, np.ma.MaskedArray):
mask, val2 = get_as_filled_ndarray(mask, val2)
return mask, val, val2
class OperandTypeError(TypeError):
def __init__(self, left, right, op=None):
op_string = "" if op is None else f" for {op}"
super().__init__(
"Unsupported operand type(s){}: '{}' and '{}'".format(
op_string, left.__class__.__name__, right.__class__.__name__
)
)
def _check_leapsec():
global _LEAP_SECONDS_CHECK
if _LEAP_SECONDS_CHECK != _LeapSecondsCheck.DONE:
with _LEAP_SECONDS_LOCK:
# There are three ways we can get here:
# 1. First call (NOT_STARTED).
# 2. Re-entrant call (RUNNING). We skip the initialisation
# and don't worry about leap second errors.
# 3. Another thread which raced with the first call
# (RUNNING). The first thread has relinquished the
# lock to us, so initialization is complete.
if _LEAP_SECONDS_CHECK == _LeapSecondsCheck.NOT_STARTED:
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.RUNNING
update_leap_seconds()
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.DONE
def update_leap_seconds(files=None):
"""If the current ERFA leap second table is out of date, try to update it.
Uses `astropy.utils.iers.LeapSeconds.auto_open` to try to find an
up-to-date table. See that routine for the definition of "out of date".
In order to make it safe to call this any time, all exceptions are turned
into warnings.
Parameters
----------
files : list of path-like, optional
List of files/URLs to attempt to open. By default, uses the list defined by
`astropy.utils.iers.LeapSeconds.auto_open`, which includes the table
used by ERFA itself, so if that is up to date, nothing will happen.
Returns
-------
n_update : int
Number of items updated.
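Examples
--------
A brief sketch; the return value depends on whether the local ERFA table
is already up to date, so it is not checked here::
>>> from astropy.time import update_leap_seconds
>>> update_leap_seconds()  # doctest: +SKIP
0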
"""
try:
from astropy.utils import iers
table = iers.LeapSeconds.auto_open(files)
return erfa.leap_seconds.update(table)
except Exception as exc:
warn(
f"leap-second auto-update failed due to the following exception: {exc!r}",
AstropyWarning,
)
return 0
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import datetime
import fnmatch
import re
import time
import warnings
from collections import OrderedDict, defaultdict
from decimal import Decimal
import erfa
import numpy as np
import astropy.units as u
from astropy.utils.decorators import classproperty, lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from . import _parse_times, conf, utils
from .utils import day_frac, quantity_day_frac, two_product, two_sum
__all__ = [
"AstropyDatetimeLeapSecondWarning",
"TimeFormat",
"TimeJD",
"TimeMJD",
"TimeFromEpoch",
"TimeUnix",
"TimeUnixTai",
"TimeCxcSec",
"TimeGPS",
"TimeDecimalYear",
"TimePlotDate",
"TimeUnique",
"TimeDatetime",
"TimeString",
"TimeISO",
"TimeISOT",
"TimeFITS",
"TimeYearDayTime",
"TimeEpochDate",
"TimeBesselianEpoch",
"TimeJulianEpoch",
"TimeDeltaFormat",
"TimeDeltaSec",
"TimeDeltaJD",
"TimeEpochDateString",
"TimeBesselianEpochString",
"TimeJulianEpochString",
"TIME_FORMATS",
"TIME_DELTA_FORMATS",
"TimezoneInfo",
"TimeDeltaDatetime",
"TimeDatetime64",
"TimeYMDHMS",
"TimeNumeric",
"TimeDeltaNumeric",
]
__doctest_skip__ = ["TimePlotDate"]
# These both get filled in at end after TimeFormat subclasses defined.
# Use an OrderedDict to fix the order in which formats are tried.
# This ensures, e.g., that 'isot' gets tried before 'fits'.
TIME_FORMATS = OrderedDict()
TIME_DELTA_FORMATS = OrderedDict()
# Translations between deprecated FITS timescales defined by
# Rots et al. 2015, A&A 574:A36, and timescales used here.
FITS_DEPRECATED_SCALES = {
"TDT": "tt",
"ET": "tt",
"GMT": "utc",
"UT": "utc",
"IAT": "tai",
}
class AstropyDatetimeLeapSecondWarning(AstropyUserWarning):
"""Warning for leap second when converting to datetime.datetime object."""
def _regexify_subfmts(subfmts):
"""
Iterate through each of the sub-formats and try substituting simple
regular expressions for the strptime codes for year, month, day-of-month,
hour, minute, second. If no % characters remain then turn the final string
into a compiled regex. This assumes time formats do not have a % in them.
This is done both to speed up parsing of strings and to allow mixed formats
where strptime does not quite work well enough.
"""
new_subfmts = []
for subfmt_tuple in subfmts:
subfmt_in = subfmt_tuple[1]
if isinstance(subfmt_in, str):
for strptime_code, regex in (
("%Y", r"(?P<year>\d\d\d\d)"),
("%m", r"(?P<mon>\d{1,2})"),
("%d", r"(?P<mday>\d{1,2})"),
("%H", r"(?P<hour>\d{1,2})"),
("%M", r"(?P<min>\d{1,2})"),
("%S", r"(?P<sec>\d{1,2})"),
):
subfmt_in = subfmt_in.replace(strptime_code, regex)
if "%" not in subfmt_in:
subfmt_tuple = (
subfmt_tuple[0],
re.compile(subfmt_in + "$"),
subfmt_tuple[2],
)
new_subfmts.append(subfmt_tuple)
return tuple(new_subfmts)
class TimeFormat:
"""
Base class for time representations.
Parameters
----------
val1 : numpy ndarray, list, number, str, or bytes
Values to initialize the time or times. Bytes are decoded as ascii.
Quantities with time units are allowed for formats where the
interpretation is unambiguous.
val2 : numpy ndarray, list, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
scale : str
Time scale of input value(s)
precision : int
Precision for seconds as floating point
in_subfmt : str
Select subformat for inputting string times
out_subfmt : str
Select subformat for outputting string times
from_jd : bool
If true then val1, val2 are jd1, jd2
"""
_default_scale = "utc" # As of astropy 0.4
subfmts = ()
_registry = TIME_FORMATS
def __init__(
self, val1, val2, scale, precision, in_subfmt, out_subfmt, from_jd=False
):
self.scale = scale # validation of scale done later with _check_scale
self.precision = precision
self.in_subfmt = in_subfmt
self.out_subfmt = out_subfmt
self._jd1, self._jd2 = None, None
if from_jd:
self.jd1 = val1
self.jd2 = val2
else:
val1, val2 = self._check_val_type(val1, val2)
self.set_jds(val1, val2)
def __init_subclass__(cls, **kwargs):
# Register time formats that define a name, but leave out astropy_time since
# it is not a user-accessible format and is only used for initialization into
# a different format.
if "name" in cls.__dict__ and cls.name != "astropy_time":
# FIXME: check here that we're not introducing a collision with
# an existing method or attribute; problem is it could be either
# astropy.time.Time or astropy.time.TimeDelta, and at the point
# where this is run neither of those classes have necessarily been
# constructed yet.
if "value" in cls.__dict__ and not hasattr(cls.value, "fget"):
raise ValueError("If defined, 'value' must be a property")
cls._registry[cls.name] = cls
# If this class defines its own subfmts, preprocess the definitions.
if "subfmts" in cls.__dict__:
cls.subfmts = _regexify_subfmts(cls.subfmts)
return super().__init_subclass__(**kwargs)
@classmethod
def _get_allowed_subfmt(cls, subfmt):
"""Get an allowed subfmt for this class, either the input ``subfmt``
if this is valid or '*' as a default. This method gets used in situations
where the format of an existing Time object is changing and so the
out_ or in_subfmt may need to be coerced to the default '*' if that
``subfmt`` is no longer valid.
"""
try:
cls._select_subfmts(subfmt)
except ValueError:
subfmt = "*"
return subfmt
@property
def in_subfmt(self):
return self._in_subfmt
@in_subfmt.setter
def in_subfmt(self, subfmt):
# Validate subfmt value for this class, raises ValueError if not.
self._select_subfmts(subfmt)
self._in_subfmt = subfmt
@property
def out_subfmt(self):
return self._out_subfmt
@out_subfmt.setter
def out_subfmt(self, subfmt):
# Validate subfmt value for this class, raises ValueError if not.
self._select_subfmts(subfmt)
self._out_subfmt = subfmt
@property
def jd1(self):
return self._jd1
@jd1.setter
def jd1(self, jd1):
self._jd1 = _validate_jd_for_storage(jd1)
if self._jd2 is not None:
self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2)
@property
def jd2(self):
return self._jd2
@jd2.setter
def jd2(self, jd2):
self._jd2 = _validate_jd_for_storage(jd2)
if self._jd1 is not None:
self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2)
def __len__(self):
return len(self.jd1)
@property
def scale(self):
"""Time scale."""
self._scale = self._check_scale(self._scale)
return self._scale
@scale.setter
def scale(self, val):
self._scale = val
def mask_if_needed(self, value):
if self.masked:
value = np.ma.array(value, mask=self.mask, copy=False)
return value
@property
def mask(self):
if "mask" not in self.cache:
self.cache["mask"] = np.isnan(self.jd2)
if self.cache["mask"].shape:
self.cache["mask"].flags.writeable = False
return self.cache["mask"]
@property
def masked(self):
if "masked" not in self.cache:
self.cache["masked"] = bool(np.any(self.mask))
return self.cache["masked"]
@property
def jd2_filled(self):
return np.nan_to_num(self.jd2) if self.masked else self.jd2
@property
def precision(self):
return self._precision
@precision.setter
def precision(self, val):
# Verify precision is 0-9 (inclusive)
if not isinstance(val, int) or val < 0 or val > 9:
raise ValueError("precision attribute must be an int between 0 and 9")
self._precision = val
@lazyproperty
def cache(self):
"""
Return the cache associated with this instance.
"""
return defaultdict(dict)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes."""
# val1 cannot contain nan, but val2 can contain nan
isfinite1 = np.isfinite(val1)
if val1.size > 1: # Calling .all() on a scalar is surprisingly slow
isfinite1 = (
isfinite1.all()
) # Note: arr.all() about 3x faster than np.all(arr)
elif val1.size == 0:
isfinite1 = False
ok1 = (
val1.dtype.kind == "f"
and val1.dtype.itemsize >= 8
and isfinite1
or val1.size == 0
)
ok2 = (
val2 is None
or (
val2.dtype.kind == "f"
and val2.dtype.itemsize >= 8
and not np.any(np.isinf(val2))
)
or val2.size == 0
)
if not (ok1 and ok2):
raise TypeError(
f"Input values for {self.name} class must be finite doubles"
)
if getattr(val1, "unit", None) is not None:
# Convert any quantity-likes to days first, attempting to be
# careful with the conversion, so that, e.g., large numbers of
# seconds get converted without losing precision because
# 1/86400 is not exactly representable as a float.
val1 = u.Quantity(val1, copy=False)
if val2 is not None:
val2 = u.Quantity(val2, copy=False)
try:
val1, val2 = quantity_day_frac(val1, val2)
except u.UnitsError:
raise u.UnitConversionError(
"only quantities with time units can be "
"used to instantiate Time instances."
)
# We now have days, but the format may expect another unit.
# On purpose, multiply with 1./day_unit because typically it is
# 1./erfa.DAYSEC, and inverting it recovers the integer.
# (This conversion will get undone in format's set_jds, hence
# there may be room for optimizing this.)
factor = 1.0 / getattr(self, "unit", 1.0)
if factor != 1.0:
val1, carry = two_product(val1, factor)
carry += val2 * factor
val1, val2 = two_sum(val1, carry)
elif getattr(val2, "unit", None) is not None:
raise TypeError("Cannot mix float and Quantity inputs")
if val2 is None:
val2 = np.array(0, dtype=val1.dtype)
def asarray_or_scalar(val):
"""
Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray
or a Python or numpy scalar.
"""
return np.asarray(val) if isinstance(val, np.ndarray) else val
return asarray_or_scalar(val1), asarray_or_scalar(val2)
def _check_scale(self, scale):
"""
Return a validated scale value.
If there is a class attribute 'scale' then that defines the default /
required time scale for this format. In this case if a scale value was
provided that needs to match the class default, otherwise return
the class default.
Otherwise just make sure that scale is in the allowed list of
scales. Provide a different error message if `None` (no value) was
supplied.
"""
if scale is None:
scale = self._default_scale
if scale not in TIME_SCALES:
raise ScaleValueError(
f"Scale value '{scale}' not in allowed values {TIME_SCALES}"
)
return scale
def set_jds(self, val1, val2):
"""
Set internal jd1 and jd2 from val1 and val2. Must be provided
by derived classes.
"""
raise NotImplementedError
def to_value(self, parent=None, out_subfmt=None):
"""
Return time representation from internal jd1 and jd2 in specified
``out_subfmt``.
This is the base method that ignores ``parent`` and uses the ``value``
property to compute the output. This is done by temporarily setting
``self.out_subfmt`` and calling ``self.value``. This is required for
legacy Format subclasses prior to astropy 4.0. New code should instead
implement the value functionality in ``to_value()`` and then make the
``value`` property be a simple call to ``self.to_value()``.
Parameters
----------
parent : object
Parent `~astropy.time.Time` object associated with this
`~astropy.time.TimeFormat` object
out_subfmt : str or None
Output subformat (use existing self.out_subfmt if `None`)
Returns
-------
value : numpy.array, numpy.ma.array
Array or masked array of formatted time representation values
"""
# Get value via ``value`` property, overriding out_subfmt temporarily if needed.
if out_subfmt is not None:
out_subfmt_orig = self.out_subfmt
try:
self.out_subfmt = out_subfmt
value = self.value
finally:
self.out_subfmt = out_subfmt_orig
else:
value = self.value
return self.mask_if_needed(value)
@property
def value(self):
raise NotImplementedError
@classmethod
def _select_subfmts(cls, pattern):
"""
Return a list of subformats where name matches ``pattern`` using
fnmatch.
If no subformat matches pattern then a ValueError is raised. A special
case is a format with no allowed subformats, i.e. subfmts=(), and
pattern='*'. This is OK and happens when this method is used for
validation of an out_subfmt.
"""
if not isinstance(pattern, str):
raise ValueError("subfmt attribute must be a string")
elif pattern == "*":
return cls.subfmts
subfmts = [x for x in cls.subfmts if fnmatch.fnmatchcase(x[0], pattern)]
if len(subfmts) == 0:
if len(cls.subfmts) == 0:
raise ValueError(f"subformat not allowed for format {cls.name}")
else:
subfmt_names = [x[0] for x in cls.subfmts]
raise ValueError(
f"subformat {pattern!r} must match one of "
f"{subfmt_names} for format {cls.name}"
)
return subfmts
class TimeNumeric(TimeFormat):
subfmts = (
("float", np.float64, None, np.add),
("long", np.longdouble, utils.longdouble_to_twoval, utils.twoval_to_longdouble),
("decimal", np.object_, utils.decimal_to_twoval, utils.twoval_to_decimal),
("str", np.str_, utils.decimal_to_twoval, utils.twoval_to_string),
("bytes", np.bytes_, utils.bytes_to_twoval, utils.twoval_to_bytes),
)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes."""
# Save original state of val2 because the super()._check_val_type below
# may change val2 from None to np.array(0). The value is saved in order
# to prevent a useless and slow call to np.result_type() below in the
# most common use-case of providing only val1.
orig_val2_is_none = val2 is None
if val1.dtype.kind == "f":
val1, val2 = super()._check_val_type(val1, val2)
elif not orig_val2_is_none or not (
val1.dtype.kind in "US"
or (
val1.dtype.kind == "O"
and all(isinstance(v, Decimal) for v in val1.flat)
)
):
raise TypeError(
f"for {self.name} class, input should be doubles, string, or Decimal, "
"and second values are only allowed for doubles."
)
val_dtype = (
val1.dtype if orig_val2_is_none else np.result_type(val1.dtype, val2.dtype)
)
subfmts = self._select_subfmts(self.in_subfmt)
for subfmt, dtype, convert, _ in subfmts:
if np.issubdtype(val_dtype, dtype):
break
else:
raise ValueError("input type not among selected sub-formats.")
if convert is not None:
try:
val1, val2 = convert(val1, val2)
except Exception:
raise TypeError(
f"for {self.name} class, input should be (long) doubles, string, "
"or Decimal, and second values are only allowed for "
"(long) doubles."
)
return val1, val2
def to_value(self, jd1=None, jd2=None, parent=None, out_subfmt=None):
"""
Return time representation from internal jd1 and jd2.
Subclasses that require ``parent`` or to adjust the jds should
override this method.
"""
# TODO: do this in __init_subclass__?
if self.__class__.value.fget is not self.__class__.to_value:
return self.value
if jd1 is None:
jd1 = self.jd1
if jd2 is None:
jd2 = self.jd2
if out_subfmt is None:
out_subfmt = self.out_subfmt
subfmt = self._select_subfmts(out_subfmt)[0]
kwargs = {}
if subfmt[0] in ("str", "bytes"):
unit = getattr(self, "unit", 1)
digits = int(np.ceil(np.log10(unit / np.finfo(float).eps)))
# TODO: allow a way to override the format.
kwargs["fmt"] = f".{digits}f"
value = subfmt[3](jd1, jd2, **kwargs)
return self.mask_if_needed(value)
value = property(to_value)
class TimeJD(TimeNumeric):
"""
Julian Date time format.
This represents the number of days since the beginning of
the Julian Period.
For example, 2451544.5 in JD is midnight on January 1, 2000.
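Examples
--------
A brief, illustrative sketch::
>>> from astropy.time import Time
>>> t = Time(2451544.5, format='jd', scale='utc')
>>> t.iso
'2000-01-01 00:00:00.000'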
"""
name = "jd"
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2)
class TimeMJD(TimeNumeric):
"""
Modified Julian Date time format.
This represents the number of days since midnight on November 17, 1858.
For example, 51544.0 in MJD is midnight on January 1, 2000.
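Examples
--------
A brief, illustrative sketch::
>>> from astropy.time import Time
>>> t = Time(51544.0, format='mjd', scale='utc')
>>> t.iso
'2000-01-01 00:00:00.000'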
"""
name = "mjd"
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
jd1, jd2 = day_frac(val1, val2)
jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h).
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, **kwargs):
jd1 = self.jd1 - erfa.DJM0 # This cannot lose precision.
jd2 = self.jd2
return super().to_value(jd1=jd1, jd2=jd2, **kwargs)
value = property(to_value)
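# Illustrative sketch (assumes the public Time API): the JD and MJD formats
# above differ only by the fixed offset erfa.DJM0 = 2400000.5 days.
#
#     >>> from astropy.time import Time
#     >>> t = Time("2000-01-01 00:00:00", scale="utc")
#     >>> t.jd - t.mjd  # doctest: +FLOAT_CMP
#     2400000.5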
def _check_val_type_not_quantity(format_name, val1, val2):
# If val2 is a Quantity, the super() call that follows this check
# will raise a TypeError.
if hasattr(val1, "to") and getattr(val1, "unit", None) is not None:
raise ValueError(
f"cannot use Quantities for {format_name!r} format, as the unit of year "
"is defined as 365.25 days, while the length of year is variable "
"in this format. Use float instead."
)
class TimeDecimalYear(TimeNumeric):
"""
Time as a decimal year, with integer values corresponding to midnight
of the first day of each year.
For example, 2000.5 corresponds to the ISO time '2000-07-02 00:00:00'.
Since for this format the length of the year varies between 365 and
366 days, it is not possible to use Quantity input, in which a year
is always 365.25 days.
"""
name = "decimalyear"
def _check_val_type(self, val1, val2):
_check_val_type_not_quantity(self.name, val1, val2)
# if val2 is a Quantity, super() will raise a TypeError.
return super()._check_val_type(val1, val2)
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
sum12, err12 = two_sum(val1, val2)
iy_start = np.trunc(sum12).astype(int)
extra, y_frac = two_sum(sum12, -iy_start)
y_frac += extra + err12
val = (val1 + val2).astype(np.double)
iy_start = np.trunc(val).astype(int)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(y_frac)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode("ascii")
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday, ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday, ihr, imin, isec)
t_start = Time(jd1_start, jd2_start, scale=self.scale, format="jd")
t_end = Time(jd1_end, jd2_end, scale=self.scale, format="jd")
t_frac = t_start + (t_end - t_start) * y_frac
self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)
def to_value(self, **kwargs):
scale = self.scale.upper().encode("ascii")
iy_start, ims, ids, ihmsfs = erfa.d2dtf(
scale, 0, self.jd1, self.jd2_filled # precision=0
)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(self.jd1)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode("ascii")
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday, ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday, ihr, imin, isec)
# Trying to be precise, but more than float64 not useful.
dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start)
dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start)
decimalyear = iy_start + dt / dt_end
return super().to_value(jd1=decimalyear, jd2=np.float64(0.0), **kwargs)
value = property(to_value)
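# Illustrative sketch (assumes the public Time API), matching the docstring
# example above for the 366-day leap year 2000:
#
#     >>> Time("2000-07-02 00:00:00", scale="utc").decimalyear  # doctest: +FLOAT_CMP
#     2000.5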
class TimeFromEpoch(TimeNumeric):
"""
Base class for times that represent the interval from a particular
epoch as a numerical multiple of a unit time interval (e.g. seconds
or days).
"""
@classproperty(lazy=True)
def _epoch(cls):
# Ideally we would use `def epoch(cls)` here and not have the instance
# property below. However, this breaks the sphinx API docs generation
# in a way that was not resolved. See #10406 for details.
return Time(
cls.epoch_val,
cls.epoch_val2,
scale=cls.epoch_scale,
format=cls.epoch_format,
)
@property
def epoch(self):
"""Reference epoch time from which the time interval is measured."""
return self._epoch
def set_jds(self, val1, val2):
"""
Initialize the internal jd1 and jd2 attributes given val1 and val2.
For a TimeFromEpoch subclass like TimeUnix these will be floats giving
the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00).
"""
# Form new JDs based on epoch time + time from epoch (converted to JD).
# One subtlety that might not be obvious is that 1.000 Julian days in
# UTC can be 86400 or 86401 seconds. For the TimeUnix format the
# assumption is that every day is exactly 86400 seconds, so this is, in
# principle, doing the math incorrectly, *except* that it matches the
# definition of Unix time which does not include leap seconds.
# note: use divisor=1./self.unit, since this is either 1 or 1/86400,
# and 1/86400 is not exactly representable as a float64, so multiplying
# by that will cause rounding errors. (But inverting it as a float64
# recovers the exact number)
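# Hedged numeric sketch of that claim:
#     x = 1.0 / 86400.0   # not exactly representable as float64
#     1.0 / x == 86400.0  # True: inverting recovers the exact divisor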
day, frac = day_frac(val1, val2, divisor=1.0 / self.unit)
jd1 = self.epoch.jd1 + day
jd2 = self.epoch.jd2 + frac
# For the usual case that scale is the same as epoch_scale, we only need
# to ensure that abs(jd2) <= 0.5. Since abs(self.epoch.jd2) <= 0.5 and
# abs(frac) <= 0.5, we can do simple (fast) checks and arithmetic here
# without another call to day_frac(). Note also that `round(jd2.item())`
# is about 10x faster than `np.round(jd2)` for a scalar.
if self.epoch.scale == self.scale:
jd1_extra = np.round(jd2) if jd2.shape else round(jd2.item())
jd1 += jd1_extra
jd2 -= jd1_extra
self.jd1, self.jd2 = jd1, jd2
return
# Create a temporary Time object corresponding to the new (jd1, jd2) in
# the epoch scale (e.g. UTC for TimeUnix) then convert that to the
# desired time scale for this object.
#
# A known limitation is that the transform from self.epoch_scale to
# self.scale cannot involve any metadata like lat or lon.
try:
tm = getattr(
Time(jd1, jd2, scale=self.epoch_scale, format="jd"), self.scale
)
except Exception as err:
raise ScaleValueError(
f"Cannot convert from '{self.name}' epoch scale '{self.epoch_scale}' "
f"to specified scale '{self.scale}', got error:\n{err}"
) from err
self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2)
def to_value(self, parent=None, **kwargs):
# Make sure that scale is the same as epoch scale so we can just
# subtract the epoch and convert
if self.scale != self.epoch_scale:
if parent is None:
raise ValueError("cannot compute value without parent Time object")
try:
tm = getattr(parent, self.epoch_scale)
except Exception as err:
raise ScaleValueError(
f"Cannot convert from '{self.name}' epoch scale "
f"'{self.epoch_scale}' to specified scale '{self.scale}', "
f"got error:\n{err}"
) from err
jd1, jd2 = tm._time.jd1, tm._time.jd2
else:
jd1, jd2 = self.jd1, self.jd2
# This factor is guaranteed to be exactly representable, which
# means time_from_epoch1 is calculated exactly.
factor = 1.0 / self.unit
time_from_epoch1 = (jd1 - self.epoch.jd1) * factor
time_from_epoch2 = (jd2 - self.epoch.jd2) * factor
return super().to_value(jd1=time_from_epoch1, jd2=time_from_epoch2, **kwargs)
value = property(to_value)
@property
def _default_scale(self):
return self.epoch_scale
class TimeUnix(TimeFromEpoch):
"""
Unix time (UTC): seconds from 1970-01-01 00:00:00 UTC, ignoring leap seconds.
For example, 946684800.0 in Unix time is midnight on January 1, 2000.
NOTE: this quantity is not exactly unix time and differs from the strict
POSIX definition by up to 1 second on days with a leap second. POSIX
unix time actually jumps backward by 1 second at midnight on leap second
days while this class value is monotonically increasing at 86400 seconds
per UTC day.
"""
name = "unix"
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = "1970-01-01 00:00:00"
epoch_val2 = None
epoch_scale = "utc"
epoch_format = "iso"
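# Illustrative sketch (assumes the public Time API), matching the docstring
# example above:
#
#     >>> Time("2000-01-01 00:00:00", scale="utc").unix  # doctest: +FLOAT_CMP
#     946684800.0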
class TimeUnixTai(TimeUnix):
"""
Unix time (TAI): SI seconds elapsed since 1970-01-01 00:00:00 TAI (see caveats).
This will generally differ from standard (UTC) Unix time by the cumulative
integral number of leap seconds introduced into UTC since 1972-01-01 UTC
plus the initial offset of 10 seconds at that date.
This convention matches the definition of linux CLOCK_TAI
(https://www.cl.cam.ac.uk/~mgk25/posix-clocks.html),
and the Precision Time Protocol
(https://en.wikipedia.org/wiki/Precision_Time_Protocol), which
is also used by the White Rabbit protocol in High Energy Physics:
https://white-rabbit.web.cern.ch.
Caveats:
- Before 1972, fractional adjustments to UTC were made, so the difference
between ``unix`` and ``unix_tai`` time is no longer an integer.
- Because of the fractional adjustments, to be very precise, ``unix_tai``
is the number of seconds since ``1970-01-01 00:00:00 TAI`` or equivalently
``1969-12-31 23:59:51.999918 UTC``. The difference between TAI and UTC
at that epoch was 8.000082 sec.
- On the day of a positive leap second the difference between ``unix`` and
``unix_tai`` times increases linearly through the day by 1.0. See also the
documentation for the `~astropy.time.TimeUnix` class.
- Negative leap seconds are possible, though none have been needed to date.
Examples
--------
>>> # get the current offset between TAI and UTC
>>> from astropy.time import Time
>>> t = Time('2020-01-01', scale='utc')
>>> t.unix_tai - t.unix
37.0
>>> # Before 1972, the offset between TAI and UTC was not integer
>>> t = Time('1970-01-01', scale='utc')
>>> t.unix_tai - t.unix # doctest: +FLOAT_CMP
8.000082
>>> # Initial offset of 10 seconds in 1972
>>> t = Time('1972-01-01', scale='utc')
>>> t.unix_tai - t.unix
10.0
"""
name = "unix_tai"
epoch_val = "1970-01-01 00:00:00"
epoch_scale = "tai"
class TimeCxcSec(TimeFromEpoch):
"""
Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT.
For example, 63072064.184 is midnight on January 1, 2000.
"""
name = "cxcsec"
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = "1998-01-01 00:00:00"
epoch_val2 = None
epoch_scale = "tt"
epoch_format = "iso"
class TimeGPS(TimeFromEpoch):
"""GPS time: seconds from 1980-01-06 00:00:00 UTC
For example, 630720013.0 is midnight on January 1, 2000.
Notes
-----
This implementation is strictly a representation of the number of seconds
(including leap seconds) since midnight UTC on 1980-01-06. GPS can also be
considered as a time scale which is ahead of TAI by a fixed offset
(to within about 100 nanoseconds).
For details, see https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer
"""
name = "gps"
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = "1980-01-06 00:00:19"
# above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai
epoch_val2 = None
epoch_scale = "tai"
epoch_format = "iso"
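# Illustrative sketch (assumes the public Time API), matching the docstring
# example above:
#
#     >>> Time("2000-01-01 00:00:00", scale="utc").gps  # doctest: +FLOAT_CMP
#     630720013.0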
class TimePlotDate(TimeFromEpoch):
"""
Matplotlib `~matplotlib.pyplot.plot_date` input:
1 + number of days from 0001-01-01 00:00:00 UTC.
This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date`
function::
>>> import matplotlib.pyplot as plt
>>> jyear = np.linspace(2000, 2001, 20)
>>> t = Time(jyear, format='jyear', scale='utc')
>>> plt.plot_date(t.plot_date, jyear)
>>> plt.gcf().autofmt_xdate() # orient date labels at a slant
>>> plt.draw()
For example, 730120.0003703703 is midnight on January 1, 2000.
"""
# This corresponds to the zero reference time for matplotlib plot_date().
# Note that TAI and UTC are equivalent at the reference time.
name = "plot_date"
unit = 1.0
epoch_val = 1721424.5 # Time('0001-01-01 00:00:00', scale='tai').jd - 1
epoch_val2 = None
epoch_scale = "utc"
epoch_format = "jd"
@lazyproperty
def epoch(self):
"""Reference epoch time from which the time interval is measured."""
try:
# Matplotlib >= 3.3 has a get_epoch() function
from matplotlib.dates import get_epoch
except ImportError:
# If no get_epoch() then the epoch is '0001-01-01'
_epoch = self._epoch
else:
# Get the matplotlib date epoch as an ISOT string in UTC
epoch_utc = get_epoch()
from erfa import ErfaWarning
with warnings.catch_warnings():
# Catch possible dubious year warnings from erfa
warnings.filterwarnings("ignore", category=ErfaWarning)
_epoch = Time(epoch_utc, scale="utc", format="isot")
_epoch.format = "jd"
return _epoch
class TimeStardate(TimeFromEpoch):
"""
Stardate: date units from 2318-07-05 12:00:00 UTC.
For example, stardate 41153.7 is 00:52 on April 30, 2363.
See http://trekguide.com/Stardates.htm#TNG for calculations and reference points.
"""
name = "stardate"
unit = 0.397766856 # Stardate units per day
epoch_val = "2318-07-05 11:00:00" # Date and time of stardate 00000.00
epoch_val2 = None
epoch_scale = "tai"
epoch_format = "iso"
class TimeUnique(TimeFormat):
"""
Base class for time formats that can uniquely create a time object
without requiring an explicit format specifier. This class does
nothing but provide inheritance to identify a class as unique.
"""
class TimeAstropyTime(TimeUnique):
"""
Instantiate date from an Astropy Time object (or list thereof).
This is purely for instantiating from a Time object. The output
format is the same as the first time instance.
"""
name = "astropy_time"
def __new__(
cls, val1, val2, scale, precision, in_subfmt, out_subfmt, from_jd=False
):
"""
Use __new__ instead of __init__ to output a class instance that
is the same as the class of the first Time object in the list.
"""
val1_0 = val1.flat[0]
if not (
isinstance(val1_0, Time)
and all(type(val) is type(val1_0) for val in val1.flat)
):
raise TypeError(
f"Input values for {cls.name} class must all be the same "
"astropy Time type."
)
if scale is None:
scale = val1_0.scale
if val1.shape:
vals = [getattr(val, scale)._time for val in val1]
jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals])
jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals])
# Collect individual location values and merge into a single location.
if any(tm.location is not None for tm in val1):
if any(tm.location is None for tm in val1):
raise ValueError(
"cannot concatenate times unless all locations "
"are set or no locations are set"
)
locations = []
for tm in val1:
location = np.broadcast_to(
tm.location, tm._time.jd1.shape, subok=True
)
locations.append(np.atleast_1d(location))
location = np.concatenate(locations)
else:
location = None
else:
val = getattr(val1_0, scale)._time
jd1, jd2 = val.jd1, val.jd2
location = val1_0.location
OutTimeFormat = val1_0._time.__class__
self = OutTimeFormat(
jd1, jd2, scale, precision, in_subfmt, out_subfmt, from_jd=True
)
# Make a temporary hidden attribute to transfer location back to the
# parent Time object where it needs to live.
self._location = location
return self
class TimeDatetime(TimeUnique):
"""
Represent date as Python standard library `~datetime.datetime` object.
Example::
>>> from astropy.time import Time
>>> from datetime import datetime
>>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc')
>>> t.iso
'2000-01-02 12:00:00.000'
>>> t.tt.datetime
datetime.datetime(2000, 1, 2, 12, 1, 4, 184000)
"""
name = "datetime"
def _check_val_type(self, val1, val2):
if not all(isinstance(val, datetime.datetime) for val in val1.flat):
raise TypeError(
f"Input values for {self.name} class must be datetime objects"
)
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def set_jds(self, val1, val2):
"""Convert datetime object contained in val1 to jd1, jd2."""
# Iterate through the datetime objects, getting year, month, etc.
iterator = np.nditer(
[val1, None, None, None, None, None, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=[None] + 5 * [np.intc] + [np.double],
)
for val, iy, im, id, ihr, imin, dsec in iterator:
dt = val.item()
if dt.tzinfo is not None:
dt = (dt - dt.utcoffset()).replace(tzinfo=None)
iy[...] = dt.year
im[...] = dt.month
id[...] = dt.day
ihr[...] = dt.hour
imin[...] = dt.minute
dsec[...] = dt.second + dt.microsecond / 1e6
jd1, jd2 = erfa.dtf2d(
self.scale.upper().encode("ascii"), *iterator.operands[1:]
)
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(
self, timezone=None, leap_second_strict="raise", parent=None, out_subfmt=None
):
"""
Convert to (potentially timezone-aware) `~datetime.datetime` object.
If ``timezone`` is not ``None``, return a timezone-aware datetime object.
Since the `~datetime.datetime` class does not natively handle leap seconds, the
behavior when converting a time within a leap second is controlled by the
``leap_second_strict`` argument. For example::
>>> from astropy.time import Time
>>> t = Time("2015-06-30 23:59:60.500")
>>> print(t.to_datetime(leap_second_strict='silent'))
2015-07-01 00:00:00.500000
Parameters
----------
timezone : {`~datetime.tzinfo`, None}, optional
If not `None`, return timezone-aware datetime.
leap_second_strict : str, optional
If ``raise`` (default), raise an exception if the time is within a leap
second. If ``warn`` then issue a warning. If ``silent`` then silently
handle the leap second.
Returns
-------
`~datetime.datetime`
If ``timezone`` is not ``None``, output will be timezone-aware.
"""
if out_subfmt is not None:
# Out_subfmt not allowed for this format, so raise the standard
# exception by trying to validate the value.
self._select_subfmts(out_subfmt)
if timezone is not None:
if self._scale != "utc":
raise ScaleValueError(
f"scale is {self._scale}, must be 'utc' when timezone is supplied."
)
# Rather than define a value property directly, we have a function,
# since we want to be able to pass in timezone information.
scale = self.scale.upper().encode("ascii")
iys, ims, ids, ihmsfs = erfa.d2dtf(
scale, 6, self.jd1, self.jd2_filled # 6 for microsec
)
ihrs = ihmsfs["h"]
imins = ihmsfs["m"]
isecs = ihmsfs["s"]
ifracs = ihmsfs["f"]
iterator = np.nditer(
[iys, ims, ids, ihrs, imins, isecs, ifracs, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=7 * [None] + [object],
)
for iy, im, id, ihr, imin, isec, ifracsec, out in iterator:
if isec >= 60:
isec = isec - 1
in_leap_second = True
else:
in_leap_second = False
if timezone is not None:
dt = datetime.datetime(
iy, im, id, ihr, imin, isec, ifracsec, tzinfo=TimezoneInfo()
).astimezone(timezone)
else:
dt = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec)
if in_leap_second:
dt += datetime.timedelta(seconds=1)
msg = (
f"Time {dt} is within a leap second but `datetime` does not "
"support leap seconds. Use the `leap_second_strict` argument "
"of the `Time.to_datetime()` method with value of 'raise', 'warn', "
"or 'silent' to control how leap seconds are handled."
)
if leap_second_strict == "raise":
raise ValueError(msg)
elif leap_second_strict == "warn":
warnings.warn(msg, AstropyDatetimeLeapSecondWarning)
elif leap_second_strict != "silent":
raise ValueError(
f"leap_second_strict must be 'raise', 'warn', or 'silent', "
f"not '{leap_second_strict}'"
)
out[...] = dt
return self.mask_if_needed(iterator.operands[-1])
value = property(to_value)
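# Illustrative sketch of the timezone handling in to_value above (assumes the
# public Time.to_datetime API together with the standard library
# ``datetime.timezone``):
#
#     >>> from datetime import timedelta, timezone
#     >>> t = Time("2000-01-02 12:00:00", scale="utc")
#     >>> print(t.to_datetime(timezone=timezone(timedelta(hours=1))))
#     2000-01-02 13:00:00+01:00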
class TimeYMDHMS(TimeUnique):
"""
ymdhms: A Time format to represent Time as year, month, day, hour,
minute, second (thus the name ymdhms).
Acceptable inputs must have keys or column names in the "YMDHMS" set of
``year``, ``month``, ``day``, ``hour``, ``minute``, ``second``:
- Dict with keys in the YMDHMS set
- NumPy structured array, record array or astropy Table, or single row
of those types, with column names in the YMDHMS set
One can supply a subset of the YMDHMS values, for instance only 'year',
'month', and 'day'. Inputs have the following defaults::
'month': 1, 'day': 1, 'hour': 0, 'minute': 0, 'second': 0
When the input is supplied as a ``dict`` then each value can be either a
scalar value or an array. The values will be broadcast to a common shape.
Example::
>>> from astropy.time import Time
>>> t = Time({'year': 2015, 'month': 2, 'day': 3,
... 'hour': 12, 'minute': 13, 'second': 14.567},
... scale='utc')
>>> t.iso
'2015-02-03 12:13:14.567'
>>> t.ymdhms.year
2015
"""
name = "ymdhms"
def _check_val_type(self, val1, val2):
"""
This checks inputs for the YMDHMS format.
It is a bit more complex than most format checkers because of the flexible
input that is allowed. Also, it actually coerces ``val1`` into an appropriate
dict of ndarrays that can be used easily by ``set_jds()``. This is useful
because it makes it easy to get default values in that routine.
Parameters
----------
val1 : ndarray or None
val2 : ndarray or None
Returns
-------
val1_as_dict, val2 : val1 as dict or None, val2 is always None
"""
if val2 is not None:
raise ValueError("val2 must be None for ymdhms format")
ymdhms = ["year", "month", "day", "hour", "minute", "second"]
if val1.dtype.names:
# Convert to a dict of ndarray
val1_as_dict = {name: val1[name] for name in val1.dtype.names}
elif val1.shape == (0,):
# Input was empty list [], so set to None and set_jds will handle this
return None, None
elif (
val1.dtype.kind == "O"
and val1.shape == ()
and isinstance(val1.item(), dict)
):
# Code gets here for input as a dict. The dict input
# can be either scalar values or N-d arrays.
# Extract the item (which is a dict) and broadcast values to the
# same shape here.
names = val1.item().keys()
values = val1.item().values()
val1_as_dict = {
name: value for name, value in zip(names, np.broadcast_arrays(*values))
}
else:
raise ValueError("input must be dict or table-like")
# Check that the key names now are good.
names = val1_as_dict.keys()
required_names = ymdhms[: len(names)]
def comma_repr(vals):
return ", ".join(repr(val) for val in vals)
bad_names = set(names) - set(ymdhms)
if bad_names:
raise ValueError(
f"{comma_repr(bad_names)} not allowed as YMDHMS key name(s)"
)
if set(names) != set(required_names):
raise ValueError(
f"for {len(names)} input key names "
f"you must supply {comma_repr(required_names)}"
)
return val1_as_dict, val2
def set_jds(self, val1, val2):
if val1 is None:
# Input was empty list []
jd1 = np.array([], dtype=np.float64)
jd2 = np.array([], dtype=np.float64)
else:
jd1, jd2 = erfa.dtf2d(
self.scale.upper().encode("ascii"),
val1["year"],
val1.get("month", 1),
val1.get("day", 1),
val1.get("hour", 0),
val1.get("minute", 0),
val1.get("second", 0),
)
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
scale = self.scale.upper().encode("ascii")
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 9, self.jd1, self.jd2_filled)
out = np.empty(
self.jd1.shape,
dtype=[
("year", "i4"),
("month", "i4"),
("day", "i4"),
("hour", "i4"),
("minute", "i4"),
("second", "f8"),
],
)
out["year"] = iys
out["month"] = ims
out["day"] = ids
out["hour"] = ihmsfs["h"]
out["minute"] = ihmsfs["m"]
out["second"] = ihmsfs["s"] + ihmsfs["f"] * 10 ** (-9)
out = out.view(np.recarray)
return self.mask_if_needed(out)
class TimezoneInfo(datetime.tzinfo):
"""
Subclass of the `~datetime.tzinfo` object, used in the
to_datetime method to specify timezones.
It may be safer in most cases to use a timezone database package like
pytz rather than defining your own timezones - this class is mainly
a workaround for users without pytz.
"""
@u.quantity_input(utc_offset=u.day, dst=u.day)
def __init__(self, utc_offset=0 * u.day, dst=0 * u.day, tzname=None):
"""
Parameters
----------
utc_offset : `~astropy.units.Quantity`, optional
Offset from UTC in days. Defaults to zero.
dst : `~astropy.units.Quantity`, optional
Daylight Savings Time offset in days. Defaults to zero
(no daylight savings).
tzname : str or None, optional
Name of timezone
Examples
--------
>>> from datetime import datetime
>>> from astropy.time import TimezoneInfo # Specifies a timezone
>>> import astropy.units as u
>>> utc = TimezoneInfo() # Defaults to UTC
>>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1
>>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour)
>>> print(dt_aware)
2000-01-01 00:00:00+01:00
>>> print(dt_aware.astimezone(utc))
1999-12-31 23:00:00+00:00
"""
if utc_offset == 0 and dst == 0 and tzname is None:
tzname = "UTC"
self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day))
self._tzname = tzname
self._dst = datetime.timedelta(dst.to_value(u.day))
def utcoffset(self, dt):
return self._utcoffset
def tzname(self, dt):
return str(self._tzname)
def dst(self, dt):
return self._dst
class TimeString(TimeUnique):
"""
Base class for string-like time representations.
This class assumes that anything following the last decimal point to the
right is a fraction of a second.
**Fast C-based parser**
Time format classes can take advantage of a fast C-based parser if the times
are represented as fixed-format strings with year, month, day-of-month,
hour, minute, second, OR year, day-of-year, hour, minute, second. This can
be a factor of 20 or more faster than the pure Python parser.
Fixed format means that the components always have the same number of
characters. The Python parser will accept ``2001-9-2`` as a date, but the C
parser would require ``2001-09-02``.
A subclass in this case must define a class attribute ``fast_parser_pars``
which is a `dict` with all of the keys below. An inherited attribute is not
checked, only an attribute in the class ``__dict__``.
- ``delims`` (tuple of int): ASCII code for character at corresponding
``starts`` position (0 => no character)
- ``starts`` (tuple of int): position where component starts (including
delimiter if present). Use -1 for the month component for formats that use
day of year.
- ``stops`` (tuple of int): position where component ends. Use -1 to
continue to end of string, or for the month component for formats that use
day of year.
- ``break_allowed`` (tuple of int): if true (1) then the time string can
legally end just before the corresponding component (e.g. "2000-01-01"
is a valid time but "2000-01-01 12" is not).
- ``has_day_of_year`` (int): 0 if dates have year, month, day; 1 if year,
day-of-year
"""
def __init_subclass__(cls, **kwargs):
if "fast_parser_pars" in cls.__dict__:
fpp = cls.fast_parser_pars
fpp = np.array(
list(
zip(
map(chr, fpp["delims"]),
fpp["starts"],
fpp["stops"],
fpp["break_allowed"],
)
),
_parse_times.dt_pars,
)
if cls.fast_parser_pars["has_day_of_year"]:
fpp["start"][1] = fpp["stop"][1] = -1
cls._fast_parser = _parse_times.create_parser(fpp)
super().__init_subclass__(**kwargs)
def _check_val_type(self, val1, val2):
if val1.dtype.kind not in ("S", "U") and val1.size:
raise TypeError(f"Input values for {self.name} class must be strings")
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def parse_string(self, timestr, subfmts):
"""Read time from a single string, using a set of possible formats."""
# Datetime components required for conversion to JD by ERFA, along
# with the default values.
components = ("year", "mon", "mday", "hour", "min", "sec")
defaults = (None, 1, 1, 0, 0, 0)
# Assume that anything following "." on the right side is a
# floating fraction of a second.
try:
idot = timestr.rindex(".")
except Exception:
timestr_has_fractional_digits = False
else:
timestr, fracsec = timestr[:idot], timestr[idot:]
fracsec = float(fracsec)
timestr_has_fractional_digits = True
for _, strptime_fmt_or_regex, _ in subfmts:
if isinstance(strptime_fmt_or_regex, str):
subfmt_has_sec = "%S" in strptime_fmt_or_regex
try:
tm = time.strptime(timestr, strptime_fmt_or_regex)
except ValueError:
continue
else:
vals = [getattr(tm, "tm_" + component) for component in components]
else:
tm = re.match(strptime_fmt_or_regex, timestr)
if tm is None:
continue
tm = tm.groupdict()
vals = [
int(tm.get(component, default))
for component, default in zip(components, defaults)
]
subfmt_has_sec = "sec" in tm
# Add fractional seconds if they were in the original time string
# and the subformat has seconds. A time like "2022-08-01.123" will
# never pass this for a format like ISO and will raise a parsing
# exception.
if timestr_has_fractional_digits:
if subfmt_has_sec:
vals[-1] = vals[-1] + fracsec
else:
continue
return vals
else:
raise ValueError(f"Time {timestr} does not match {self.name} format")
def set_jds(self, val1, val2):
"""Parse the time strings contained in val1 and set jd1, jd2."""
# If specific input subformat is required then use the Python parser.
# Also do this if Time format class does not define `use_fast_parser` or
# if the fast parser is entirely disabled. Note that `use_fast_parser`
# is ignored for format classes that don't have a fast parser.
if (
self.in_subfmt != "*"
or "_fast_parser" not in self.__class__.__dict__
or conf.use_fast_parser == "False"
):
jd1, jd2 = self.get_jds_python(val1, val2)
else:
try:
jd1, jd2 = self.get_jds_fast(val1, val2)
except Exception:
# Fall through to the Python parser unless fast is forced.
if conf.use_fast_parser == "force":
raise
else:
jd1, jd2 = self.get_jds_python(val1, val2)
self.jd1 = jd1
self.jd2 = jd2
def get_jds_python(self, val1, val2):
"""Parse the time strings contained in val1 and get jd1, jd2."""
# Select subformats based on current self.in_subfmt
subfmts = self._select_subfmts(self.in_subfmt)
# Be liberal in what we accept: convert bytes to ascii.
# Here .item() is needed for arrays with entries of unequal length,
# to strip trailing 0 bytes.
to_string = (
str if val1.dtype.kind == "U" else lambda x: str(x.item(), encoding="ascii")
)
iterator = np.nditer(
[val1, None, None, None, None, None, None],
flags=["zerosize_ok"],
op_dtypes=[None] + 5 * [np.intc] + [np.double],
)
for val, iy, im, id, ihr, imin, dsec in iterator:
val = to_string(val)
(
iy[...],
im[...],
id[...],
ihr[...],
imin[...],
dsec[...],
) = self.parse_string(val, subfmts)
jd1, jd2 = erfa.dtf2d(
self.scale.upper().encode("ascii"), *iterator.operands[1:]
)
jd1, jd2 = day_frac(jd1, jd2)
return jd1, jd2
def get_jds_fast(self, val1, val2):
"""Use fast C parser to parse time strings in val1 and get jd1, jd2."""
# Handle bytes or str input and convert to uint8. We need to use the
# dtype _parse_times.dt_u1 instead of uint8, since otherwise it is
# not possible to create a gufunc with structured dtype output.
# See note about ufunc type resolver in pyerfa/erfa/ufunc.c.templ.
if val1.dtype.kind == "U":
# Note: val1.astype('S') is *very* slow, so we check ourselves
# that the input is pure ASCII.
val1_uint32 = val1.view((np.uint32, val1.dtype.itemsize // 4))
if np.any(val1_uint32 > 127):
raise ValueError("input is not pure ASCII")
# It might be possible to avoid making a copy via astype with
# cleverness in parse_times.c but leave that for another day.
chars = val1_uint32.astype(_parse_times.dt_u1)
else:
chars = val1.view((_parse_times.dt_u1, val1.dtype.itemsize))
# Call the fast parsing ufunc.
time_struct = self._fast_parser(chars)
jd1, jd2 = erfa.dtf2d(
self.scale.upper().encode("ascii"),
time_struct["year"],
time_struct["month"],
time_struct["day"],
time_struct["hour"],
time_struct["minute"],
time_struct["second"],
)
return day_frac(jd1, jd2)
def str_kwargs(self):
"""
Generator that yields a dict of values corresponding to the
calendar date and time for the internal JD values.
"""
scale = (self.scale.upper().encode("ascii"),)
iys, ims, ids, ihmsfs = erfa.d2dtf(
scale, self.precision, self.jd1, self.jd2_filled
)
# Get the str_fmt element of the first allowed output subformat
_, _, str_fmt = self._select_subfmts(self.out_subfmt)[0]
yday = None
has_yday = "{yday:" in str_fmt
ihrs = ihmsfs["h"]
imins = ihmsfs["m"]
isecs = ihmsfs["s"]
ifracs = ihmsfs["f"]
for iy, im, id, ihr, imin, isec, ifracsec in np.nditer(
[iys, ims, ids, ihrs, imins, isecs, ifracs], flags=["zerosize_ok"]
):
if has_yday:
yday = datetime.datetime(iy, im, id).timetuple().tm_yday
yield {
"year": int(iy),
"mon": int(im),
"day": int(id),
"hour": int(ihr),
"min": int(imin),
"sec": int(isec),
"fracsec": int(ifracsec),
"yday": yday,
}
def format_string(self, str_fmt, **kwargs):
"""Write time to a string using a given format.
By default, just interprets str_fmt as a format string,
but subclasses can add to this.
"""
return str_fmt.format(**kwargs)
@property
def value(self):
# Select the first available subformat based on current
# self.out_subfmt
subfmts = self._select_subfmts(self.out_subfmt)
_, _, str_fmt = subfmts[0]
# TODO: fix this ugly hack
if self.precision > 0 and str_fmt.endswith("{sec:02d}"):
str_fmt += ".{fracsec:0" + str(self.precision) + "d}"
# Try to optimize this later. Can't pre-allocate because length of
# output could change, e.g. year rolls from 999 to 1000.
outs = []
for kwargs in self.str_kwargs():
outs.append(str(self.format_string(str_fmt, **kwargs)))
return np.array(outs).reshape(self.jd1.shape)
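# Hedged usage sketch for the fast C parser described in the TimeString
# docstring (assumes ``use_fast_parser`` is exposed on ``astropy.time.conf``,
# as referenced in set_jds above):
#
#     >>> from astropy.time import Time, conf
#     >>> with conf.set_temp("use_fast_parser", "force"):
#     ...     t = Time(["2001-09-02 01:02:03.004"], scale="utc")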
class TimeISO(TimeString):
"""
ISO 8601 compliant date-time format "YYYY-MM-DD HH:MM:SS.sss...".
For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = "iso"
subfmts = (
(
"date_hms",
"%Y-%m-%d %H:%M:%S",
# XXX To Do - use strftime for output ??
"{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}",
),
(
"date_hm",
"%Y-%m-%d %H:%M",
"{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}",
),
("date", "%Y-%m-%d", "{year:d}-{mon:02d}-{day:02d}"),
)
# Define positions and starting delimiter for year, month, day, hour,
# minute, seconds components of an ISO time. This is used by the fast
# C-parser parse_ymdhms_times()
#
# "2000-01-12 13:14:15.678"
# 01234567890123456789012
# yyyy-mm-dd hh:mm:ss.fff
# Parsed as ('yyyy', '-mm', '-dd', ' hh', ':mm', ':ss', '.fff')
fast_parser_pars = dict(
delims=(0, ord("-"), ord("-"), ord(" "), ord(":"), ord(":"), ord(".")),
starts=(0, 4, 7, 10, 13, 16, 19),
stops=(3, 6, 9, 12, 15, 18, -1),
# Break allowed *before*
# y m d h m s f
break_allowed=(0, 0, 0, 1, 0, 1, 1),
has_day_of_year=0,
)
def parse_string(self, timestr, subfmts):
# Handle trailing 'Z' for UTC time
if timestr.endswith("Z"):
if self.scale != "utc":
raise ValueError("Time input terminating in 'Z' must have scale='UTC'")
timestr = timestr[:-1]
return super().parse_string(timestr, subfmts)
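# Illustrative sketch of the trailing-'Z' handling above (assumes the public
# Time API and default precision of 3):
#
#     >>> Time("2010-01-01 00:00:00Z", scale="utc").iso
#     '2010-01-01 00:00:00.000'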
class TimeISOT(TimeISO):
"""
ISO 8601 compliant date-time format "YYYY-MM-DDTHH:MM:SS.sss...".
This is the same as TimeISO except for a "T" instead of space between
the date and time.
For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = "isot"
subfmts = (
(
"date_hms",
"%Y-%m-%dT%H:%M:%S",
"{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}",
),
(
"date_hm",
"%Y-%m-%dT%H:%M",
"{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}",
),
("date", "%Y-%m-%d", "{year:d}-{mon:02d}-{day:02d}"),
)
# See TimeISO for explanation
fast_parser_pars = dict(
delims=(0, ord("-"), ord("-"), ord("T"), ord(":"), ord(":"), ord(".")),
starts=(0, 4, 7, 10, 13, 16, 19),
stops=(3, 6, 9, 12, 15, 18, -1),
# Break allowed *before*
# y m d h m s f
break_allowed=(0, 0, 0, 1, 0, 1, 1),
has_day_of_year=0,
)
class TimeYearDayTime(TimeISO):
"""
Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...".
The day-of-year (DOY) goes from 001 to 365 (366 in leap years).
For example, 2000:001:00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = "yday"
subfmts = (
(
"date_hms",
"%Y:%j:%H:%M:%S",
"{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}",
),
("date_hm", "%Y:%j:%H:%M", "{year:d}:{yday:03d}:{hour:02d}:{min:02d}"),
("date", "%Y:%j", "{year:d}:{yday:03d}"),
)
# Define positions and starting delimiter for year, month, day, hour,
# minute, seconds components of an ISO time. This is used by the fast
# C-parser parse_ymdhms_times()
#
# "2000:123:13:14:15.678"
# 012345678901234567890
# yyyy:ddd:hh:mm:ss.fff
# Parsed as ('yyyy', ':ddd', ':hh', ':mm', ':ss', '.fff')
#
# delims: character at corresponding `starts` position (0 => no character)
# starts: position where component starts (including delimiter if present)
# stops: position where component ends (-1 => continue to end of string)
fast_parser_pars = dict(
delims=(0, 0, ord(":"), ord(":"), ord(":"), ord(":"), ord(".")),
starts=(0, -1, 4, 8, 11, 14, 17),
stops=(3, -1, 7, 10, 13, 16, -1),
# Break allowed before:
# y m d h m s f
break_allowed=(0, 0, 0, 1, 0, 1, 1),
has_day_of_year=1,
)
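# Illustrative sketch (assumes the public Time API): day-of-year output for
# the format defined above.
#
#     >>> Time("2000-01-01 12:00:00", scale="utc").yday
#     '2000:001:12:00:00.000'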
class TimeDatetime64(TimeISOT):
name = "datetime64"
def _check_val_type(self, val1, val2):
if not val1.dtype.kind == "M":
if val1.size > 0:
raise TypeError(
f"Input values for {self.name} class must be datetime64 objects"
)
else:
val1 = np.array([], "datetime64[D]")
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def set_jds(self, val1, val2):
# If there are any masked values in the ``val1`` datetime64 array
# ('NaT') then stub them with a valid date so downstream parse_string
# will work. The value under the mask is arbitrary but a "modern" date
# is good.
mask = np.isnat(val1)
masked = np.any(mask)
if masked:
val1 = val1.copy()
val1[mask] = "2000"
# Make sure M(onth) and Y(ear) dates will parse and convert to bytestring
if val1.dtype.name in ["datetime64[M]", "datetime64[Y]"]:
val1 = val1.astype("datetime64[D]")
val1 = val1.astype("S")
# Standard ISO string parsing now
super().set_jds(val1, val2)
# Finally apply mask if necessary
if masked:
self.jd2[mask] = np.nan
@property
def value(self):
precision = self.precision
self.precision = 9
ret = super().value
self.precision = precision
return ret.astype("datetime64")
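# Illustrative sketch (assumes the public Time API accepts numpy datetime64
# input for this format):
#
#     >>> import numpy as np
#     >>> Time(np.datetime64("2000-01-01T00:00:00"), scale="utc").jd
#     2451544.5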
class TimeFITS(TimeString):
"""
FITS format: "[±Y]YYYY-MM-DD[THH:MM:SS[.sss]]".
ISOT but can give signed five-digit year (mostly for negative years);
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date': date
- 'longdate_hms': as 'date_hms', but with signed 5-digit year
- 'longdate': as 'date', but with signed 5-digit year
See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583).
"""
name = "fits"
subfmts = (
(
"date_hms",
(
r"(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)T"
r"(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)"
),
"{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}",
),
(
"date",
r"(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)",
"{year:04d}-{mon:02d}-{day:02d}",
),
(
"longdate_hms",
(
r"(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)T"
r"(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)"
),
"{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}",
),
(
"longdate",
r"(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)",
"{year:+06d}-{mon:02d}-{day:02d}",
),
)
# Add the regex that parses the scale and possible realization.
# Support for this is deprecated. Read old style but no longer write
# in this style.
subfmts = tuple(
(
subfmt[0],
subfmt[1] + r"(\((?P<scale>\w+)(\((?P<realization>\w+)\))?\))?",
subfmt[2],
)
for subfmt in subfmts
)
def parse_string(self, timestr, subfmts):
"""Read time and deprecated scale if present."""
# Try parsing with any of the allowed sub-formats.
for _, regex, _ in subfmts:
tm = re.match(regex, timestr)
if tm:
break
else:
raise ValueError(f"Time {timestr} does not match {self.name} format")
tm = tm.groupdict()
# Scale and realization are deprecated and strings in this form
# are no longer created. We issue a warning but still use the value.
if tm["scale"] is not None:
warnings.warn(
"FITS time strings should no longer have embedded time scale.",
AstropyDeprecationWarning,
)
# If a scale was given, translate from a possible deprecated
# timescale identifier to the scale used by Time.
fits_scale = tm["scale"].upper()
scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower())
if scale not in TIME_SCALES:
raise ValueError(
f"Scale {scale!r} is not in the allowed scales "
f"{sorted(TIME_SCALES)}"
)
# If no scale was given in the initialiser, set the scale to
# that given in the string. Realization is ignored
# and is only supported to allow old-style strings to be
# parsed.
if self._scale is None:
self._scale = scale
if scale != self.scale:
raise ValueError(
f"Input strings for {self.name} class must all "
"have consistent time scales."
)
return [
int(tm["year"]),
int(tm["mon"]),
int(tm["mday"]),
int(tm.get("hour", 0)),
int(tm.get("min", 0)),
float(tm.get("sec", 0.0)),
]
@property
def value(self):
"""Convert times to strings, using signed 5 digit if necessary."""
if "long" not in self.out_subfmt:
# If we have times before year 0 or after year 9999, we can
# output only in a "long" format, using signed 5-digit years.
jd = self.jd1 + self.jd2
if jd.size and (jd.min() < 1721425.5 or jd.max() >= 5373484.5):
self.out_subfmt = "long" + self.out_subfmt
return super().value
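# Illustrative sketch (assumes the public Time API): a year within 0-9999
# uses the short form defined above, not the signed 5-digit "long" form.
#
#     >>> Time("2000-01-01 00:00:00", scale="utc").fits
#     '2000-01-01T00:00:00.000'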
class TimeEpochDate(TimeNumeric):
"""
Base class for support of Besselian and Julian epoch dates.
"""
_default_scale = "tt" # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(val1 + val2)
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, **kwargs):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
value = jd_to_epoch(self.jd1, self.jd2)
return super().to_value(jd1=value, jd2=np.float64(0.0), **kwargs)
value = property(to_value)
class TimeBesselianEpoch(TimeEpochDate):
"""Besselian Epoch year as value(s) like 1950.0.
Since for this format the length of the year varies, input needs to
be floating point; it is not possible to use Quantity input, for
which a year always equals 365.25 days.
"""
name = "byear"
epoch_to_jd = "epb2jd"
jd_to_epoch = "epb"
def _check_val_type(self, val1, val2):
_check_val_type_not_quantity(self.name, val1, val2)
# FIXME: is val2 really okay here?
return super()._check_val_type(val1, val2)
class TimeJulianEpoch(TimeEpochDate):
"""Julian Epoch year as value(s) like 2000.0."""
name = "jyear"
unit = erfa.DJY # 365.25, the Julian year, for conversion to quantities
epoch_to_jd = "epj2jd"
jd_to_epoch = "epj"
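# Illustrative sketch (assumes the public Time API): the J2000.0 epoch is
# 2000 January 1.5 TT.
#
#     >>> Time("2000-01-01 12:00:00", scale="tt").jyear  # doctest: +FLOAT_CMP
#     2000.0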
class TimeEpochDateString(TimeString):
"""
Base class to support string Besselian and Julian epoch dates
such as 'B1950.0' or 'J2000.0' respectively.
"""
_default_scale = "tt" # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
epoch_prefix = self.epoch_prefix
# Be liberal in what we accept: convert bytes to ascii.
to_string = (
str if val1.dtype.kind == "U" else lambda x: str(x.item(), encoding="ascii")
)
iterator = np.nditer(
[val1, None], op_dtypes=[val1.dtype, np.double], flags=["zerosize_ok"]
)
for val, years in iterator:
try:
time_str = to_string(val)
epoch_type, year_str = time_str[0], time_str[1:]
year = float(year_str)
if epoch_type.upper() != epoch_prefix:
raise ValueError
except (IndexError, ValueError, UnicodeEncodeError):
raise ValueError(f"Time {val} does not match {self.name} format")
else:
years[...] = year
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(iterator.operands[-1])
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
years = jd_to_epoch(self.jd1, self.jd2)
# Use old-style format since it is a factor of 2 faster
str_fmt = self.epoch_prefix + "%." + str(self.precision) + "f"
outs = [str_fmt % year for year in years.flat]
return np.array(outs).reshape(self.jd1.shape)
class TimeBesselianEpochString(TimeEpochDateString):
"""Besselian Epoch year as string value(s) like 'B1950.0'."""
name = "byear_str"
epoch_to_jd = "epb2jd"
jd_to_epoch = "epb"
epoch_prefix = "B"
class TimeJulianEpochString(TimeEpochDateString):
"""Julian Epoch year as string value(s) like 'J2000.0'."""
name = "jyear_str"
epoch_to_jd = "epj2jd"
jd_to_epoch = "epj"
epoch_prefix = "J"
class TimeDeltaFormat(TimeFormat):
"""Base class for time delta representations."""
_registry = TIME_DELTA_FORMATS
def _check_scale(self, scale):
"""
Check that the scale is in the allowed list of scales, or is `None`.
"""
if scale is not None and scale not in TIME_DELTA_SCALES:
raise ScaleValueError(
f"Scale value '{scale}' not in allowed values {TIME_DELTA_SCALES}"
)
return scale
class TimeDeltaNumeric(TimeDeltaFormat, TimeNumeric):
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2, divisor=1.0 / self.unit)
def to_value(self, **kwargs):
# Note that 1/unit is always exactly representable, so the
# following multiplications are exact.
factor = 1.0 / self.unit
jd1 = self.jd1 * factor
jd2 = self.jd2 * factor
return super().to_value(jd1=jd1, jd2=jd2, **kwargs)
value = property(to_value)
class TimeDeltaSec(TimeDeltaNumeric):
"""Time delta in SI seconds."""
name = "sec"
unit = 1.0 / erfa.DAYSEC # for quantity input
class TimeDeltaJD(TimeDeltaNumeric):
"""Time delta in Julian days (86400 SI seconds)."""
name = "jd"
unit = 1.0
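# Illustrative sketch (assumes the public TimeDelta API): the two numeric
# delta formats above are related by exactly 86400 SI seconds per day.
#
#     >>> from astropy.time import TimeDelta
#     >>> TimeDelta(1.0, format="jd").sec  # doctest: +FLOAT_CMP
#     86400.0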
class TimeDeltaDatetime(TimeDeltaFormat, TimeUnique):
"""Time delta in datetime.timedelta."""
name = "datetime"
def _check_val_type(self, val1, val2):
if not all(isinstance(val, datetime.timedelta) for val in val1.flat):
raise TypeError(
f"Input values for {self.name} class must be datetime.timedelta objects"
)
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
iterator = np.nditer(
[val1, None, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=[None, np.double, np.double],
)
day = datetime.timedelta(days=1)
for val, jd1, jd2 in iterator:
jd1[...], other = divmod(val.item(), day)
jd2[...] = other / day
self.jd1, self.jd2 = day_frac(iterator.operands[-2], iterator.operands[-1])
@property
def value(self):
iterator = np.nditer(
[self.jd1, self.jd2, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=[None, None, object],
)
for jd1, jd2, out in iterator:
jd1_, jd2_ = day_frac(jd1, jd2)
out[...] = datetime.timedelta(days=jd1_, microseconds=jd2_ * 86400 * 1e6)
return self.mask_if_needed(iterator.operands[-1])
def _validate_jd_for_storage(jd):
if isinstance(jd, (float, int)):
return np.array(jd, dtype=np.float64)
if isinstance(jd, np.generic) and (
jd.dtype.kind == "f" and jd.dtype.itemsize <= 8 or jd.dtype.kind in "iu"
):
return np.array(jd, dtype=np.float64)
elif isinstance(jd, np.ndarray) and jd.dtype.kind == "f" and jd.dtype.itemsize == 8:
return jd
else:
raise TypeError(
"JD values must be arrays (possibly zero-dimensional) "
f"of floats but we got {jd!r} of type {type(jd)}"
)
def _broadcast_writeable(jd1, jd2):
if jd1.shape == jd2.shape:
return jd1, jd2
# When using broadcast_arrays, *both* are flagged with
# warn-on-write, even the one that wasn't modified, and
# require "C" only clears the flag if it actually copied
# anything.
shape = np.broadcast(jd1, jd2).shape
if jd1.shape == shape:
s_jd1 = jd1
else:
s_jd1 = np.require(np.broadcast_to(jd1, shape), requirements=["C", "W"])
if jd2.shape == shape:
s_jd2 = jd2
else:
s_jd2 = np.require(np.broadcast_to(jd2, shape), requirements=["C", "W"])
return s_jd1, s_jd2
# Import symbols from core.py that are used in this module. This succeeds
# because __init__.py imports format.py just before core.py.
from .core import TIME_DELTA_SCALES, TIME_SCALES, ScaleValueError, Time # noqa: E402
6abae9ecca014008e736e6c970572720c41805c35f4c63224da14e6121aeb26b
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["quantity_input"]
import inspect
from collections.abc import Sequence
from functools import wraps
from numbers import Number
import numpy as np
from . import _typing as T
from .core import (
Unit,
UnitBase,
UnitsError,
add_enabled_equivalencies,
dimensionless_unscaled,
)
from .physical import PhysicalType, get_physical_type
from .quantity import Quantity
NoneType = type(None)
def _get_allowed_units(targets):
"""
From a list of target units (either as strings or unit objects) and physical
types, return a list of Unit objects.
"""
allowed_units = []
for target in targets:
try:
unit = Unit(target)
except (TypeError, ValueError):
try:
unit = get_physical_type(target)._unit
except (TypeError, ValueError, KeyError): # KeyError for Enum
raise ValueError(f"Invalid unit or physical type {target!r}.") from None
allowed_units.append(unit)
return allowed_units
def _validate_arg_value(
param_name, func_name, arg, targets, equivalencies, strict_dimensionless=False
):
"""
Validates the object passed in to the wrapped function, ``arg``, with target
unit or physical type, ``target``.
"""
if len(targets) == 0:
return
allowed_units = _get_allowed_units(targets)
# If dimensionless is an allowed unit and the argument is unit-less,
# allow numbers or numpy arrays with numeric dtypes
if (
dimensionless_unscaled in allowed_units
and not strict_dimensionless
and not hasattr(arg, "unit")
):
if isinstance(arg, Number):
return
elif isinstance(arg, np.ndarray) and np.issubdtype(arg.dtype, np.number):
return
for allowed_unit in allowed_units:
try:
if arg.unit.is_equivalent(allowed_unit, equivalencies=equivalencies):
break
except AttributeError: # Either there is no .unit or no .is_equivalent
if hasattr(arg, "unit"):
error_msg = "a 'unit' attribute without an 'is_equivalent' method"
else:
error_msg = "no 'unit' attribute"
raise TypeError(
f"Argument '{param_name}' to function '{func_name}'"
f" has {error_msg}. You should pass in an astropy "
"Quantity instead."
)
else:
error_msg = (
f"Argument '{param_name}' to function '{func_name}' must "
"be in units convertible to"
)
if len(targets) > 1:
targ_names = ", ".join([f"'{str(targ)}'" for targ in targets])
raise UnitsError(f"{error_msg} one of: {targ_names}.")
else:
raise UnitsError(f"{error_msg} '{str(targets[0])}'.")
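# Illustrative sketch of the dimensionless pass-through implemented above
# (assumes the public ``quantity_input`` decorator defined later in this
# module): plain numbers are accepted for a dimensionless target unless
# ``strict_dimensionless`` is set.
#
#     >>> import astropy.units as u
#     >>> @u.quantity_input(x=u.dimensionless_unscaled)
#     ... def f(x):
#     ...     return x
#     >>> f(2.0)
#     2.0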
def _parse_annotation(target):
if target in (None, NoneType, inspect._empty):
return target
# check if unit-like
try:
unit = Unit(target)
except (TypeError, ValueError):
try:
ptype = get_physical_type(target)
except (TypeError, ValueError, KeyError): # KeyError for Enum
if isinstance(target, str):
raise ValueError(f"invalid unit or physical type {target!r}.") from None
else:
return ptype
else:
return unit
# could be a type hint
origin = T.get_origin(target)
if origin is T.Union:
return [_parse_annotation(t) for t in T.get_args(target)]
elif origin is not T.Annotated: # can't be Quantity[]
return False
# parse type hint
cls, *annotations = T.get_args(target)
if not issubclass(cls, Quantity) or not annotations:
return False
# get unit from type hint
unit, *rest = annotations
if not isinstance(unit, (UnitBase, PhysicalType)):
return False
return unit
class QuantityInput:
@classmethod
def as_decorator(cls, func=None, **kwargs):
r"""
A decorator for validating the units of arguments to functions.
Unit specifications can be provided as keyword arguments to the
decorator, or by using function annotation syntax. Arguments to the
decorator take precedence over any function annotations present.
A `~astropy.units.UnitsError` will be raised if the unit attribute of
the argument is not equivalent to the unit specified to the decorator or
in the annotation. If the argument has no unit attribute, i.e. it is not
a Quantity object, a `TypeError` will be raised unless the argument is an
annotation. This is to allow non-Quantity annotations to pass
through.
Where an equivalency is specified in the decorator, the function will be
executed with that equivalency in force.
Notes
-----
The checking of arguments inside variable arguments to a function is not
supported (i.e. \*arg or \**kwargs).
The original function is accessible by the attributed ``__wrapped__``.
See :func:`functools.wraps` for details.
Examples
--------
.. code-block:: python
import astropy.units as u
@u.quantity_input(myangle=u.arcsec)
def myfunction(myangle):
return myangle**2
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec):
return myangle**2
Or using a unit-aware Quantity annotation.
.. code-block:: python
@u.quantity_input
def myfunction(myangle: u.Quantity[u.arcsec]):
return myangle**2
Also you can specify a return value annotation, which will
cause the function to always return a `~astropy.units.Quantity` in that
unit.
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec) -> u.deg**2:
return myangle**2
Using equivalencies::
import astropy.units as u
@u.quantity_input(myenergy=u.eV, equivalencies=u.mass_energy())
def myfunction(myenergy):
return myenergy**2
"""
self = cls(**kwargs)
if func is not None and not kwargs:
return self(func)
else:
return self
def __init__(self, func=None, strict_dimensionless=False, **kwargs):
self.equivalencies = kwargs.pop("equivalencies", [])
self.decorator_kwargs = kwargs
self.strict_dimensionless = strict_dimensionless
def __call__(self, wrapped_function):
# Extract the function signature for the function we are wrapping.
wrapped_signature = inspect.signature(wrapped_function)
# Define a new function to return in place of the wrapped one
@wraps(wrapped_function)
def wrapper(*func_args, **func_kwargs):
# Bind the arguments to our new function to the signature of the original.
bound_args = wrapped_signature.bind(*func_args, **func_kwargs)
# Iterate through the parameters of the original signature
for param in wrapped_signature.parameters.values():
# We do not support variable arguments (*args, **kwargs)
if param.kind in (
inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL,
):
continue
# Catch the (never triggered) case where bind relied on a default value.
if (
param.name not in bound_args.arguments
and param.default is not param.empty
):
bound_args.arguments[param.name] = param.default
# Get the value of this parameter (argument to new function)
arg = bound_args.arguments[param.name]
# Get target unit or physical type, either from decorator kwargs
# or annotations
if param.name in self.decorator_kwargs:
targets = self.decorator_kwargs[param.name]
is_annotation = False
else:
targets = param.annotation
is_annotation = True
# parses to unit if it's an annotation (or list thereof)
targets = _parse_annotation(targets)
# If targets is empty, then no target units or physical
# types were specified so we can continue to the next arg
if targets is inspect.Parameter.empty:
continue
# If the argument value is None, and the default value is None,
# pass through the None even if there is a target unit
if arg is None and param.default is None:
continue
# Here, we check whether multiple target unit/physical type's
# were specified in the decorator/annotation, or whether a
# single string (unit or physical type) or a Unit object was
# specified
if isinstance(targets, str) or not isinstance(targets, Sequence):
valid_targets = [targets]
# Check for None in the supplied list of allowed units and, if
# present and the passed value is also None, ignore.
elif None in targets or NoneType in targets:
if arg is None:
continue
else:
valid_targets = [t for t in targets if t is not None]
else:
valid_targets = targets
# If we're dealing with an annotation, skip all the targets that
# are not strings or subclasses of Unit. This is to allow
# non unit related annotations to pass through
if is_annotation:
valid_targets = [
t
for t in valid_targets
if isinstance(t, (str, UnitBase, PhysicalType))
]
# Now we loop over the allowed units/physical types and validate
# the value of the argument:
_validate_arg_value(
param.name,
wrapped_function.__name__,
arg,
valid_targets,
self.equivalencies,
self.strict_dimensionless,
)
# Call the original function with any equivalencies in force.
with add_enabled_equivalencies(self.equivalencies):
return_ = wrapped_function(*func_args, **func_kwargs)
# Return
ra = wrapped_signature.return_annotation
valid_empty = (inspect.Signature.empty, None, NoneType, T.NoReturn)
if ra not in valid_empty:
target = (
ra
if T.get_origin(ra) not in (T.Annotated, T.Union)
else _parse_annotation(ra)
)
if isinstance(target, str) or not isinstance(target, Sequence):
target = [target]
valid_targets = [
t for t in target if isinstance(t, (str, UnitBase, PhysicalType))
]
_validate_arg_value(
"return",
wrapped_function.__name__,
return_,
valid_targets,
self.equivalencies,
self.strict_dimensionless,
)
if len(valid_targets) > 0:
return_ <<= valid_targets[0]
return return_
return wrapper
quantity_input = QuantityInput.as_decorator
2d063beebb6eba16f9e8e0b87b413573cf0226945f214659ae8a56727b2bb9a9
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Core units classes and functions.
"""
import inspect
import operator
import textwrap
import warnings
import numpy as np
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import isiterable
from . import format as unit_format
from .utils import (
is_effectively_unity,
resolve_fractions,
sanitize_scale,
validate_power,
)
__all__ = [
"UnitsError",
"UnitsWarning",
"UnitConversionError",
"UnitTypeError",
"UnitBase",
"NamedUnit",
"IrreducibleUnit",
"Unit",
"CompositeUnit",
"PrefixUnit",
"UnrecognizedUnit",
"def_unit",
"get_current_unit_registry",
"set_enabled_units",
"add_enabled_units",
"set_enabled_equivalencies",
"add_enabled_equivalencies",
"set_enabled_aliases",
"add_enabled_aliases",
"dimensionless_unscaled",
"one",
]
UNITY = 1.0
def _flatten_units_collection(items):
"""
Given a list of sequences, modules or dictionaries of units, or
single units, return a flat set of all the units found.
"""
if not isinstance(items, list):
items = [items]
result = set()
for item in items:
if isinstance(item, UnitBase):
result.add(item)
else:
if isinstance(item, dict):
units = item.values()
elif inspect.ismodule(item):
units = vars(item).values()
elif isiterable(item):
units = item
else:
continue
for unit in units:
if isinstance(unit, UnitBase):
result.add(unit)
return result
def _normalize_equivalencies(equivalencies):
"""Normalizes equivalencies ensuring each is a 4-tuple.
The resulting tuple is of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs
Raises
------
ValueError
If an equivalency cannot be interpreted.
"""
if equivalencies is None:
return []
normalized = []
for i, equiv in enumerate(equivalencies):
if len(equiv) == 2:
funit, tunit = equiv
a = b = lambda x: x
elif len(equiv) == 3:
funit, tunit, a = equiv
b = a
elif len(equiv) == 4:
funit, tunit, a, b = equiv
else:
raise ValueError(f"Invalid equivalence entry {i}: {equiv!r}")
if not (
funit is Unit(funit)
and (tunit is None or tunit is Unit(tunit))
and callable(a)
and callable(b)
):
raise ValueError(f"Invalid equivalence entry {i}: {equiv!r}")
normalized.append((funit, tunit, a, b))
return normalized
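# For reference (summary of the normalization above): a 2-tuple
# ``(funit, tunit)`` gains identity forward/backward functions, a 3-tuple
# reuses its single function in both directions, and a 4-tuple passes
# through unchanged.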
class _UnitRegistry:
"""
Manages a registry of the enabled units.
"""
def __init__(self, init=[], equivalencies=[], aliases={}):
if isinstance(init, _UnitRegistry):
# If passed another registry we don't need to rebuild everything.
# but because these are mutable types we don't want to create
# conflicts so everything needs to be copied.
self._equivalencies = init._equivalencies.copy()
self._aliases = init._aliases.copy()
self._all_units = init._all_units.copy()
self._registry = init._registry.copy()
self._non_prefix_units = init._non_prefix_units.copy()
# The physical type is a dictionary containing sets as values.
# All of these must be copied otherwise we could alter the old
# registry.
self._by_physical_type = {
k: v.copy() for k, v in init._by_physical_type.items()
}
else:
self._reset_units()
self._reset_equivalencies()
self._reset_aliases()
self.add_enabled_units(init)
self.add_enabled_equivalencies(equivalencies)
self.add_enabled_aliases(aliases)
def _reset_units(self):
self._all_units = set()
self._non_prefix_units = set()
self._registry = {}
self._by_physical_type = {}
def _reset_equivalencies(self):
self._equivalencies = set()
def _reset_aliases(self):
self._aliases = {}
@property
def registry(self):
return self._registry
@property
def all_units(self):
return self._all_units
@property
def non_prefix_units(self):
return self._non_prefix_units
def set_enabled_units(self, units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by
methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
"""
self._reset_units()
return self.add_enabled_units(units)
def add_enabled_units(self, units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for
searching through by methods like
`UnitBase.find_equivalent_units` and `UnitBase.compose`.
"""
units = _flatten_units_collection(units)
for unit in units:
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for st in unit._names:
if st in self._registry and unit != self._registry[st]:
raise ValueError(
f"Object with name {st!r} already exists in namespace. "
"Filter the set of units to avoid name clashes before "
"enabling them."
)
for st in unit._names:
self._registry[st] = unit
self._all_units.add(unit)
if not isinstance(unit, PrefixUnit):
self._non_prefix_units.add(unit)
hash = unit._get_physical_type_id()
self._by_physical_type.setdefault(hash, set()).add(unit)
def get_units_with_physical_type(self, unit):
"""
Get all units in the registry with the same physical type as
the given unit.
Parameters
----------
unit : UnitBase instance
"""
return self._by_physical_type.get(unit._get_physical_type_id(), set())
@property
def equivalencies(self):
return list(self._equivalencies)
def set_enabled_equivalencies(self, equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
List of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
self._reset_equivalencies()
return self.add_enabled_equivalencies(equivalencies)
def add_enabled_equivalencies(self, equivalencies):
"""
Adds to the set of equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
List of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# pre-normalize list to help catch mistakes
equivalencies = _normalize_equivalencies(equivalencies)
self._equivalencies |= set(equivalencies)
@property
def aliases(self):
return self._aliases
def set_enabled_aliases(self, aliases):
"""
Set aliases for units.
Parameters
----------
aliases : dict of str, Unit
The aliases to set. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
"""
self._reset_aliases()
self.add_enabled_aliases(aliases)
def add_enabled_aliases(self, aliases):
"""
Add aliases for units.
Parameters
----------
aliases : dict of str, Unit
The aliases to add. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
"""
for alias, unit in aliases.items():
if alias in self._registry and unit != self._registry[alias]:
raise ValueError(
f"{alias} already means {self._registry[alias]}, so "
f"cannot be used as an alias for {unit}."
)
if alias in self._aliases and unit != self._aliases[alias]:
raise ValueError(
f"{alias} already is an alias for {self._aliases[alias]}, so "
f"cannot be used as an alias for {unit}."
)
for alias, unit in aliases.items():
if alias not in self._registry and alias not in self._aliases:
self._aliases[alias] = unit
class _UnitContext:
def __init__(self, init=[], equivalencies=[]):
_unit_registries.append(_UnitRegistry(init=init, equivalencies=equivalencies))
def __enter__(self):
pass
def __exit__(self, type, value, tb):
_unit_registries.pop()
_unit_registries = [_UnitRegistry()]
def get_current_unit_registry():
return _unit_registries[-1]
def set_enabled_units(units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by methods
like `UnitBase.find_equivalent_units` and `UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> with u.set_enabled_units([u.pc]):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
pc | 3.08568e+16 m | parsec ,
]
>>> u.m.find_equivalent_units()
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lsec | 2.99792e+08 m | lightsecond ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
micron | 1e-06 m | ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
]
"""
# get a context with a new registry, using equivalencies of the current one
context = _UnitContext(equivalencies=get_current_unit_registry().equivalencies)
# in this new current registry, enable the units requested
get_current_unit_registry().set_enabled_units(units)
return context
def add_enabled_units(units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for searching
through by methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> from astropy.units import imperial
>>> with u.add_enabled_units(imperial):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
ft | 0.3048 m | foot ,
fur | 201.168 m | furlong ,
inch | 0.0254 m | ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lsec | 2.99792e+08 m | lightsecond ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
mi | 1609.34 m | mile ,
micron | 1e-06 m | ,
mil | 2.54e-05 m | thou ,
nmi | 1852 m | nauticalmile, NM ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
yd | 0.9144 m | yard ,
]
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further units requested
get_current_unit_registry().add_enabled_units(units)
return context
def set_enabled_equivalencies(equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
list of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
Examples
--------
Exponentiation normally requires dimensionless quantities. To avoid
problems with complex phases::
>>> from astropy import units as u
>>> with u.set_enabled_equivalencies(u.dimensionless_angles()):
... phase = 0.5 * u.cycle
... np.exp(1j*phase) # doctest: +FLOAT_CMP
<Quantity -1.+1.2246468e-16j>
"""
# get a context with a new registry, using all units of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the equivalencies requested
get_current_unit_registry().set_enabled_equivalencies(equivalencies)
return context
def add_enabled_equivalencies(equivalencies):
"""
Adds to the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Since no equivalencies are enabled by default, generally it is recommended
to use `set_enabled_equivalencies`.
Parameters
----------
equivalencies : list of tuple
list of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further equivalencies requested
get_current_unit_registry().add_enabled_equivalencies(equivalencies)
return context
def set_enabled_aliases(aliases):
"""
Set aliases for units.
This is useful for handling alternate spellings for units, or
misspelled units in files one is trying to read.
Parameters
----------
aliases : dict of str, Unit
The aliases to set. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
Examples
--------
To temporarily allow for a misspelled 'Angstroem' unit::
>>> from astropy import units as u
>>> with u.set_enabled_aliases({'Angstroem': u.Angstrom}):
... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom)
True
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, set the aliases requested
get_current_unit_registry().set_enabled_aliases(aliases)
return context
def add_enabled_aliases(aliases):
"""
Add aliases for units.
This is useful for handling alternate spellings for units, or
misspelled units in files one is trying to read.
Since no aliases are enabled by default, generally it is recommended
to use `set_enabled_aliases`.
Parameters
----------
aliases : dict of str, Unit
The aliases to add. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
Examples
--------
To temporarily allow for a misspelled 'Angstroem' unit::
>>> from astropy import units as u
>>> with u.add_enabled_aliases({'Angstroem': u.Angstrom}):
... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom)
True
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, add the further aliases requested
get_current_unit_registry().add_enabled_aliases(aliases)
return context
class UnitsError(Exception):
"""
The base class for unit-specific exceptions.
"""
class UnitScaleError(UnitsError, ValueError):
"""
Used to catch errors involving scaled units,
which are not recognized by the FITS format.
"""
pass
class UnitConversionError(UnitsError, ValueError):
"""
Used specifically for errors related to converting between units or
interpreting units in terms of other units.
"""
class UnitTypeError(UnitsError, TypeError):
"""
Used specifically for errors in setting to units not allowed by a class.
E.g., would be raised if the unit of an `~astropy.coordinates.Angle`
instance were set to a non-angular unit.
"""
class UnitsWarning(AstropyWarning):
"""
The base class for unit-specific warnings.
"""
class UnitBase:
"""
Abstract base class for units.
Most of the arithmetic operations on units are defined in this
base class.
Should not be instantiated by users directly.
"""
# Make sure that __rmul__ of units gets called over the __mul__ of Numpy
# arrays to avoid element-wise multiplication.
__array_priority__ = 1000
_hash = None
_type_id = None
def __deepcopy__(self, memo):
# This may look odd, but the units conversion will be very
# broken after deep-copying if we don't guarantee that a given
# physical unit corresponds to only one instance
return self
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return unit_format.Latex.to_string(self)
def __bytes__(self):
"""Return string representation for unit."""
return unit_format.Generic.to_string(self).encode("unicode_escape")
def __str__(self):
"""Return string representation for unit."""
return unit_format.Generic.to_string(self)
def __repr__(self):
string = unit_format.Generic.to_string(self)
return f'Unit("{string}")'
def _get_physical_type_id(self):
"""
Returns an identifier that uniquely identifies the physical
type of this unit. It is comprised of the bases and powers of
this unit, without the scale. Since it is hashable, it is
useful as a dictionary key.
"""
if self._type_id is None:
unit = self.decompose()
self._type_id = tuple(zip((base.name for base in unit.bases), unit.powers))
return self._type_id
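# Illustrative value (assuming SI decomposition): the id of ``km / s`` is
# ``(('m', 1), ('s', -1))`` -- base names and powers, with the scale dropped.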
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. Perhaps you meant to_string()?"
)
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. Perhaps you meant to_string()?"
)
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
raise AttributeError(
"Can not get aliases from unnamed units. Perhaps you meant to_string()?"
)
@property
def scale(self):
"""
Return the scale of the unit.
"""
return 1.0
@property
def bases(self):
"""
Return the bases of the unit.
"""
return [self]
@property
def powers(self):
"""
Return the powers of the unit.
"""
return [1]
def to_string(self, format=unit_format.Generic, **kwargs):
r"""Output the unit in the given format as a string.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
**kwargs
Further options forwarded to the formatter. Currently
recognized is ``fraction``, which can take the following values:
- `False` : display unit bases with negative powers as they are;
- 'inline' or `True` : use a single-line fraction;
- 'multiline' : use a multiline fraction (available for the
'latex', 'console' and 'unicode' formats only).
Raises
------
TypeError
If ``format`` is of the wrong type.
ValueError
If ``format`` or ``fraction`` are not recognized.
Examples
--------
>>> import astropy.units as u
>>> kms = u.Unit('km / s')
>>> kms.to_string() # Generic uses fraction='inline' by default
'km / s'
>>> kms.to_string('latex') # Latex uses fraction='multiline' by default
'$\\mathrm{\\frac{km}{s}}$'
>>> print(kms.to_string('unicode', fraction=False))
km s⁻¹
>>> print(kms.to_string('unicode', fraction='inline'))
km / s
>>> print(kms.to_string('unicode', fraction='multiline'))
km
──
s
"""
f = unit_format.get_format(format)
return f.to_string(self, **kwargs)
def __format__(self, format_spec):
"""Try to format units using a formatter."""
try:
return self.to_string(format=format_spec)
except ValueError:
return format(str(self), format_spec)
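# Example behaviour (illustrative): ``f"{u.m / u.s**2:unicode}"`` is rendered
# by the 'unicode' formatter, while an unrecognized spec such as
# ``f"{u.m:>10}"`` falls back to formatting ``str(self)`` (right-aligned text).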
@staticmethod
def _normalize_equivalencies(equivalencies):
"""Normalizes equivalencies, ensuring each is a 4-tuple.
The resulting tuple is of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs, or None
Returns
-------
A normalized list, including possible global defaults set by, e.g.,
`set_enabled_equivalencies`, except when `equivalencies`=`None`,
in which case the returned list is always empty.
Raises
------
ValueError
If an equivalency cannot be interpreted.
"""
normalized = _normalize_equivalencies(equivalencies)
if equivalencies is not None:
normalized += get_current_unit_registry().equivalencies
return normalized
def __pow__(self, p):
p = validate_power(p)
return CompositeUnit(1, [self], [p], _error_check=False)
def __truediv__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
return CompositeUnit(1, [self, m], [1, -1], _error_check=False)
try:
# Cannot handle this as Unit, re-try as Quantity
from .quantity import Quantity
return Quantity(1, self) / m
except TypeError:
return NotImplemented
def __rtruediv__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) / self
try:
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a
# unit, for the common case of <array> / <unit>.
from .quantity import Quantity
if hasattr(m, "unit"):
result = Quantity(m)
result /= self
return result
else:
return Quantity(m, self ** (-1))
except TypeError:
return NotImplemented
def __mul__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
elif self.is_unity():
return m
return CompositeUnit(1, [self, m], [1, 1], _error_check=False)
# Cannot handle this as Unit, re-try as Quantity.
try:
from .quantity import Quantity
return Quantity(1, unit=self) * m
except TypeError:
return NotImplemented
def __rmul__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) * self
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a unit
# for the common case of <array> * <unit>.
try:
from .quantity import Quantity
if hasattr(m, "unit"):
result = Quantity(m)
result *= self
return result
else:
return Quantity(m, unit=self)
except TypeError:
return NotImplemented
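# Illustration of the paths above (values assumed):
#   ``np.arange(3) * u.m`` -> ``Quantity([0., 1., 2.], unit=m)``.
# Objects that already expose a ``.unit`` attribute are first wrapped as a
# Quantity and then multiplied in place, preserving their unit.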
def __rlshift__(self, m):
try:
from .quantity import Quantity
return Quantity(m, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __rrshift__(self, m):
warnings.warn(
">> is not implemented. Did you mean to convert "
f"to a Quantity with unit {m} using '<<'?",
AstropyWarning,
)
return NotImplemented
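# Illustration (values assumed): ``[1, 2] << u.m`` wraps the list as
# ``Quantity([1., 2.], unit=m)`` without copying when possible, whereas
# ``[1, 2] >> u.m`` only triggers the warning above and ultimately raises
# ``TypeError``.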
def __hash__(self):
if self._hash is None:
parts = (
[str(self.scale)]
+ [x.name for x in self.bases]
+ [str(x) for x in self.powers]
)
self._hash = hash(tuple(parts))
return self._hash
def __getstate__(self):
# If we get pickled, we should *not* store the memoized members since
# hashes of strings vary between sessions.
state = self.__dict__.copy()
state.pop("_hash", None)
state.pop("_type_id", None)
return state
def __eq__(self, other):
if self is other:
return True
try:
other = Unit(other, parse_strict="silent")
except (ValueError, UnitsError, TypeError):
return NotImplemented
# Other is unit-like, but the test below requires it is a UnitBase
# instance; if it is not, give up (so that other can try).
if not isinstance(other, UnitBase):
return NotImplemented
try:
return is_effectively_unity(self._to(other))
except UnitsError:
return False
def __ne__(self, other):
return not (self == other)
def __le__(self, other):
scale = self._to(Unit(other))
return scale <= 1.0 or is_effectively_unity(scale)
def __ge__(self, other):
scale = self._to(Unit(other))
return scale >= 1.0 or is_effectively_unity(scale)
def __lt__(self, other):
return not (self >= other)
def __gt__(self, other):
return not (self <= other)
def __neg__(self):
return self * -1.0
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : `~astropy.units.Unit`, str, or tuple
The unit to convert to. If a tuple of units is specified, this
method returns `True` if the unit matches any of those in the tuple.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
bool
"""
equivalencies = self._normalize_equivalencies(equivalencies)
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies) for u in other)
other = Unit(other, parse_strict="silent")
return self._is_equivalent(other, equivalencies)
def _is_equivalent(self, other, equivalencies=[]):
"""Returns `True` if this unit is equivalent to `other`.
See `is_equivalent`, except that a proper Unit object should be
given (i.e., no string) and that the equivalency list should be
normalized using `_normalize_equivalencies`.
"""
if isinstance(other, UnrecognizedUnit):
return False
if self._get_physical_type_id() == other._get_physical_type_id():
return True
elif len(equivalencies):
unit = self.decompose()
other = other.decompose()
for a, b, forward, backward in equivalencies:
if b is None:
# after canceling, is what's left convertible
# to dimensionless (according to the equivalency)?
try:
(other / unit).decompose([a])
return True
except Exception:
pass
elif (a._is_equivalent(unit) and b._is_equivalent(other)) or (
b._is_equivalent(unit) and a._is_equivalent(other)
):
return True
return False
def _apply_equivalencies(self, unit, other, equivalencies):
"""
Internal function (used from `_get_converter`) to apply
equivalence pairs.
"""
def make_converter(scale1, func, scale2):
def convert(v):
return func(_condition_arg(v) / scale1) * scale2
return convert
for funit, tunit, a, b in equivalencies:
if tunit is None:
ratio = other.decompose() / unit.decompose()
try:
ratio_in_funit = ratio.decompose([funit])
return make_converter(ratio_in_funit.scale, a, 1.0)
except UnitsError:
pass
else:
try:
scale1 = funit._to(unit)
scale2 = tunit._to(other)
return make_converter(scale1, a, scale2)
except UnitsError:
pass
try:
scale1 = tunit._to(unit)
scale2 = funit._to(other)
return make_converter(scale1, b, scale2)
except UnitsError:
pass
def get_err_str(unit):
unit_str = unit.to_string("unscaled")
physical_type = unit.physical_type
if physical_type != "unknown":
unit_str = f"'{unit_str}' ({physical_type})"
else:
unit_str = f"'{unit_str}'"
return unit_str
unit_str = get_err_str(unit)
other_str = get_err_str(other)
raise UnitConversionError(f"{unit_str} and {other_str} are not convertible")
def _get_converter(self, other, equivalencies=[]):
"""Get a converter for values in ``self`` to ``other``.
If no conversion is necessary, returns ``unit_scale_converter``
(which is used as a check in quantity helpers).
"""
# First see if it is just a scaling.
try:
scale = self._to(other)
except UnitsError:
pass
else:
if scale == 1.0:
return unit_scale_converter
else:
return lambda val: scale * _condition_arg(val)
# if that doesn't work, maybe we can do it with equivalencies?
try:
return self._apply_equivalencies(
self, other, self._normalize_equivalencies(equivalencies)
)
except UnitsError as exc:
# Last hope: maybe other knows how to do it?
# We assume the equivalencies have the unit itself as first item.
# TODO: maybe better for other to have a `_back_converter` method?
if hasattr(other, "equivalencies"):
for funit, tunit, a, b in other.equivalencies:
if other is funit:
try:
converter = self._get_converter(tunit, equivalencies)
except Exception:
pass
else:
return lambda v: b(converter(v))
raise exc
def _to(self, other):
"""
Returns the scale to the specified unit.
See `to`, except that a Unit object should be given (i.e., no
string), and that all defaults are used, i.e., no
equivalencies and value=1.
"""
# There are many cases where we just want to ensure a Quantity is
# of a particular unit, without checking whether it's already in
# a particular unit. If we're being asked to convert from a unit
# to itself, we can short-circuit all of this.
if self is other:
return 1.0
# Don't presume decomposition is possible; e.g.,
# conversion to function units is through equivalencies.
if isinstance(other, UnitBase):
self_decomposed = self.decompose()
other_decomposed = other.decompose()
# Check quickly whether equivalent. This is faster than
# `is_equivalent`, because it doesn't generate the entire
# physical type list of both units. In other words it "fails
# fast".
if self_decomposed.powers == other_decomposed.powers and all(
self_base is other_base
for (self_base, other_base) in zip(
self_decomposed.bases, other_decomposed.bases
)
):
return self_decomposed.scale / other_decomposed.scale
raise UnitConversionError(f"'{self!r}' is not a scaled version of '{other!r}'")
def to(self, other, value=UNITY, equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : unit-like
The unit to convert to.
value : int, float, or scalar array-like, optional
Value(s) in the current unit to be converted to the
specified unit. If not provided, defaults to 1.0
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
UnitsError
If units are inconsistent
"""
if other is self and value is UNITY:
return UNITY
else:
return self._get_converter(Unit(other), equivalencies)(value)
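# Illustrative conversions (standard factors; the equivalency example is a
# sketch):
#   u.km.to(u.m)            -> 1000.0
#   u.km.to(u.m, [1., 2.])  -> array([1000., 2000.])
#   u.arcsec.to(u.pc, 1., equivalencies=u.parallax())  -> ~1.0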
def in_units(self, other, value=1.0, equivalencies=[]):
"""
Alias for `to` for backward compatibility with pynbody.
"""
return self.to(other, value=value, equivalencies=equivalencies)
def decompose(self, bases=set()):
"""
Return a unit object composed of only irreducible units.
Parameters
----------
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `UnitsError` if it's not possible
to do so.
Returns
-------
unit : `~astropy.units.CompositeUnit`
New object containing only irreducible unit objects.
"""
raise NotImplementedError()
def _compose(
self, equivalencies=[], namespace=[], max_depth=2, depth=0, cached_results=None
):
def is_final_result(unit):
# Returns True if this result contains only the expected
# units
return all(base in namespace for base in unit.bases)
unit = self.decompose()
key = hash(unit)
cached = cached_results.get(key)
if cached is not None:
if isinstance(cached, Exception):
raise cached
return cached
# Prevent too many levels of recursion
# And special case for dimensionless unit
if depth >= max_depth:
cached_results[key] = [unit]
return [unit]
# Make a list including all of the equivalent units
units = [unit]
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self._is_equivalent(funit):
scale = funit.decompose().scale / unit.scale
units.append(Unit(a(1.0 / scale) * tunit).decompose())
elif self._is_equivalent(tunit):
scale = tunit.decompose().scale / unit.scale
units.append(Unit(b(1.0 / scale) * funit).decompose())
else:
if self._is_equivalent(funit):
units.append(Unit(unit.scale))
# Store partial results
partial_results = []
# Store final results that reduce to a single unit or pair of
# units
if len(unit.bases) == 0:
final_results = [{unit}, set()]
else:
final_results = [set(), set()]
for tunit in namespace:
tunit_decomposed = tunit.decompose()
for u in units:
# If the unit is a base unit, look for an exact match
# to one of the bases of the target unit. If found,
# factor by the same power as the target unit's base.
# This allows us to factor out fractional powers
# without needing to do an exhaustive search.
if len(tunit_decomposed.bases) == 1:
for base, power in zip(u.bases, u.powers):
if tunit_decomposed._is_equivalent(base):
tunit = tunit**power
tunit_decomposed = tunit_decomposed**power
break
composed = (u / tunit_decomposed).decompose()
factored = composed * tunit
len_bases = len(composed.bases)
if is_final_result(factored) and len_bases <= 1:
final_results[len_bases].add(factored)
else:
partial_results.append((len_bases, composed, tunit))
# Do we have any minimal results?
for final_result in final_results:
if len(final_result):
results = final_results[0].union(final_results[1])
cached_results[key] = results
return results
partial_results.sort(key=operator.itemgetter(0))
# ...we have to recurse and try to further compose
results = []
for len_bases, composed, tunit in partial_results:
try:
composed_list = composed._compose(
equivalencies=equivalencies,
namespace=namespace,
max_depth=max_depth,
depth=depth + 1,
cached_results=cached_results,
)
except UnitsError:
composed_list = []
for subcomposed in composed_list:
results.append((len(subcomposed.bases), subcomposed, tunit))
if len(results):
results.sort(key=operator.itemgetter(0))
min_length = results[0][0]
subresults = set()
for len_bases, composed, tunit in results:
if len_bases > min_length:
break
else:
factored = composed * tunit
if is_final_result(factored):
subresults.add(factored)
if len(subresults):
cached_results[key] = subresults
return subresults
if not is_final_result(self):
result = UnitsError(
f"Cannot represent unit {self} in terms of the given units"
)
cached_results[key] = result
raise result
cached_results[key] = [self]
return [self]
def compose(
self, equivalencies=[], units=None, max_depth=2, include_prefix_units=None
):
"""
Return the simplest possible composite unit(s) that represent
the given unit. Since there may be multiple equally simple
compositions of the unit, a list of units is always returned.
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also list. See
:ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
units : set of `~astropy.units.Unit`, optional
If not provided, any known units may be used to compose
into. Otherwise, ``units`` is a dict, module or sequence
containing the units to compose into.
max_depth : int, optional
The maximum recursion depth to use when composing into
composite units.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `True` if a sequence is passed in to ``units``,
`False` otherwise.
Returns
-------
units : list of `CompositeUnit`
A list of candidate compositions. These will all be
equally simple, but it may not be possible to
automatically determine which of the candidates are
better.
"""
# if units parameter is specified and is a sequence (list|tuple),
# include_prefix_units is turned on by default. Ex: units=[u.kpc]
if include_prefix_units is None:
include_prefix_units = isinstance(units, (list, tuple))
# Pre-normalize the equivalencies list
equivalencies = self._normalize_equivalencies(equivalencies)
# The namespace of units to compose into should be filtered to
# only include units with bases in common with self, otherwise
# they can't possibly provide useful results. Having too many
# destination units greatly increases the search space.
def has_bases_in_common(a, b):
if len(a.bases) == 0 and len(b.bases) == 0:
return True
for ab in a.bases:
for bb in b.bases:
if ab == bb:
return True
return False
def has_bases_in_common_with_equiv(unit, other):
if has_bases_in_common(unit, other):
return True
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if unit._is_equivalent(funit):
if has_bases_in_common(tunit.decompose(), other):
return True
elif unit._is_equivalent(tunit):
if has_bases_in_common(funit.decompose(), other):
return True
else:
if unit._is_equivalent(funit):
if has_bases_in_common(dimensionless_unscaled, other):
return True
return False
def filter_units(units):
filtered_namespace = set()
for tunit in units:
if (
isinstance(tunit, UnitBase)
and (include_prefix_units or not isinstance(tunit, PrefixUnit))
and has_bases_in_common_with_equiv(decomposed, tunit.decompose())
):
filtered_namespace.add(tunit)
return filtered_namespace
decomposed = self.decompose()
if units is None:
units = filter_units(self._get_units_with_same_physical_type(equivalencies))
if len(units) == 0:
units = get_current_unit_registry().non_prefix_units
elif isinstance(units, dict):
units = set(filter_units(units.values()))
elif inspect.ismodule(units):
units = filter_units(vars(units).values())
else:
units = filter_units(_flatten_units_collection(units))
def sort_results(results):
if not len(results):
return []
# Sort the results so the simplest ones appear first.
# Simplest is defined as "the minimum sum of absolute
# powers" (i.e. the fewest bases), and preference should
# be given to results where the sum of powers is positive
# and the scale is exactly equal to 1.0
results = list(results)
results.sort(key=lambda x: np.abs(x.scale))
results.sort(key=lambda x: np.sum(np.abs(x.powers)))
results.sort(key=lambda x: np.sum(x.powers) < 0.0)
results.sort(key=lambda x: not is_effectively_unity(x.scale))
last_result = results[0]
filtered = [last_result]
for result in results[1:]:
if str(result) != str(last_result):
filtered.append(result)
last_result = result
return filtered
return sort_results(
self._compose(
equivalencies=equivalencies,
namespace=units,
max_depth=max_depth,
depth=0,
cached_results={},
)
)
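# Illustrative use (candidates depend on the enabled unit registry):
#   (u.N * u.m).compose() typically lists Unit("J") first;
#   passing ``units=u.imperial`` restricts the search to that module.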
def to_system(self, system):
"""
Converts this unit into ones belonging to the given system.
Since more than one result may be possible, a list is always
returned.
Parameters
----------
system : module
The module that defines the unit system. Commonly used
ones include `astropy.units.si` and `astropy.units.cgs`.
To use your own module it must contain unit objects and a
sequence member named ``bases`` containing the base units of
the system.
Returns
-------
units : list of `CompositeUnit`
The list is ranked so that units containing only the base
units of that system will appear first.
"""
bases = set(system.bases)
def score(compose):
# In case that compose._bases has no elements we return
# 'np.inf' as 'score value'. It does not really matter which
# number we would return. This case occurs for instance for
# dimensionless quantities:
compose_bases = compose.bases
if len(compose_bases) == 0:
return np.inf
else:
sum = 0
for base in compose_bases:
if base in bases:
sum += 1
return sum / float(len(compose_bases))
x = self.decompose(bases=bases)
composed = x.compose(units=system)
composed = sorted(composed, key=score, reverse=True)
return composed
@lazyproperty
def si(self):
"""
Returns a copy of the current `Unit` instance in SI units.
"""
from . import si
return self.to_system(si)[0]
@lazyproperty
def cgs(self):
"""
Returns a copy of the current `Unit` instance with CGS units.
"""
from . import cgs
return self.to_system(cgs)[0]
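# Quick illustration (standard conversion factors; the exact composite
# chosen may vary): ``u.km.cgs`` is a composite equal to 1e5 cm, and
# ``u.g.si`` is a composite equal to 1e-3 kg.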
@property
def physical_type(self):
"""
Physical type(s) dimensionally compatible with the unit.
Returns
-------
`~astropy.units.physical.PhysicalType`
A representation of the physical type(s) of a unit.
Examples
--------
>>> from astropy import units as u
>>> u.m.physical_type
PhysicalType('length')
>>> (u.m ** 2 / u.s).physical_type
PhysicalType({'diffusivity', 'kinematic viscosity'})
Physical types can be compared to other physical types
(recommended in packages) or to strings.
>>> area = (u.m ** 2).physical_type
>>> area == u.m.physical_type ** 2
True
>>> area == "area"
True
`~astropy.units.physical.PhysicalType` objects can be used for
dimensional analysis.
>>> number_density = u.m.physical_type ** -3
>>> velocity = (u.m / u.s).physical_type
>>> number_density * velocity
PhysicalType('particle flux')
"""
from . import physical
return physical.get_physical_type(self)
def _get_units_with_same_physical_type(self, equivalencies=[]):
"""
Return a list of registered units with the same physical type
as this unit.
This function is used by Quantity to add its built-in
conversions to equivalent units.
This is a private method, since end users should be encouraged
to use the more powerful `compose` and `find_equivalent_units`
methods (which use this under the hood).
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also pull options from.
See :ref:`astropy:unit_equivalencies`. It must already be
normalized using `_normalize_equivalencies`.
"""
unit_registry = get_current_unit_registry()
units = set(unit_registry.get_units_with_physical_type(self))
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self.is_equivalent(funit) and tunit not in units:
units.update(unit_registry.get_units_with_physical_type(tunit))
if self._is_equivalent(tunit) and funit not in units:
units.update(unit_registry.get_units_with_physical_type(funit))
else:
if self.is_equivalent(funit):
units.add(dimensionless_unscaled)
return units
class EquivalentUnitsList(list):
"""
A class to handle pretty-printing the result of
`find_equivalent_units`.
"""
HEADING_NAMES = ("Primary name", "Unit definition", "Aliases")
ROW_LEN = 3 # len(HEADING_NAMES), but hard-code since it is constant
NO_EQUIV_UNITS_MSG = "There are no equivalent units"
def __repr__(self):
if len(self) == 0:
return self.NO_EQUIV_UNITS_MSG
else:
lines = self._process_equivalent_units(self)
lines.insert(0, self.HEADING_NAMES)
widths = [0] * self.ROW_LEN
for line in lines:
for i, col in enumerate(line):
widths[i] = max(widths[i], len(col))
f = " {{0:<{}s}} | {{1:<{}s}} | {{2:<{}s}}".format(*widths)
lines = [f.format(*line) for line in lines]
lines = lines[0:1] + ["["] + [f"{x} ," for x in lines[1:]] + ["]"]
return "\n".join(lines)
def _repr_html_(self):
"""
Outputs an HTML table representation within Jupyter notebooks.
"""
if len(self) == 0:
return f"<p>{self.NO_EQUIV_UNITS_MSG}</p>"
else:
# HTML tags to use to compose the table in HTML
blank_table = '<table style="width:50%">{}</table>'
blank_row_container = "<tr>{}</tr>"
heading_row_content = "<th>{}</th>" * self.ROW_LEN
data_row_content = "<td>{}</td>" * self.ROW_LEN
# The HTML will be rendered & the table is simple, so don't
# bother to include newlines & indentation for the HTML code.
heading_row = blank_row_container.format(
heading_row_content.format(*self.HEADING_NAMES)
)
data_rows = self._process_equivalent_units(self)
all_rows = heading_row
for row in data_rows:
html_row = blank_row_container.format(data_row_content.format(*row))
all_rows += html_row
return blank_table.format(all_rows)
@staticmethod
def _process_equivalent_units(equiv_units_data):
"""
Extract attributes of the equivalent units and sort them before formatting.
"""
processed_equiv_units = []
for u in equiv_units_data:
irred = u.decompose().to_string()
if irred == u.name:
irred = "irreducible"
processed_equiv_units.append((u.name, irred, ", ".join(u.aliases)))
processed_equiv_units.sort()
return processed_equiv_units
def find_equivalent_units(
self, equivalencies=[], units=None, include_prefix_units=False
):
"""
Return a list of all the units that are the same type as ``self``.
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also list. See
:ref:`astropy:unit_equivalencies`.
Any list given, including an empty one, supersedes global defaults
that may be in effect (as set by `set_enabled_equivalencies`)
units : set of `~astropy.units.Unit`, optional
If not provided, all defined units will be searched for
equivalencies. Otherwise, may be a dict, module or
sequence containing the units to search for equivalencies.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `False`.
Returns
-------
units : list of `UnitBase`
A list of unit objects equivalent to ``self``. A subclass of
`list` (``EquivalentUnitsList``) is returned that
pretty-prints the list of units when output.
"""
results = self.compose(
equivalencies=equivalencies,
units=units,
max_depth=1,
include_prefix_units=include_prefix_units,
)
results = {x.bases[0] for x in results if len(x.bases) == 1}
return self.EquivalentUnitsList(results)
def is_unity(self):
"""
Returns `True` if the unit is unscaled and dimensionless.
"""
return False
class NamedUnit(UnitBase):
"""
The base class of units that have a name.
Parameters
----------
st : str, list of str, 2-tuple
The name of the unit. If a list of strings, the first element
is the canonical (short) name, and the rest of the elements
are aliases. If a tuple of lists, the first element is a list
of short names, and the second element is a list of long
names; all but the first short name are considered "aliases".
Each name *should* be a valid Python identifier to make it
easy to access, but this is not required.
namespace : dict, optional
When provided, inject the unit, and all of its aliases, in the
given namespace dictionary. If a unit by the same name is
already in the namespace, a ValueError is raised.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, the ``format`` argument should be set to::
{'latex': r'\\Omega'}
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
"""
def __init__(self, st, doc=None, format=None, namespace=None):
UnitBase.__init__(self)
if isinstance(st, (bytes, str)):
self._names = [st]
self._short_names = [st]
self._long_names = []
elif isinstance(st, tuple):
if not len(st) == 2:
raise ValueError("st must be string, list or 2-tuple")
self._names = st[0] + [n for n in st[1] if n not in st[0]]
if not len(self._names):
raise ValueError("must provide at least one name")
self._short_names = st[0][:]
self._long_names = st[1][:]
else:
if len(st) == 0:
raise ValueError("st list must have at least one entry")
self._names = st[:]
self._short_names = [st[0]]
self._long_names = st[1:]
if format is None:
format = {}
self._format = format
if doc is None:
doc = self._generate_doc()
else:
doc = textwrap.dedent(doc)
doc = textwrap.fill(doc)
self.__doc__ = doc
self._inject(namespace)
def _generate_doc(self):
"""
Generate a docstring for the unit if the user didn't supply
one. This is only used from the constructor and may be
overridden in subclasses.
"""
names = self.names
if len(self.names) > 1:
return f"{names[1]} ({names[0]})"
else:
return names[0]
def get_format_name(self, format):
"""
Get a name for this unit that is specific to a particular
format.
Uses the dictionary passed into the `format` kwarg in the
constructor.
Parameters
----------
format : str
The name of the format
Returns
-------
name : str
The name of the unit for the given format.
"""
return self._format.get(format, self.name)
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
return self._names
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
return self._names[0]
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
return self._names[1:]
@property
def short_names(self):
"""
Returns all of the short names associated with this unit.
"""
return self._short_names
@property
def long_names(self):
"""
Returns all of the long names associated with this unit.
"""
return self._long_names
def _inject(self, namespace=None):
"""
Injects the unit, and all of its aliases, in the given
namespace dictionary.
"""
if namespace is None:
return
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for name in self._names:
if name in namespace and self != namespace[name]:
raise ValueError(
f"Object with name {name!r} already exists in "
f"given namespace ({namespace[name]!r})."
)
for name in self._names:
namespace[name] = self
def _recreate_irreducible_unit(cls, names, registered):
"""
This is used to reconstruct units when passed around by
multiprocessing.
"""
registry = get_current_unit_registry().registry
if names[0] in registry:
# If in local registry return that object.
return registry[names[0]]
else:
# otherwise, recreate the unit.
unit = cls(names)
if registered:
# If not in local registry but registered in origin registry,
# enable unit in local registry.
get_current_unit_registry().add_enabled_units([unit])
return unit
class IrreducibleUnit(NamedUnit):
"""
Irreducible units are the units that all other units are defined
in terms of.
Examples are meters, seconds, kilograms, amperes, etc. There is
only one instance of such a unit per type.
"""
def __reduce__(self):
# When IrreducibleUnit objects are passed to other processes
# over multiprocessing, they need to be recreated to be the
# ones already in the subprocesses' namespace, not new
# objects, or they will be considered "unconvertible".
# Therefore, we have a custom pickler/unpickler that
# understands how to recreate the Unit on the other side.
registry = get_current_unit_registry().registry
return (
_recreate_irreducible_unit,
(self.__class__, list(self.names), self.name in registry),
self.__getstate__(),
)
@property
def represents(self):
"""The unit that this named unit represents.
For an irreducible unit, that is always itself.
"""
return self
def decompose(self, bases=set()):
if len(bases) and self not in bases:
for base in bases:
try:
scale = self._to(base)
except UnitsError:
pass
else:
if is_effectively_unity(scale):
return base
else:
return CompositeUnit(scale, [base], [1], _error_check=False)
raise UnitConversionError(
f"Unit {self} can not be decomposed into the requested bases"
)
return self
class UnrecognizedUnit(IrreducibleUnit):
"""
A unit that did not parse correctly. This allows for
round-tripping it as a string, but no unit operations actually work
on it.
Parameters
----------
st : str
The name of the unit.
"""
# For UnrecognizedUnits, we want to use "standard" Python
# pickling, not the special case that is used for
# IrreducibleUnits.
__reduce__ = object.__reduce__
def __repr__(self):
return f"UnrecognizedUnit({str(self)})"
def __bytes__(self):
return self.name.encode("ascii", "replace")
def __str__(self):
return self.name
def to_string(self, format=None):
return self.name
def _unrecognized_operator(self, *args, **kwargs):
raise ValueError(
f"The unit {self.name!r} is unrecognized, so all arithmetic operations "
"with it are invalid."
)
__pow__ = __truediv__ = __rtruediv__ = __mul__ = __rmul__ = _unrecognized_operator
__lt__ = __gt__ = __le__ = __ge__ = __neg__ = _unrecognized_operator
def __eq__(self, other):
try:
other = Unit(other, parse_strict="silent")
except (ValueError, UnitsError, TypeError):
return NotImplemented
return isinstance(other, type(self)) and self.name == other.name
def __ne__(self, other):
return not (self == other)
def is_equivalent(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
return self == other
def _get_converter(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
raise ValueError(
f"The unit {self.name!r} is unrecognized. It can not be converted "
"to other units."
)
def get_format_name(self, format):
return self.name
def is_unity(self):
return False
class _UnitMetaClass(type):
"""
This metaclass exists because the Unit constructor should
sometimes return instances that already exist. This "overrides"
the constructor before the new instance is actually created, so we
can return an existing one.
"""
def __call__(
self,
s="",
represents=None,
format=None,
namespace=None,
doc=None,
parse_strict="raise",
):
# Short-circuit if we're already a unit
if hasattr(s, "_get_physical_type_id"):
return s
# turn possible Quantity input for s or represents into a Unit
from .quantity import Quantity
if isinstance(represents, Quantity):
if is_effectively_unity(represents.value):
represents = represents.unit
else:
represents = CompositeUnit(
represents.value * represents.unit.scale,
bases=represents.unit.bases,
powers=represents.unit.powers,
_error_check=False,
)
if isinstance(s, Quantity):
if is_effectively_unity(s.value):
s = s.unit
else:
s = CompositeUnit(
s.value * s.unit.scale,
bases=s.unit.bases,
powers=s.unit.powers,
_error_check=False,
)
# now decide what we really need to do; define derived Unit?
if isinstance(represents, UnitBase):
# This has the effect of calling the real __new__ and
# __init__ on the Unit class.
return super().__call__(
s, represents, format=format, namespace=namespace, doc=doc
)
# or interpret a Quantity (now converted to a unit), a string, or a number?
if isinstance(s, UnitBase):
return s
elif isinstance(s, (bytes, str)):
if len(s.strip()) == 0:
# Return the NULL unit
return dimensionless_unscaled
if format is None:
format = unit_format.Generic
f = unit_format.get_format(format)
if isinstance(s, bytes):
s = s.decode("ascii")
try:
return f.parse(s)
except NotImplementedError:
raise
except Exception as e:
if parse_strict == "silent":
pass
else:
# Deliberately not issubclass here. Subclasses
# should use their name.
if f is not unit_format.Generic:
format_clause = f.name + " "
else:
format_clause = ""
msg = (
f"'{s}' did not parse as {format_clause}unit: {str(e)} "
"If this is meant to be a custom unit, "
"define it with 'u.def_unit'. To have it "
"recognized inside a file reader or other code, "
"enable it with 'u.add_enabled_units'. "
"For details, see "
"https://docs.astropy.org/en/latest/units/combining_and_defining.html"
)
if parse_strict == "raise":
raise ValueError(msg)
elif parse_strict == "warn":
warnings.warn(msg, UnitsWarning)
else:
raise ValueError(
"'parse_strict' must be 'warn', 'raise' or 'silent'"
)
return UnrecognizedUnit(s)
elif isinstance(s, (int, float, np.floating, np.integer)):
return CompositeUnit(s, [], [], _error_check=False)
elif isinstance(s, tuple):
from .structured import StructuredUnit
return StructuredUnit(s)
elif s is None:
raise TypeError("None is not a valid Unit")
else:
raise TypeError(f"{s} can not be converted to a Unit")
class Unit(NamedUnit, metaclass=_UnitMetaClass):
"""
The main unit class.
There are a number of different ways to construct a Unit, but the
constructor always returns a `UnitBase` instance. If the arguments refer to
an already-existing unit, that existing unit instance is returned,
rather than a new one.
- From a string::
Unit(s, format=None, parse_strict='raise')
Construct from a string representing a (possibly compound) unit.
The optional `format` keyword argument specifies the format the
string is in, by default ``"generic"``. For a description of
the available formats, see `astropy.units.format`.
The optional ``parse_strict`` keyword controls what happens when an
unrecognized unit string is passed in. It may be one of the following:
- ``'raise'``: (default) raise a ValueError exception.
- ``'warn'``: emit a Warning, and return an
`UnrecognizedUnit` instance.
- ``'silent'``: return an `UnrecognizedUnit` instance.
- From a number::
Unit(number)
Creates a dimensionless unit.
- From a `UnitBase` instance::
Unit(unit)
Returns the given unit unchanged.
- From no arguments::
Unit()
Returns the dimensionless unit.
- The last form, which creates a new `Unit`, is described in detail
below.
See also: https://docs.astropy.org/en/stable/units/
Parameters
----------
st : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance
The unit that this named unit represents.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, the ``format`` argument should be set to::
{'latex': r'\\Omega'}
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace.
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
"""
def __init__(self, st, represents=None, doc=None, format=None, namespace=None):
represents = Unit(represents)
self._represents = represents
NamedUnit.__init__(self, st, namespace=namespace, doc=doc, format=format)
@property
def represents(self):
"""The unit that this named unit represents."""
return self._represents
def decompose(self, bases=set()):
return self._represents.decompose(bases=bases)
def is_unity(self):
return self._represents.is_unity()
def __hash__(self):
if self._hash is None:
self._hash = hash((self.name, self._represents))
return self._hash
@classmethod
def _from_physical_type_id(cls, physical_type_id):
# get string bases and powers from the ID tuple
bases = [cls(base) for base, _ in physical_type_id]
powers = [power for _, power in physical_type_id]
if len(physical_type_id) == 1 and powers[0] == 1:
unit = bases[0]
else:
unit = CompositeUnit(1, bases, powers, _error_check=False)
return unit
class PrefixUnit(Unit):
"""
A unit that is simply a SI-prefixed version of another unit.
For example, ``mm`` is a `PrefixUnit` of ``.001 * m``.
The constructor is the same as for `Unit`.
"""
class CompositeUnit(UnitBase):
"""
Create a composite unit using expressions of previously defined
units.
Direct use of this class is not recommended. Instead use the
factory function `Unit` and arithmetic operators to compose
units.
Parameters
----------
scale : number
A scaling factor for the unit.
bases : sequence of `UnitBase`
A sequence of units this unit is composed of.
powers : sequence of numbers
A sequence of powers (in parallel with ``bases``) for each
of the base units.
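Examples
--------
As noted above, composite units are normally obtained by combining
existing units with arithmetic operators rather than by calling this
class directly; a small, representative sketch:
>>> from astropy import units as u
>>> v = u.km / u.s
>>> v.bases
[Unit("km"), Unit("s")]
>>> v.powers
[1, -1]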
"""
_decomposed_cache = None
def __init__(
self,
scale,
bases,
powers,
decompose=False,
decompose_bases=set(),
_error_check=True,
):
# There are many cases internal to astropy.units where we
# already know that all the bases are Unit objects, and the
# powers have been validated. In those cases, we can skip the
# error checking for performance reasons. When the private
# kwarg `_error_check` is False, the error checking is turned
# off.
if _error_check:
for base in bases:
if not isinstance(base, UnitBase):
raise TypeError("bases must be sequence of UnitBase instances")
powers = [validate_power(p) for p in powers]
if not decompose and len(bases) == 1 and powers[0] >= 0:
# Short-cut; with one unit there's nothing to expand and gather,
# as that has happened already when creating the unit. But do only
# positive powers, since for negative powers we need to re-sort.
unit = bases[0]
power = powers[0]
if power == 1:
scale *= unit.scale
self._bases = unit.bases
self._powers = unit.powers
elif power == 0:
self._bases = []
self._powers = []
else:
scale *= unit.scale**power
self._bases = unit.bases
self._powers = [
operator.mul(*resolve_fractions(p, power)) for p in unit.powers
]
self._scale = sanitize_scale(scale)
else:
# Regular case: use inputs as preliminary scale, bases, and powers,
# then "expand and gather" identical bases, sanitize the scale, &c.
self._scale = scale
self._bases = bases
self._powers = powers
self._expand_and_gather(decompose=decompose, bases=decompose_bases)
def __repr__(self):
if len(self._bases):
return super().__repr__()
else:
if self._scale != 1.0:
return f"Unit(dimensionless with a scale of {self._scale})"
else:
return "Unit(dimensionless)"
@property
def scale(self):
"""
Return the scale of the composite unit.
"""
return self._scale
@property
def bases(self):
"""
Return the bases of the composite unit.
"""
return self._bases
@property
def powers(self):
"""
Return the powers of the composite unit.
"""
return self._powers
def _expand_and_gather(self, decompose=False, bases=set()):
def add_unit(unit, power, scale):
if bases and unit not in bases:
for base in bases:
try:
scale *= unit._to(base) ** power
except UnitsError:
pass
else:
unit = base
break
if unit in new_parts:
a, b = resolve_fractions(new_parts[unit], power)
new_parts[unit] = a + b
else:
new_parts[unit] = power
return scale
new_parts = {}
scale = self._scale
for b, p in zip(self._bases, self._powers):
if decompose and b not in bases:
b = b.decompose(bases=bases)
if isinstance(b, CompositeUnit):
scale *= b._scale**p
for b_sub, p_sub in zip(b._bases, b._powers):
a, b = resolve_fractions(p_sub, p)
scale = add_unit(b_sub, a * b, scale)
else:
scale = add_unit(b, p, scale)
new_parts = [x for x in new_parts.items() if x[1] != 0]
new_parts.sort(key=lambda x: (-x[1], getattr(x[0], "name", "")))
self._bases = [x[0] for x in new_parts]
self._powers = [x[1] for x in new_parts]
self._scale = sanitize_scale(scale)
def __copy__(self):
"""
For compatibility with python copy module.
"""
return CompositeUnit(self._scale, self._bases[:], self._powers[:])
def decompose(self, bases=set()):
if len(bases) == 0 and self._decomposed_cache is not None:
return self._decomposed_cache
for base in self.bases:
if not isinstance(base, IrreducibleUnit) or (
len(bases) and base not in bases
):
break
else:
if len(bases) == 0:
self._decomposed_cache = self
return self
x = CompositeUnit(
self.scale, self.bases, self.powers, decompose=True, decompose_bases=bases
)
if len(bases) == 0:
self._decomposed_cache = x
return x
def is_unity(self):
unit = self.decompose()
return len(unit.bases) == 0 and unit.scale == 1.0
si_prefixes = [
(["Q"], ["quetta"], 1e30),
(["R"], ["ronna"], 1e27),
(["Y"], ["yotta"], 1e24),
(["Z"], ["zetta"], 1e21),
(["E"], ["exa"], 1e18),
(["P"], ["peta"], 1e15),
(["T"], ["tera"], 1e12),
(["G"], ["giga"], 1e9),
(["M"], ["mega"], 1e6),
(["k"], ["kilo"], 1e3),
(["h"], ["hecto"], 1e2),
(["da"], ["deka", "deca"], 1e1),
(["d"], ["deci"], 1e-1),
(["c"], ["centi"], 1e-2),
(["m"], ["milli"], 1e-3),
(["u"], ["micro"], 1e-6),
(["n"], ["nano"], 1e-9),
(["p"], ["pico"], 1e-12),
(["f"], ["femto"], 1e-15),
(["a"], ["atto"], 1e-18),
(["z"], ["zepto"], 1e-21),
(["y"], ["yocto"], 1e-24),
(["r"], ["ronto"], 1e-27),
(["q"], ["quecto"], 1e-30),
]
binary_prefixes = [
(["Ki"], ["kibi"], 2**10),
(["Mi"], ["mebi"], 2**20),
(["Gi"], ["gibi"], 2**30),
(["Ti"], ["tebi"], 2**40),
(["Pi"], ["pebi"], 2**50),
(["Ei"], ["exbi"], 2**60),
]
def _add_prefixes(u, excludes=[], namespace=None, prefixes=False):
"""
Set up all of the standard metric prefixes for a unit. This
function should not be used directly, but instead use the
`prefixes` kwarg on `def_unit`.
Parameters
----------
excludes : list of str, optional
Any prefixes to exclude from creation to avoid namespace
collisions.
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace dictionary.
prefixes : list, optional
When provided, it is a list of prefix definitions of the form:
(short_names, long_names, factor)
"""
if prefixes is True:
prefixes = si_prefixes
elif prefixes is False:
prefixes = []
for short, full, factor in prefixes:
names = []
format = {}
for prefix in short:
if prefix in excludes:
continue
for alias in u.short_names:
names.append(prefix + alias)
# This is a hack to use Greek mu as a prefix
# for some formatters.
if prefix == "u":
format["latex"] = r"\mu " + u.get_format_name("latex")
format["unicode"] = "\N{MICRO SIGN}" + u.get_format_name("unicode")
for key, val in u._format.items():
format.setdefault(key, prefix + val)
for prefix in full:
if prefix in excludes:
continue
for alias in u.long_names:
names.append(prefix + alias)
if len(names):
PrefixUnit(
names,
CompositeUnit(factor, [u], [1], _error_check=False),
namespace=namespace,
format=format,
)
def def_unit(
s,
represents=None,
doc=None,
format=None,
prefixes=False,
exclude_prefixes=[],
namespace=None,
):
"""
Factory function for defining new units.
Parameters
----------
s : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance, optional
The unit that this named unit represents. If not provided,
a new `IrreducibleUnit` is created.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to
have it displayed as ``\\Omega`` by the ``latex``
formatter. In that case, the ``format`` argument should be set
to::
{'latex': r'\\Omega'}
prefixes : bool or list, optional
When `True`, generate all of the SI prefixed versions of the
unit as well. For example, for a given unit ``m``, will
generate ``mm``, ``cm``, ``km``, etc. When a list, it is a list of
prefix definitions of the form:
(short_names, long_names, factor)
Default is `False`. This function always returns the base
unit object, even if multiple scaled versions of the unit were
created.
exclude_prefixes : list of str, optional
If any of the SI prefixes need to be excluded, they may be
listed here. For example, ``Pa`` can be interpreted either as
"petaannum" or "Pascal". Therefore, when defining the
prefixes for ``a``, ``exclude_prefixes`` should be set to
``["P"]``.
namespace : dict, optional
When provided, inject the unit (and all of its aliases and
prefixes), into the given namespace dictionary.
Returns
-------
unit : `~astropy.units.UnitBase`
The newly-defined unit, or a matching unit that was already
defined.
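Examples
--------
A minimal, illustrative sketch; ``bakers_fortnight`` is a made-up unit
used purely for demonstration and is not part of astropy:
>>> from astropy import units as u
>>> bakers_fortnight = u.def_unit('bakers_fortnight', 13 * u.day)
>>> (2 * bakers_fortnight).to(u.day)
<Quantity 26. d>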
"""
if represents is not None:
result = Unit(s, represents, namespace=namespace, doc=doc, format=format)
else:
result = IrreducibleUnit(s, namespace=namespace, doc=doc, format=format)
if prefixes:
_add_prefixes(
result, excludes=exclude_prefixes, namespace=namespace, prefixes=prefixes
)
return result
def _condition_arg(value):
"""
Validate that the value is acceptable for conversion purposes.
Will convert the value into an array if it is not a scalar but can be
converted into an array.
Parameters
----------
value : int or float value, or sequence of such values
Returns
-------
Scalar value or numpy array
Raises
------
ValueError
If value is not as expected
"""
if isinstance(value, (np.ndarray, float, int, complex, np.void)):
return value
avalue = np.array(value)
if avalue.dtype.kind not in ["i", "f", "c"]:
raise ValueError(
"Value not scalar compatible or convertible to "
"an int, float, or complex array"
)
return avalue
def unit_scale_converter(val):
"""Function that just multiplies the value by unity.
This is a separate function so it can be recognized and
discarded in unit conversion.
"""
return 1.0 * _condition_arg(val)
dimensionless_unscaled = CompositeUnit(1, [], [], _error_check=False)
# Abbreviation of the above, see #1980
one = dimensionless_unscaled
# Maintain error in old location for backward compatibility
# TODO: Is this still needed? Should there be a deprecation warning?
unit_format.fits.UnitScaleError = UnitScaleError
|
0dacb06cee61c5e804999a9832183f407b06332659840ed0e56b9efaa753d244 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the `Quantity` object, which represents a number with some
associated units. `Quantity` objects support operations like ordinary numbers,
but will deal with unit conversions internally.
"""
# STDLIB
import numbers
import operator
import re
import warnings
from fractions import Fraction
# THIRD PARTY
import numpy as np
# LOCAL
from astropy import config as _config
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.data_info import ParentDtypeInfo
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import isiterable
from .core import (
Unit,
UnitBase,
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
get_current_unit_registry,
)
from .format import Base, Latex
from .quantity_helper import can_have_arbitrary_unit, check_output, converters_and_unit
from .quantity_helper.function_helpers import (
DISPATCHED_FUNCTIONS,
FUNCTION_HELPERS,
SUBCLASS_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from .structured import StructuredUnit, _structured_unit_like_dtype
from .utils import is_effectively_unity
__all__ = [
"Quantity",
"SpecificTypeQuantity",
"QuantityInfoBase",
"QuantityInfo",
"allclose",
"isclose",
]
# We don't want to run doctests in the docstrings we inherit from Numpy
__doctest_skip__ = ["Quantity.*"]
_UNIT_NOT_INITIALISED = "(Unit not initialised)"
_UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh}
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for Quantity.
"""
latex_array_threshold = _config.ConfigItem(
100,
"The maximum size an array Quantity can be before its LaTeX "
'representation for IPython gets "summarized" (meaning only the first '
'and last few elements are shown with "..." between). Setting this to a '
"negative number means that the value will instead be whatever numpy "
"gets from get_printoptions.",
)
conf = Conf()
class QuantityIterator:
"""
Flat iterator object to iterate over Quantities.
A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity
``q``. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
Quantity.flatten : Returns a flattened copy of an array.
Notes
-----
`QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`. It
is not exported by the `~astropy.units` module. Instead of
instantiating a `QuantityIterator` directly, use `Quantity.flat`.
"""
def __init__(self, q):
self._quantity = q
self._dataiter = q.view(np.ndarray).flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Quantity.
if isinstance(out, type(self._quantity)):
return out
else:
return self._quantity._new_view(out)
def __setitem__(self, index, value):
self._dataiter[index] = self._quantity._to_own_unit(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)
# ndarray.flat._dataiter returns scalars, so need a view as a Quantity.
return self._quantity._new_view(out)
next = __next__
def __len__(self):
return len(self._dataiter)
#### properties and methods to match `numpy.ndarray.flatiter` ####
@property
def base(self):
"""A reference to the array that is iterated over."""
return self._quantity
@property
def coords(self):
"""An N-dimensional tuple of current coordinates."""
return self._dataiter.coords
@property
def index(self):
"""Current flat index into the array."""
return self._dataiter.index
def copy(self):
"""Get a copy of the iterator as a 1-D array."""
return self._quantity.flatten()
class QuantityInfoBase(ParentDtypeInfo):
# This is on a base class rather than QuantityInfo directly, so that
# it can be used for EarthLocationInfo yet make clear that that class
# should not be considered a typical Quantity subclass by Table.
attrs_from_parent = {"dtype", "unit"} # dtype and unit taken from parent
_supports_indexing = True
@staticmethod
def default_format(val):
return f"{val.value}"
@staticmethod
def possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
This method is overridden in order to suppress printing the unit
in each row since it is already at the top in the column header.
"""
yield lambda format_, val: format(val.value, format_)
yield lambda format_, val: format_.format(val.value)
yield lambda format_, val: format_ % val.value
class QuantityInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ("value", "unit")
_construct_from_dict_args = ["value"]
_represent_as_dict_primary_data = "value"
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Quantity instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : `~astropy.units.Quantity` (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "format", "description")
)
# Make an empty quantity using the unit of the last one.
shape = (length,) + attrs.pop("shape")
dtype = attrs.pop("dtype")
# Use zeros so we do not get problems for Quantity subclasses such
# as Longitude and Latitude, which cannot take arbitrary values.
data = np.zeros(shape=shape, dtype=dtype)
# Get arguments needed to reconstruct class
map = {
key: (data if key == "value" else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs
}
map["copy"] = False
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
For Quantity this is just the quantity itself.
Returns
-------
arrays : list of ndarray
"""
return [self._parent]
class Quantity(np.ndarray):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
See also: https://docs.astropy.org/en/stable/units/quantity.html
Parameters
----------
value : number, `~numpy.ndarray`, `~astropy.units.Quantity` (sequence), or str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : unit-like
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any integer and (non-Quantity) object inputs are converted
to float by default.
If `None`, the normal `numpy.dtype` introspection is used, e.g.
preventing upcasting of integers.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be prepended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See https://docs.astropy.org/en/latest/units/
Unless the ``dtype`` argument is explicitly specified, integer
or (non-Quantity) object inputs are converted to `float` by default.
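Examples
--------
A few representative ways of creating a |Quantity|; the outputs shown
are indicative:
>>> from astropy import units as u
>>> u.Quantity(15.0, u.m / u.s)
<Quantity 15. m / s>
>>> u.Quantity('15 m/s')
<Quantity 15. m / s>
>>> 15.0 * u.m / u.s
<Quantity 15. m / s>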
"""
# Need to set a class-level default for _equivalencies, or
# Constants can not initialize properly
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
def __class_getitem__(cls, unit_shape_dtype):
"""Quantity Type Hints.
Unit-aware type hints are ``Annotated`` objects that encode the class,
the unit, and possibly shape and dtype information, depending on the
python and :mod:`numpy` versions.
Schematically, ``Annotated[cls[shape, dtype], unit]``
As a classmethod, the type is the class, ie ``Quantity``
produces an ``Annotated[Quantity, ...]`` while a subclass
like :class:`~astropy.coordinates.Angle` returns
``Annotated[Angle, ...]``.
Parameters
----------
unit_shape_dtype : :class:`~astropy.units.UnitBase`, str, `~astropy.units.PhysicalType`, or tuple
Unit specification, can be the physical type (ie str or class).
If tuple, then the first element is the unit specification
and all other elements are for `numpy.ndarray` type annotations.
Whether they are included depends on the python and :mod:`numpy`
versions.
Returns
-------
`typing.Annotated`, `typing_extensions.Annotated`, `astropy.units.Unit`, or `astropy.units.PhysicalType`
Return type in this preference order:
* if python v3.9+ : `typing.Annotated`
* if :mod:`typing_extensions` is installed : `typing_extensions.Annotated`
* `astropy.units.Unit` or `astropy.units.PhysicalType`
Raises
------
TypeError
If the unit/physical_type annotation is not Unit-like or
PhysicalType-like.
Examples
--------
Create a unit-aware Quantity type annotation
>>> Quantity[Unit("s")]
Annotated[Quantity, Unit("s")]
See Also
--------
`~astropy.units.quantity_input`
Use annotations for unit checks on function arguments and results.
Notes
-----
With Python 3.9+ or :mod:`typing_extensions`, |Quantity| types are also
static-type compatible.
"""
# LOCAL
from ._typing import HAS_ANNOTATED, Annotated
# process whether [unit] or [unit, shape, dtype]
if isinstance(unit_shape_dtype, tuple): # unit, shape, dtype
target = unit_shape_dtype[0]
shape_dtype = unit_shape_dtype[1:]
else: # just unit
target = unit_shape_dtype
shape_dtype = ()
# Allowed unit/physical types. Errors if neither.
try:
unit = Unit(target)
except (TypeError, ValueError):
from astropy.units.physical import get_physical_type
try:
unit = get_physical_type(target)
except (TypeError, ValueError, KeyError): # KeyError for Enum
raise TypeError(
"unit annotation is not a Unit or PhysicalType"
) from None
# Allow this to remain partially usable for python 3.8- / no
# typing_extensions: instead of bailing out, return the unit for
# `quantity_input`.
if not HAS_ANNOTATED:
warnings.warn(
"Quantity annotations are valid static type annotations only"
" if Python is v3.9+ or `typing_extensions` is installed."
)
return unit
# Quantity does not (yet) properly extend the NumPy generics types,
# introduced in numpy v1.22+, instead just including the unit info as
# metadata using Annotated.
# TODO: ensure we do interact with NDArray.__class_getitem__.
return Annotated.__class_getitem__((cls, unit))
def __new__(
cls,
value,
unit=None,
dtype=np.inexact,
copy=True,
order=None,
subok=False,
ndmin=0,
):
if unit is not None:
# convert unit first, to avoid multiple string->unit conversions
unit = Unit(unit)
# inexact -> upcast to float dtype
float_default = dtype is np.inexact
if float_default:
dtype = None
# optimize speed for Quantity with no dtype given, copy=False
if isinstance(value, Quantity):
if unit is not None and unit is not value.unit:
value = value.to(unit)
# the above already makes a copy (with float dtype)
copy = False
if type(value) is not cls and not (subok and isinstance(value, cls)):
value = value.view(cls)
if float_default and value.dtype.kind in "iu":
dtype = float
return np.array(
value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin
)
# Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
# To ensure array remains fast, we short-circuit it.
value_unit = None
if not isinstance(value, np.ndarray):
if isinstance(value, str):
# The first part of the regex string matches any integer/float;
# the second part adds a possible trailing .+-, which will break
# the float function below and ensures that things like 1.2.3deg
# will not work.
pattern = (
r"\s*[+-]?"
r"((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|"
r"([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))"
r"([eE][+-]?\d+)?"
r"[.+-]?"
)
v = re.match(pattern, value)
unit_string = None
try:
value = float(v.group())
except Exception:
raise TypeError(
f'Cannot parse "{value}" as a {cls.__name__}. It does not '
"start with a number."
)
unit_string = v.string[v.end() :].strip()
if unit_string:
value_unit = Unit(unit_string)
if unit is None:
unit = value_unit # signal no conversion needed below.
elif isiterable(value) and len(value) > 0:
# Iterables like lists and tuples.
if all(isinstance(v, Quantity) for v in value):
# If a list/tuple containing only quantities, convert all
# to the same unit.
if unit is None:
unit = value[0].unit
value = [q.to_value(unit) for q in value]
value_unit = unit # signal below that conversion has been done
elif (
dtype is None
and not hasattr(value, "dtype")
and isinstance(unit, StructuredUnit)
):
# Special case for list/tuple of values and a structured unit:
# ``np.array(value, dtype=None)`` would treat tuples as lower
# levels of the array, rather than as elements of a structured
# array, so we use the structure of the unit to help infer the
# structured dtype of the value.
dtype = unit._recursively_get_dtype(value)
using_default_unit = False
if value_unit is None:
# If the value has a `unit` attribute and if not None
# (for Columns with uninitialized unit), treat it like a quantity.
value_unit = getattr(value, "unit", None)
if value_unit is None:
# Default to dimensionless for no (initialized) unit attribute.
if unit is None:
using_default_unit = True
unit = cls._default_unit
value_unit = unit # signal below that no conversion is needed
else:
try:
value_unit = Unit(value_unit)
except Exception as exc:
raise TypeError(
f"The unit attribute {value.unit!r} of the input could "
"not be parsed as an astropy Unit."
) from exc
if unit is None:
unit = value_unit
elif unit is not value_unit:
copy = False # copy will be made in conversion at end
value = np.array(
value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin
)
# For no-user-input unit, make sure the constructed unit matches the
# structure of the data.
if using_default_unit and value.dtype.names is not None:
unit = value_unit = _structured_unit_like_dtype(value_unit, value.dtype)
# check that array contains numbers or long int objects
if value.dtype.kind in "OSU" and not (
value.dtype.kind == "O" and isinstance(value.item(0), numbers.Number)
):
raise TypeError("The value must be a valid Python or Numpy numeric type.")
# by default, cast any integer, boolean, etc., to float
if float_default and value.dtype.kind in "iuO":
value = value.astype(float)
# if we allow subclasses, allow a class from the unit.
if subok:
qcls = getattr(unit, "_quantity_class", cls)
if issubclass(qcls, cls):
cls = qcls
value = value.view(cls)
value._set_unit(value_unit)
if unit is value_unit:
return value
else:
# here we had non-Quantity input that had a "unit" attribute
# with a unit different from the desired one. So, convert.
return value.to(unit)
def __array_finalize__(self, obj):
# Check whether super().__array_finalize should be called
# (sadly, ndarray.__array_finalize__ is None; we cannot be sure
# what is above us).
super_array_finalize = super().__array_finalize__
if super_array_finalize is not None:
super_array_finalize(obj)
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# Copy over the unit and possibly info. Note that the only way the
# unit can already be set is if one enters via _new_view(), where the
# unit is often different from that of self, and where propagation of
# info is not always desirable.
if self._unit is None:
unit = getattr(obj, "_unit", None)
if unit is not None:
self._set_unit(unit)
# Copy info if the original had `info` defined. Because of the way the
# DataInfo works, `'info' in obj.__dict__` is False until the
# `info` attribute is accessed or set.
if "info" in obj.__dict__:
self.info = obj.info
def __array_wrap__(self, obj, context=None):
if context is None:
# Methods like .squeeze() created a new `ndarray` and then call
# __array_wrap__ to turn the array into self's subclass.
return self._new_view(obj)
raise NotImplementedError(
"__array_wrap__ should not be used with a context any more since all "
"use should go through array_function. Please raise an issue on "
"https://github.com/astropy/astropy"
)
def __array_ufunc__(self, function, method, *inputs, **kwargs):
"""Wrap numpy ufuncs, taking care of units.
Parameters
----------
function : callable
ufunc to wrap.
method : str
Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
inputs : tuple
Input arrays.
kwargs : keyword arguments
As passed on, with ``out`` containing possible quantity output.
Returns
-------
result : `~astropy.units.Quantity` or `NotImplemented`
Results of the ufunc, with the unit set properly.
"""
# Determine required conversion functions -- to bring the unit of the
# input to that expected (e.g., radian for np.sin), or to get
# consistent units between two inputs (e.g., in np.add) --
# and the unit of the result (or tuple of units for nout > 1).
try:
converters, unit = converters_and_unit(function, method, *inputs)
out = kwargs.get("out", None)
# Avoid loop back by turning any Quantity output into array views.
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
if function.nout == 1:
out = out[0]
out_array = check_output(out, unit, inputs, function=function)
# Ensure output argument remains a tuple.
kwargs["out"] = (out_array,) if function.nout == 1 else out_array
if method == "reduce" and "initial" in kwargs and unit is not None:
# Special-case for initial argument for reductions like
# np.add.reduce. This should be converted to the output unit as
# well, which is typically the same as the input unit (but can
# in principle be different: unitless for np.equal, radian
# for np.arctan2, though those are not necessarily useful!)
kwargs["initial"] = self._to_own_unit(
kwargs["initial"], check_precision=False, unit=unit
)
# Same for inputs, but here also convert if necessary.
arrays = []
for input_, converter in zip(inputs, converters):
input_ = getattr(input_, "value", input_)
arrays.append(converter(input_) if converter else input_)
# Call our superclass's __array_ufunc__
result = super().__array_ufunc__(function, method, *arrays, **kwargs)
# If unit is None, a plain array is expected (e.g., comparisons), which
# means we're done.
# We're also done if the result was None (for method 'at') or
# NotImplemented, which can happen if other inputs/outputs override
# __array_ufunc__; hopefully, they can then deal with us.
if unit is None or result is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out)
except (TypeError, ValueError, AttributeError) as e:
out_normalized = kwargs.get("out", tuple())
inputs_and_outputs = inputs + out_normalized
ignored_ufunc = (
None,
np.ndarray.__array_ufunc__,
type(self).__array_ufunc__,
)
if not all(
getattr(type(io), "__array_ufunc__", None) in ignored_ufunc
for io in inputs_and_outputs
):
return NotImplemented
else:
raise e
def _result_as_quantity(self, result, unit, out):
"""Turn result into a quantity with the given unit.
If no output is given, it will take a view of the array as a quantity,
and set the unit. If output is given, those should be quantity views
of the result arrays, and the function will just set the unit.
Parameters
----------
result : ndarray or tuple thereof
Array(s) which need to be turned into quantity.
unit : `~astropy.units.Unit`
Unit for the quantities to be returned (or `None` if the result
should not be a quantity). Should be tuple if result is a tuple.
out : `~astropy.units.Quantity` or None
Possible output quantity. Should be `None` or a tuple if result
is a tuple.
Returns
-------
out : `~astropy.units.Quantity`
With units set.
"""
if isinstance(result, (tuple, list)):
if out is None:
out = (None,) * len(result)
return result.__class__(
self._result_as_quantity(result_, unit_, out_)
for (result_, unit_, out_) in zip(result, unit, out)
)
if out is None:
# View the result array as a Quantity with the proper unit.
return (
result
if unit is None
else self._new_view(result, unit, propagate_info=False)
)
elif isinstance(out, Quantity):
# For given Quantity output, just set the unit. We know the unit
# is not None and the output is of the correct Quantity subclass,
# as it was passed through check_output.
# (We cannot do this unconditionally, though, since it is possible
# for out to be ndarray and the unit to be dimensionless.)
out._set_unit(unit)
return out
def __quantity_subclass__(self, unit):
"""
Overridden by subclasses to change what kind of view is
created based on the output unit of an operation.
Parameters
----------
unit : UnitBase
The unit for which the appropriate class should be returned
Returns
-------
tuple :
- `~astropy.units.Quantity` subclass
- bool: True if subclasses of the given class are ok
"""
return Quantity, True
def _new_view(self, obj=None, unit=None, propagate_info=True):
"""Create a Quantity view of some array-like input, and set the unit.
By default, return a view of ``obj`` of the same class as ``self`` and
with the same unit. Subclasses can override the type of class for a
given unit using ``__quantity_subclass__``, and can ensure properties
other than the unit are copied using ``__array_finalize__``.
If the given unit defines a ``_quantity_class`` of which ``self``
is not an instance, a view using this class is taken.
Parameters
----------
obj : ndarray or scalar, optional
The array to create a view of. If obj is a numpy or python scalar,
it will be converted to an array scalar. By default, ``self``
is converted.
unit : unit-like, optional
The unit of the resulting object. It is used to select a
subclass, and explicitly assigned to the view if given.
If not given, the subclass and unit will be that of ``self``.
propagate_info : bool, optional
Whether to transfer ``info`` if present. Default: `True`, as
appropriate for, e.g., unit conversions or slicing, where the
nature of the object does not change.
Returns
-------
view : `~astropy.units.Quantity` subclass
"""
# Determine the unit and quantity subclass that we need for the view.
if unit is None:
unit = self.unit
quantity_subclass = self.__class__
elif unit is self.unit and self.__class__ is Quantity:
# The second part is because we should not presume what other
# classes want to do for the same unit. E.g., Constant will
# always want to fall back to Quantity, and relies on going
# through `__quantity_subclass__`.
quantity_subclass = Quantity
else:
unit = Unit(unit)
quantity_subclass = getattr(unit, "_quantity_class", Quantity)
if isinstance(self, quantity_subclass):
quantity_subclass, subok = self.__quantity_subclass__(unit)
if subok:
quantity_subclass = self.__class__
# We only want to propagate information from ``self`` to our new view,
# so obj should be a regular array. By using ``np.array``, we also
# convert python and numpy scalars, which cannot be viewed as arrays
# and thus not as Quantity either, to zero-dimensional arrays.
# (These are turned back into scalar in `.value`)
# Note that for an ndarray input, the np.array call takes only about
# twice as long as checking ``obj.__class__ is np.ndarray``, so it is
# not worth special-casing.
if obj is None:
obj = self.view(np.ndarray)
else:
obj = np.array(obj, copy=False, subok=True)
# Take the view, set the unit, and update possible other properties
# such as ``info``, ``wrap_angle`` in `Longitude`, etc.
view = obj.view(quantity_subclass)
view._set_unit(unit)
view.__array_finalize__(self)
if propagate_info and "info" in self.__dict__:
view.info = self.info
return view
def _set_unit(self, unit):
"""Set the unit.
This is used anywhere the unit is set or modified, i.e., in the
initializer, in ``__imul__`` and ``__itruediv__`` for in-place
multiplication and division by another unit, as well as in
``__array_finalize__`` for wrapping up views. For Quantity, it just
sets the unit, but subclasses can override it to check that, e.g.,
a unit is consistent.
"""
if not isinstance(unit, UnitBase):
if isinstance(self._unit, StructuredUnit) or isinstance(
unit, StructuredUnit
):
unit = StructuredUnit(unit, self.dtype)
else:
# Trying to go through a string ensures that, e.g., Magnitudes with
# dimensionless physical unit become Quantity with units of mag.
unit = Unit(str(unit), parse_strict="silent")
if not isinstance(unit, (UnitBase, StructuredUnit)):
raise UnitTypeError(
f"{self.__class__.__name__} instances require normal units, "
f"not {unit.__class__} instances."
)
self._unit = unit
def __deepcopy__(self, memo):
# If we don't define this, ``copy.deepcopy(quantity)`` will
# return a bare Numpy array.
return self.copy()
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
info = QuantityInfo()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
if equivalencies == []:
equivalencies = self._equivalencies
if not self.dtype.names or isinstance(self.unit, StructuredUnit):
# Standard path, let unit to do work.
return self.unit.to(
unit, self.view(np.ndarray), equivalencies=equivalencies
)
else:
# The .to() method of a simple unit cannot convert a structured
# dtype, so we work around it, by recursing.
# TODO: deprecate this?
# Convert simple to Structured on initialization?
result = np.empty_like(self.view(np.ndarray))
for name in self.dtype.names:
result[name] = self[name]._to_value(unit, equivalencies)
return result
def to(self, unit, equivalencies=[], copy=True):
"""
Return a new `~astropy.units.Quantity` object with the specified unit.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, class default equivalencies will be used
(none for `~astropy.units.Quantity`, but may be set for subclasses)
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy
will only be made if necessary.
See Also
--------
to_value : get the numerical value in a given unit.
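Examples
--------
A short, indicative sketch (outputs may differ in detail):
>>> from astropy import units as u
>>> q = 2.5 * u.km
>>> q.to(u.m)
<Quantity 2500. m>
>>> (1000. * u.nm).to(u.Hz, equivalencies=u.spectral())
<Quantity 2.99792458e+14 Hz>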
"""
# We don't use `to_value` below since we always want to make a copy
# and don't want to slow down this method (esp. the scalar case).
unit = Unit(unit)
if copy:
# Avoid using to_value to ensure that we make a copy. We also
# don't want to slow down this method (esp. the scalar case).
value = self._to_value(unit, equivalencies)
else:
# to_value only copies if necessary
value = self.to_value(unit, equivalencies)
return self._new_view(value, unit)
def to_value(self, unit=None, equivalencies=[]):
"""
The numerical value, possibly in a different unit.
Parameters
----------
unit : unit-like, optional
The unit in which the value should be given. If not given or `None`,
use the current unit.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If not provided
or ``[]``, class default equivalencies will be used (none for
`~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
Returns
-------
value : ndarray or scalar
The value in the units specified. For arrays, this will be a view
of the data if no unit conversion was necessary.
See Also
--------
to : Get a new instance in a different unit.
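Examples
--------
A short, indicative sketch:
>>> from astropy import units as u
>>> q = 2.5 * u.km
>>> q.value
2.5
>>> q.to_value(u.m)
2500.0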
"""
if unit is None or unit is self.unit:
value = self.view(np.ndarray)
elif not self.dtype.names:
# For non-structured, we attempt a short-cut, where we just get
# the scale. If that is 1, we do not have to do anything.
unit = Unit(unit)
# We want a view if the unit does not change. One could check
# with "==", but that calculates the scale that we need anyway.
# TODO: would be better for `unit.to` to have an in-place flag.
try:
scale = self.unit._to(unit)
except Exception:
# Short-cut failed; try default (maybe equivalencies help).
value = self._to_value(unit, equivalencies)
else:
value = self.view(np.ndarray)
if not is_effectively_unity(scale):
# not in-place!
value = value * scale
else:
# For structured arrays, we go the default route.
value = self._to_value(unit, equivalencies)
# Index with empty tuple to decay array scalars into numpy scalars.
return value if value.shape else value[()]
value = property(
to_value,
doc="""The numerical value of this instance.
See also
--------
to_value : Get the numerical value in a given unit.
""",
)
@property
def unit(self):
"""
A `~astropy.units.UnitBase` object representing the unit of this
quantity.
"""
return self._unit
@property
def equivalencies(self):
"""
A list of equivalencies that will be applied by default during
unit conversions.
"""
return self._equivalencies
def _recursively_apply(self, func):
"""Apply function recursively to every field.
Returns a copy with the result.
"""
result = np.empty_like(self)
result_value = result.view(np.ndarray)
result_unit = ()
for name in self.dtype.names:
part = func(self[name])
result_value[name] = part.value
result_unit += (part.unit,)
result._set_unit(result_unit)
return result
@property
def si(self):
"""
Returns a copy of the current `Quantity` instance with SI units. The
value of the resulting object will be scaled.
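For example (output representative):
>>> from astropy import units as u
>>> (1. * u.km).si
<Quantity 1000. m>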
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter("si"))
si_unit = self.unit.si
return self._new_view(self.value * si_unit.scale, si_unit / si_unit.scale)
@property
def cgs(self):
"""
Returns a copy of the current `Quantity` instance with CGS units. The
value of the resulting object will be scaled.
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter("cgs"))
cgs_unit = self.unit.cgs
return self._new_view(self.value * cgs_unit.scale, cgs_unit / cgs_unit.scale)
@property
def isscalar(self):
"""
True if the `value` of this quantity is a scalar, or False if it
is an array-like object.
.. note::
This is subtly different from `numpy.isscalar` in that
`numpy.isscalar` returns False for a zero-dimensional array
(e.g. ``np.array(1)``), while this is True for quantities,
since quantities cannot represent true numpy scalars.
"""
return not self.shape
# This flag controls whether convenience conversion members, such
# as `q.m` equivalent to `q.to_value(u.m)` are available. This is
# not turned on on Quantity itself, but is on some subclasses of
# Quantity, such as `astropy.coordinates.Angle`.
_include_easy_conversion_members = False
def __dir__(self):
"""
Quantities are able to directly convert to other units that
have the same physical type. This function is implemented in
order to make autocompletion still work correctly in IPython.
"""
if not self._include_easy_conversion_members:
return super().__dir__()
dir_values = set(super().__dir__())
equivalencies = Unit._normalize_equivalencies(self.equivalencies)
for equivalent in self.unit._get_units_with_same_physical_type(equivalencies):
dir_values.update(equivalent.names)
return sorted(dir_values)
def __getattr__(self, attr):
"""
Quantities are able to directly convert to other units that
have the same physical type.
"""
if not self._include_easy_conversion_members:
raise AttributeError(
f"'{self.__class__.__name__}' object has no '{attr}' member"
)
def get_virtual_unit_attribute():
registry = get_current_unit_registry().registry
to_unit = registry.get(attr, None)
if to_unit is None:
return None
try:
return self.unit.to(
to_unit, self.value, equivalencies=self.equivalencies
)
except UnitsError:
return None
value = get_virtual_unit_attribute()
if value is None:
raise AttributeError(
f"{self.__class__.__name__} instance has no attribute '{attr}'"
)
else:
return value
# Equality needs to be handled explicitly as ndarray.__eq__ gives
# DeprecationWarnings on any error, which is distracting, and does not
# deal well with structured arrays (nor does the ufunc).
def __eq__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return False
except Exception:
return NotImplemented
return self.value.__eq__(other_value)
def __ne__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return True
except Exception:
return NotImplemented
return self.value.__ne__(other_value)
# Unit conversion operator (<<).
def __lshift__(self, other):
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
def __ilshift__(self, other):
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented # try other.__rlshift__(self)
try:
factor = self.unit._to(other)
except UnitConversionError: # incompatible, or requires an Equivalency
return NotImplemented
except AttributeError: # StructuredUnit does not have `_to`
# In principle, in-place might be possible.
return NotImplemented
view = self.view(np.ndarray)
try:
view *= factor # operates on view
except TypeError:
# The error is `numpy.core._exceptions._UFuncOutputCastingError`,
# which inherits from `TypeError`.
return NotImplemented
self._set_unit(other)
return self
def __rlshift__(self, other):
if not self.isscalar:
return NotImplemented
return Unit(self).__rlshift__(other)
# Give warning for other >> self, since probably other << self was meant.
def __rrshift__(self, other):
warnings.warn(
">> is not implemented. Did you mean to convert "
"something to this quantity as a unit using '<<'?",
AstropyWarning,
)
return NotImplemented
# Also define __rshift__ and __irshift__ so we override default ndarray
# behaviour, but instead of emitting a warning here, let it be done by
# other (which likely is a unit if this was a mistake).
def __rshift__(self, other):
return NotImplemented
def __irshift__(self, other):
return NotImplemented
# Arithmetic operations
def __mul__(self, other):
"""Multiplication between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(
self.value.copy(), other * self.unit, propagate_info=False
)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__mul__(other)
def __imul__(self, other):
"""In-place multiplication between `Quantity` objects and others."""
if isinstance(other, (UnitBase, str)):
self._set_unit(other * self.unit)
return self
return super().__imul__(other)
def __rmul__(self, other):
"""
Right Multiplication between `Quantity` objects and other objects.
"""
return self.__mul__(other)
def __truediv__(self, other):
"""Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(
self.value.copy(), self.unit / other, propagate_info=False
)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__truediv__(other)
def __itruediv__(self, other):
"""Inplace division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
self._set_unit(self.unit / other)
return self
return super().__itruediv__(other)
def __rtruediv__(self, other):
"""Right Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
return self._new_view(
1.0 / self.value, other / self.unit, propagate_info=False
)
return super().__rtruediv__(other)
def __pow__(self, other):
if isinstance(other, Fraction):
# Avoid getting object arrays by raising the value to a Fraction.
return self._new_view(
self.value ** float(other), self.unit**other, propagate_info=False
)
return super().__pow__(other)
# other overrides of special functions
def __hash__(self):
return hash(self.value) ^ hash(self.unit)
def __iter__(self):
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value is not"
" iterable"
)
# Otherwise return a generator
def quantity_iter():
for val in self.value:
yield self._new_view(val)
return quantity_iter()
def __getitem__(self, key):
if isinstance(key, str) and isinstance(self.unit, StructuredUnit):
return self._new_view(
self.view(np.ndarray)[key], self.unit[key], propagate_info=False
)
try:
out = super().__getitem__(key)
except IndexError:
# We want zero-dimensional Quantity objects to behave like scalars,
# so they should raise a TypeError rather than an IndexError.
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value "
"does not support indexing"
)
else:
raise
# For single elements, ndarray.__getitem__ returns scalars; these
# need a new view as a Quantity.
if not isinstance(out, np.ndarray):
out = self._new_view(out)
return out
def __setitem__(self, i, value):
if isinstance(i, str):
# Indexing will cause a different unit, so by doing this in
# two steps we effectively try with the right unit.
self[i][...] = value
return
# update indices in info if the info property has been accessed
# (in which case 'info' in self.__dict__ is True; this is guaranteed
# to be the case if we're part of a table).
if not self.isscalar and "info" in self.__dict__:
self.info.adjust_indices(i, value, len(self))
self.view(np.ndarray).__setitem__(i, self._to_own_unit(value))
# __contains__ is OK
def __bool__(self):
"""This method raises ValueError, since truthiness of quantities is ambiguous,
especially for logarithmic units and temperatures. Use explicit comparisons.
"""
raise ValueError(
f"{type(self).__name__} truthiness is ambiguous, especially for logarithmic units"
" and temperatures. Use explicit comparisons."
)
def __len__(self):
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value has no len()"
)
else:
return len(self.value)
# Numerical types
def __float__(self):
try:
return float(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError(
"only dimensionless scalar quantities can be "
"converted to Python scalars"
)
def __int__(self):
try:
return int(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError(
"only dimensionless scalar quantities can be "
"converted to Python scalars"
)
def __index__(self):
# for indices, we do not want to mess around with scaling at all,
# so unlike for float, int, we insist here on unscaled dimensionless
try:
assert self.unit.is_unity()
return self.value.__index__()
except Exception:
raise TypeError(
"only integer dimensionless scalar quantities "
"can be converted to a Python index"
)
# TODO: we may want to add a hook for dimensionless quantities?
@property
def _unitstr(self):
if self.unit is None:
unitstr = _UNIT_NOT_INITIALISED
else:
unitstr = str(self.unit)
if unitstr:
unitstr = " " + unitstr
return unitstr
def to_string(self, unit=None, precision=None, format=None, subfmt=None):
"""
Generate a string representation of the quantity and its unit.
The behavior of this function can be altered via the
`numpy.set_printoptions` function and its various keywords. The
exception to this is the ``threshold`` keyword, which is controlled via
the ``[units.quantity]`` configuration item ``latex_array_threshold``.
This is treated separately because the numpy default of 1000 is too big
for most browsers to handle.
Parameters
----------
unit : unit-like, optional
Specifies the unit. If not provided,
the unit used to initialize the quantity will be used.
precision : number, optional
The level of decimal precision. If `None`, or not provided,
it will be determined from NumPy print options.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'latex_inline': Return a LaTeX-formatted string that uses
negative exponents instead of fractions
subfmt : str, optional
Subformat of the result. For the moment, only used for
``format='latex'`` and ``format='latex_inline'``. Supported
values are:
- 'inline': Use ``$ ... $`` as delimiters.
- 'display': Use ``$\\displaystyle ... $`` as delimiters.
Returns
-------
str
A string with the contents of this Quantity
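Examples
--------
An indicative sketch; the exact LaTeX string depends on the unit
formatter and astropy version:
>>> from astropy import units as u
>>> q = 1.25 * u.m / u.s
>>> q.to_string()
'1.25 m / s'
>>> q.to_string(format='latex')  # doctest: +SKIP
'$1.25 \\; \\mathrm{\\frac{m}{s}}$'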
"""
if unit is not None and unit != self.unit:
return self.to(unit).to_string(
unit=None, precision=precision, format=format, subfmt=subfmt
)
formats = {
None: None,
"latex": {
None: ("$", "$"),
"inline": ("$", "$"),
"display": (r"$\displaystyle ", r"$"),
},
}
formats["latex_inline"] = formats["latex"]
if format not in formats:
raise ValueError(f"Unknown format '{format}'")
elif format is None:
if precision is None:
# Use default formatting settings
return f"{self.value}{self._unitstr:s}"
else:
# np.array2string properly formats arrays as well as scalars
return (
np.array2string(self.value, precision=precision, floatmode="fixed")
+ self._unitstr
)
# else, for the moment we assume format="latex" or "latex_inline".
# Set the precision if set, otherwise use numpy default
pops = np.get_printoptions()
format_spec = f".{precision if precision is not None else pops['precision']}g"
def float_formatter(value):
return Latex.format_exponential_notation(value, format_spec=format_spec)
def complex_formatter(value):
return "({}{}i)".format(
Latex.format_exponential_notation(value.real, format_spec=format_spec),
Latex.format_exponential_notation(
value.imag, format_spec="+" + format_spec
),
)
# The view is needed for the scalar case - self.value might be float.
latex_value = np.array2string(
self.view(np.ndarray),
threshold=(
conf.latex_array_threshold
if conf.latex_array_threshold > -1
else pops["threshold"]
),
formatter={
"float_kind": float_formatter,
"complex_kind": complex_formatter,
},
max_line_width=np.inf,
separator=",~",
)
latex_value = latex_value.replace("...", r"\dots")
# Format unit
# [1:-1] strips the '$' on either side needed for math mode
if self.unit is None:
latex_unit = _UNIT_NOT_INITIALISED
elif format == "latex":
latex_unit = self.unit._repr_latex_()[1:-1] # note this is unicode
elif format == "latex_inline":
latex_unit = self.unit.to_string(format="latex_inline")[1:-1]
delimiter_left, delimiter_right = formats[format][subfmt]
return rf"{delimiter_left}{latex_value} \; {latex_unit}{delimiter_right}"
def __str__(self):
return self.to_string()
def __repr__(self):
prefixstr = "<" + self.__class__.__name__ + " "
arrstr = np.array2string(
self.view(np.ndarray), separator=", ", prefix=prefixstr
)
return f"{prefixstr}{arrstr}{self._unitstr:s}>"
def _repr_latex_(self):
"""
Generate a latex representation of the quantity and its unit.
Returns
-------
lstr
A LaTeX string with the contents of this Quantity
"""
# NOTE: This should change to display format in a future release
return self.to_string(format="latex", subfmt="inline")
def __format__(self, format_spec):
try:
return self.to_string(format=format_spec)
except ValueError:
# We might have a unit format not implemented in `to_string()`.
if format_spec in Base.registry:
if self.unit is dimensionless_unscaled:
return f"{self.value}"
else:
return f"{self.value} {format(self.unit, format_spec)}"
# Can the value be formatted on its own?
try:
return f"{format(self.value, format_spec)}{self._unitstr:s}"
except ValueError:
# Format the whole thing as a single string.
return format(f"{self.value}{self._unitstr:s}", format_spec)
def decompose(self, bases=[]):
"""
Generates a new `Quantity` with the units
decomposed. Decomposed units have only irreducible units in
them (see `astropy.units.UnitBase.decompose`).
Parameters
----------
bases : sequence of `~astropy.units.UnitBase`, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `~astropy.units.UnitsError` if it is not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
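Examples
--------
An indicative sketch (the second call is skipped; its output is
representative only):
>>> from astropy import units as u
>>> (5. * u.N).decompose()
<Quantity 5. kg m / s2>
>>> (5. * u.N).decompose(bases=[u.g, u.cm, u.s])  # doctest: +SKIP
<Quantity 500000. cm g / s2>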
"""
return self._decompose(False, bases=bases)
def _decompose(self, allowscaledunits=False, bases=[]):
"""
Generates a new `Quantity` with the units decomposed. Decomposed
units have only irreducible units in them (see
`astropy.units.UnitBase.decompose`).
Parameters
----------
allowscaledunits : bool
If True, the resulting `Quantity` may have a scale factor
associated with it. If False, any scaling in the unit will
be subsumed into the value of the resulting `Quantity`.
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `~astropy.units.UnitsError` if it is not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
new_unit = self.unit.decompose(bases=bases)
# Be careful here because self.value usually is a view of self;
# be sure that the original value is not being modified.
if not allowscaledunits and hasattr(new_unit, "scale"):
new_value = self.value * new_unit.scale
new_unit = new_unit / new_unit.scale
return self._new_view(new_value, new_unit)
else:
return self._new_view(self.copy(), new_unit)
# These functions need to be overridden to take into account the units
# Array conversion
# https://numpy.org/doc/stable/reference/arrays.ndarray.html#array-conversion
def item(self, *args):
"""Copy an element of an array to a scalar Quantity and return it.
Like :meth:`~numpy.ndarray.item` except that it always
returns a `Quantity`, not a Python scalar.
"""
return self._new_view(super().item(*args))
def tolist(self):
raise NotImplementedError(
"cannot make a list of Quantities. Get list of values with"
" q.value.tolist()."
)
def _to_own_unit(self, value, check_precision=True, *, unit=None):
"""Convert value to one's own unit (or that given).
Here, non-quantities are treated as dimensionless, and care is taken
for values of 0, infinity or nan, which are allowed to have any unit.
Parameters
----------
value : anything convertible to `~astropy.units.Quantity`
The value to be converted to the requested unit.
check_precision : bool
Whether to forbid conversion of float to integer if that changes
the input number. Default: `True`.
unit : `~astropy.units.Unit` or None
The unit to convert to. By default, the unit of ``self``.
Returns
-------
value : number or `~numpy.ndarray`
In the requested units.
"""
if unit is None:
unit = self.unit
try:
_value = value.to_value(unit)
except AttributeError:
# We're not a Quantity.
# First remove two special cases (with a fast test):
# 1) Maybe masked printing? MaskedArray with quantities does not
# work very well, but no reason to break even repr and str.
# 2) np.ma.masked? useful if we're a MaskedQuantity.
if value is np.ma.masked or (
value is np.ma.masked_print_option and self.dtype.kind == "O"
):
return value
# Now, let's try a more general conversion.
# Plain arrays will be converted to dimensionless in the process,
# but anything with a unit attribute will use that.
try:
as_quantity = Quantity(value)
_value = as_quantity.to_value(unit)
except UnitsError:
# last chance: if this was not something with a unit
# and is all 0, inf, or nan, we treat it as arbitrary unit.
if not hasattr(value, "unit") and can_have_arbitrary_unit(
as_quantity.value
):
_value = as_quantity.value
else:
raise
if self.dtype.kind == "i" and check_precision:
# If, e.g., we are casting float to int, we want to fail if
# precision is lost, but let things pass if it works.
_value = np.array(_value, copy=False, subok=True)
if not np.can_cast(_value.dtype, self.dtype):
self_dtype_array = np.array(_value, self.dtype, subok=True)
if not np.all((self_dtype_array == _value) | np.isnan(_value)):
raise TypeError(
"cannot convert value type to array type without precision loss"
)
# Setting names to ensure things like equality work (note that
# above will have failed already if units did not match).
if self.dtype.names:
_value.dtype.names = self.dtype.names
return _value
def itemset(self, *args):
if len(args) == 0:
raise ValueError("itemset must have at least one argument")
self.view(np.ndarray).itemset(*(args[:-1] + (self._to_own_unit(args[-1]),)))
def tostring(self, order="C"):
"""Not implemented, use ``.value.tostring()`` instead."""
raise NotImplementedError(
"cannot write Quantities to string. Write array with"
" q.value.tostring(...)."
)
def tobytes(self, order="C"):
"""Not implemented, use ``.value.tobytes()`` instead."""
raise NotImplementedError(
"cannot write Quantities to bytes. Write array with q.value.tobytes(...)."
)
def tofile(self, fid, sep="", format="%s"):
"""Not implemented, use ``.value.tofile()`` instead."""
raise NotImplementedError(
"cannot write Quantities to file. Write array with q.value.tofile(...)"
)
def dump(self, file):
"""Not implemented, use ``.value.dump()`` instead."""
raise NotImplementedError(
"cannot dump Quantities to file. Write array with q.value.dump()"
)
def dumps(self):
"""Not implemented, use ``.value.dumps()`` instead."""
raise NotImplementedError(
"cannot dump Quantities to string. Write array with q.value.dumps()"
)
# astype, byteswap, copy, view, getfield, setflags OK as is
def fill(self, value):
self.view(np.ndarray).fill(self._to_own_unit(value))
# Shape manipulation: resize cannot be done (does not own data), but
# shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only
# the flat iterator needs to be overwritten, otherwise single items are
# returned as numbers.
@property
def flat(self):
"""A 1-D iterator over the Quantity array.
This returns a ``QuantityIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to, but not a subclass of, Python's built-in iterator
object.
"""
return QuantityIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
# Item selection and manipulation
# repeat, sort, compress, diagonal OK
def take(self, indices, axis=None, out=None, mode="raise"):
out = super().take(indices, axis=axis, out=out, mode=mode)
# For single elements, ndarray.take returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def put(self, indices, values, mode="raise"):
self.view(np.ndarray).put(indices, self._to_own_unit(values), mode)
def choose(self, choices, out=None, mode="raise"):
raise NotImplementedError(
"cannot choose based on quantity. Choose using array with"
" q.value.choose(...)"
)
# ensure we do not return indices as quantities
def argsort(self, axis=-1, kind="quicksort", order=None):
return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
def searchsorted(self, v, *args, **kwargs):
return np.searchsorted(
np.array(self), self._to_own_unit(v, check_precision=False), *args, **kwargs
) # avoid numpy 1.6 problem
if NUMPY_LT_1_22:
def argmax(self, axis=None, out=None):
return self.view(np.ndarray).argmax(axis, out=out)
def argmin(self, axis=None, out=None):
return self.view(np.ndarray).argmin(axis, out=out)
else:
def argmax(self, axis=None, out=None, *, keepdims=False):
return self.view(np.ndarray).argmax(axis=axis, out=out, keepdims=keepdims)
def argmin(self, axis=None, out=None, *, keepdims=False):
return self.view(np.ndarray).argmin(axis=axis, out=out, keepdims=keepdims)
def __array_function__(self, function, types, args, kwargs):
"""Wrap numpy functions, taking care of units.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
Returns
-------
result: `~astropy.units.Quantity`, `~numpy.ndarray`
As appropriate for the function. If the function is not
supported, `NotImplemented` is returned, which will lead to
a `TypeError` unless another argument overrode the function.
Raises
------
~astropy.units.UnitsError
If operands have incompatible units.
"""
# A function should be in one of the following sets or dicts:
# 1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
# supports Quantity; we pass on to ndarray.__array_function__.
# 2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
# after converting quantities to arrays with suitable units,
# and possibly setting units on the result.
# 3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
# requires a Quantity-specific implementation.
# 4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
# For now, since we may not yet have complete coverage, if a
# function is in none of the above, we simply call the numpy
# implementation.
if function in SUBCLASS_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in FUNCTION_HELPERS:
function_helper = FUNCTION_HELPERS[function]
try:
args, kwargs, unit, out = function_helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
result = super().__array_function__(function, types, args, kwargs)
# Fall through to return section
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
result, unit, out = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
# Fall through to return section
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
warnings.warn(
f"function '{function.__name__}' is not known to astropy's Quantity."
" Will run it anyway, hoping it will treat ndarray subclasses"
" correctly. Please raise an issue at"
" https://github.com/astropy/astropy/issues.",
AstropyWarning,
)
return super().__array_function__(function, types, args, kwargs)
# If unit is None, a plain array is expected (e.g., boolean), which
# means we're done.
# We're also done if the result was NotImplemented, which can happen
# if other inputs/outputs override __array_function__;
# hopefully, they can then deal with us.
if unit is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out=out)
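    # Hedged illustration (not part of the source): via the dispatch tables
    # above, many numpy functions accept Quantity directly, converting operands
    # to a common unit and attaching a unit to the result, e.g. (approximately):
    #   >>> np.concatenate([[1., 2.] * u.m, [300.] * u.cm])
    #   <Quantity [1., 2., 3.] m>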
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Quantity. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Quantity subclass of it) around,
# it quite likely coerces, so we should just break.
if any(
issubclass(t, np.ndarray) and not issubclass(t, Quantity) for t in types
):
raise TypeError(
f"the Quantity implementation cannot handle {function} "
"with the given arguments."
) from None
else:
return NotImplemented
# Calculation -- override ndarray methods to take into account units.
# We use the corresponding numpy functions to evaluate the results, since
# the methods do not always allow calling with keyword arguments.
# For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives
# TypeError: 'a_max' is an invalid keyword argument for this function.
def _wrap_function(self, function, *args, unit=None, out=None, **kwargs):
"""Wrap a numpy function that processes self, returning a Quantity.
Parameters
----------
function : callable
Numpy function to wrap.
args : positional arguments
Any positional arguments to the function beyond the first argument
(which will be set to ``self``).
kwargs : keyword arguments
Keyword arguments to the function.
If present, the following arguments are treated specially:
unit : `~astropy.units.Unit`
Unit of the output result. If not given, the unit of ``self``.
out : `~astropy.units.Quantity`
A Quantity instance in which to store the output.
Notes
-----
Output should always be assigned via a keyword argument, otherwise
no proper account of the unit is taken.
Returns
-------
out : `~astropy.units.Quantity`
Result of the function call, with the unit set properly.
"""
if unit is None:
unit = self.unit
# Ensure we don't loop back by turning any Quantity into array views.
args = (self.value,) + tuple(
(arg.value if isinstance(arg, Quantity) else arg) for arg in args
)
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray))
kwargs["out"] = check_output(out, unit, arrays, function=function)
# Apply the function and turn it back into a Quantity.
result = function(*args, **kwargs)
return self._result_as_quantity(result, unit, out)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return self._wrap_function(np.trace, offset, axis1, axis2, dtype, out=out)
def var(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
return self._wrap_function(
np.var,
axis,
dtype,
out=out,
ddof=ddof,
keepdims=keepdims,
where=where,
unit=self.unit**2,
)
def std(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
return self._wrap_function(
np.std, axis, dtype, out=out, ddof=ddof, keepdims=keepdims, where=where
)
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
return self._wrap_function(
np.mean, axis, dtype, out=out, keepdims=keepdims, where=where
)
def round(self, decimals=0, out=None):
return self._wrap_function(np.round, decimals, out=out)
def dot(self, b, out=None):
result_unit = self.unit * getattr(b, "unit", dimensionless_unscaled)
return self._wrap_function(np.dot, b, out=out, unit=result_unit)
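    # Hedged examples (not in the original source) of the unit handling above:
    # var() squares the unit, while dot() multiplies the operands' units.
    # Output shown approximately, assuming ``import astropy.units as u``:
    #   >>> ([1., 2., 3.] * u.m).var()
    #   <Quantity 0.66666667 m2>
    #   >>> ([1., 2.] * u.m).dot([3., 4.] / u.s)
    #   <Quantity 11. m / s>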
# Calculation: override methods that do not make sense.
def all(self, axis=None, out=None):
raise TypeError(
"cannot evaluate truth value of quantities. "
"Evaluate array with q.value.all(...)"
)
def any(self, axis=None, out=None):
raise TypeError(
"cannot evaluate truth value of quantities. "
"Evaluate array with q.value.any(...)"
)
# Calculation: numpy functions that can be overridden with methods.
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis)
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin)
if NUMPY_LT_1_22:
@deprecated("5.3", alternative="np.nansum", obj_type="method")
def nansum(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.nansum, axis, out=out, keepdims=keepdims)
else:
@deprecated("5.3", alternative="np.nansum", obj_type="method")
def nansum(
self, axis=None, out=None, keepdims=False, *, initial=None, where=True
):
if initial is not None:
initial = self._to_own_unit(initial)
return self._wrap_function(
np.nansum,
axis,
out=out,
keepdims=keepdims,
initial=initial,
where=where,
)
def insert(self, obj, values, axis=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.units.Quantity` object.
This is a thin wrapper around the `numpy.insert` function.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Values to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
            ``values`` should be shaped so that it can be broadcast appropriately.
The unit of ``values`` must be consistent with this quantity.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the quantity array is flattened before insertion.
Returns
-------
out : `~astropy.units.Quantity`
A copy of quantity with ``values`` inserted. Note that the
insertion does not occur in-place: a new quantity array is returned.
Examples
--------
>>> import astropy.units as u
>>> q = [1, 2] * u.m
>>> q.insert(0, 50 * u.cm)
<Quantity [ 0.5, 1., 2.] m>
>>> q = [[1, 2], [3, 4]] * u.m
>>> q.insert(1, [10, 20] * u.m, axis=0)
<Quantity [[ 1., 2.],
[ 10., 20.],
[ 3., 4.]] m>
>>> q.insert(1, 10 * u.m, axis=1)
<Quantity [[ 1., 10., 2.],
[ 3., 10., 4.]] m>
"""
out_array = np.insert(self.value, obj, self._to_own_unit(values), axis)
return self._new_view(out_array)
class SpecificTypeQuantity(Quantity):
"""Superclass for Quantities of specific physical type.
Subclasses of these work just like :class:`~astropy.units.Quantity`, except
that they are for specific physical types (and may have methods that are
only appropriate for that type). Astropy examples are
:class:`~astropy.coordinates.Angle` and
:class:`~astropy.coordinates.Distance`
At a minimum, subclasses should set ``_equivalent_unit`` to the unit
associated with the physical type.
"""
# The unit for the specific physical type. Instances can only be created
# with units that are equivalent to this.
_equivalent_unit = None
# The default unit used for views. Even with `None`, views of arrays
# without units are possible, but will have an uninitialized unit.
_unit = None
# Default unit for initialization through the constructor.
_default_unit = None
# ensure that we get precedence over our superclass.
__array_priority__ = Quantity.__array_priority__ + 10
def __quantity_subclass__(self, unit):
if unit.is_equivalent(self._equivalent_unit):
return type(self), True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if unit is None or not unit.is_equivalent(self._equivalent_unit):
raise UnitTypeError(
"{} instances require units equivalent to '{}'".format(
type(self).__name__, self._equivalent_unit
)
+ (
", but no unit was given."
if unit is None
else f", so cannot set it to '{unit}'."
)
)
super()._set_unit(unit)
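    # Minimal illustrative subclass (hypothetical, not part of astropy): a
    # physical-type-restricted Quantity only needs the class attributes above
    # set, e.g. (assuming ``import astropy.units as u``):
    #   class Length(SpecificTypeQuantity):
    #       _equivalent_unit = _default_unit = _unit = u.m
    # Constructing Length(3., u.s) would then raise UnitTypeError.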
def isclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False, **kwargs):
"""
Return a boolean array where two arrays are element-wise equal
within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
        Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
        be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.isclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See Also
--------
allclose
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.isclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
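# Hedged usage sketch (not part of the source): compatible but different units
# are converted before comparison; output shown approximately:
#   >>> import astropy.units as u
#   >>> u.isclose([101., 200.] * u.cm, [1., 2.] * u.m, atol=2 * u.cm)
#   array([ True,  True])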
def allclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False, **kwargs) -> bool:
"""
Whether two arrays are element-wise equal within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
        Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
        be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.allclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See Also
--------
isclose
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.allclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = Quantity(actual, subok=True, copy=False)
desired = Quantity(desired, subok=True, copy=False)
try:
desired = desired.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'desired' ({desired.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
if atol is None:
# By default, we assume an absolute tolerance of zero in the
# appropriate units. The default value of None for atol is
# needed because the units of atol must be consistent with the
# units for a and b.
atol = Quantity(0)
else:
atol = Quantity(atol, subok=True, copy=False)
try:
atol = atol.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'atol' ({atol.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
rtol = Quantity(rtol, subok=True, copy=False)
try:
rtol = rtol.to(dimensionless_unscaled)
except Exception:
raise UnitsError("'rtol' should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
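# Hedged usage sketch (not in the original source): allclose() accepts any
# combination of convertible units for ``a``, ``b`` and ``atol``:
#   >>> import astropy.units as u
#   >>> u.allclose([1., 2.] * u.m, [100., 200.] * u.cm)
#   True
# Passing units that are not convertible (e.g. metres vs. seconds) raises
# UnitsError, as implemented in _unquantify_allclose_arguments above.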
|
0e8988ccae2b9d51f7d290eaa5d01c623e2fc112dbc341d28d73f8981d7aa151 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the SI units. They are also available in the
`astropy.units` namespace.
"""
import numpy as _numpy
from astropy.constants import si as _si
from .core import Unit, UnitBase, def_unit
_ns = globals()
###########################################################################
# DIMENSIONLESS
def_unit(
["percent", "pct"],
Unit(0.01),
namespace=_ns,
prefixes=False,
doc="percent: one hundredth of unity, factor 0.01",
format={"generic": "%", "console": "%", "cds": "%", "latex": r"\%", "unicode": "%"},
)
###########################################################################
# LENGTH
def_unit(
["m", "meter"],
namespace=_ns,
prefixes=True,
doc="meter: base unit of length in SI",
)
def_unit(
["micron"],
um,
namespace=_ns,
doc="micron: alias for micrometer (um)",
format={"latex": r"\mu m", "unicode": "\N{MICRO SIGN}m"},
)
def_unit(
["Angstrom", "AA", "angstrom"],
0.1 * nm,
namespace=_ns,
doc="ångström: 10 ** -10 m",
prefixes=[(["m", "milli"], ["milli", "m"], 1.0e-3)],
format={"latex": r"\mathring{A}", "unicode": "Å", "vounit": "Angstrom"},
)
###########################################################################
# VOLUMES
def_unit(
(["l", "L"], ["liter"]),
1000 * cm**3.0,
namespace=_ns,
prefixes=True,
format={"latex": r"\mathcal{l}", "unicode": "ℓ"},
doc="liter: metric unit of volume",
)
###########################################################################
# ANGULAR MEASUREMENTS
def_unit(
["rad", "radian"],
namespace=_ns,
prefixes=True,
doc=(
"radian: angular measurement of the ratio between the length "
"on an arc and its radius"
),
)
def_unit(
["deg", "degree"],
_numpy.pi / 180.0 * rad,
namespace=_ns,
prefixes=True,
doc="degree: angular measurement 1/360 of full rotation",
)
def_unit(
["hourangle"],
15.0 * deg,
namespace=_ns,
prefixes=False,
doc="hour angle: angular measurement with 24 in a full circle",
format={"latex": r"{}^{h}", "unicode": "ʰ"},
)
def_unit(
["arcmin", "arcminute"],
1.0 / 60.0 * deg,
namespace=_ns,
prefixes=True,
doc="arc minute: angular measurement",
)
def_unit(
["arcsec", "arcsecond"],
1.0 / 3600.0 * deg,
namespace=_ns,
prefixes=True,
doc="arc second: angular measurement",
)
# These special formats should only be used for the non-prefix versions
deg._format = {"latex": r"{}^{\circ}", "unicode": "°"}
arcmin._format = {"latex": r"{}^{\prime}", "unicode": "′"}
arcsec._format = {"latex": r"{}^{\prime\prime}", "unicode": "″"}
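# Illustrative conversions (not part of this module): with the definitions
# above, the angular units relate as expected, e.g. (output approximate):
#   >>> import astropy.units as u
#   >>> (1 * u.deg).to(u.arcmin)
#   <Quantity 60. arcmin>
#   >>> (1 * u.hourangle).to(u.deg)
#   <Quantity 15. deg>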
def_unit(
["mas"],
0.001 * arcsec,
namespace=_ns,
doc="milli arc second: angular measurement",
)
def_unit(
["uas"],
0.000001 * arcsec,
namespace=_ns,
doc="micro arc second: angular measurement",
format={"latex": r"\mu as", "unicode": "μas"},
)
def_unit(
["sr", "steradian"],
rad**2,
namespace=_ns,
prefixes=True,
doc="steradian: unit of solid angle in SI",
)
###########################################################################
# TIME
def_unit(
["s", "second"],
namespace=_ns,
prefixes=True,
exclude_prefixes=["a"],
doc="second: base unit of time in SI.",
)
def_unit(
["min", "minute"],
60 * s,
prefixes=True,
namespace=_ns,
)
def_unit(
["h", "hour", "hr"],
3600 * s,
namespace=_ns,
prefixes=True,
exclude_prefixes=["p"],
)
def_unit(
["d", "day"],
24 * h,
namespace=_ns,
prefixes=True,
exclude_prefixes=["c", "y"],
)
def_unit(
["sday"],
86164.09053 * s,
namespace=_ns,
doc="Sidereal day (sday) is the time of one rotation of the Earth.",
)
def_unit(
["wk", "week"],
7 * day,
namespace=_ns,
)
def_unit(
["fortnight"],
2 * wk,
namespace=_ns,
)
def_unit(
["a", "annum"],
365.25 * d,
namespace=_ns,
prefixes=True,
exclude_prefixes=["P"],
)
def_unit(
["yr", "year"],
365.25 * d,
namespace=_ns,
prefixes=True,
)
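# Illustrative check (not part of this module): the Julian-year definition
# above implies 1 yr = 365.25 * 86400 s = 31,557,600 s, e.g. (approximately):
#   >>> import astropy.units as u
#   >>> (1 * u.yr).to(u.s)
#   <Quantity 31557600. s>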
###########################################################################
# FREQUENCY
def_unit(
["Hz", "Hertz", "hertz"],
1 / s,
namespace=_ns,
prefixes=True,
doc="Frequency",
)
###########################################################################
# MASS
def_unit(
["kg", "kilogram"],
namespace=_ns,
doc="kilogram: base unit of mass in SI.",
)
def_unit(
["g", "gram"],
1.0e-3 * kg,
namespace=_ns,
prefixes=True,
exclude_prefixes=["k", "kilo"],
)
def_unit(
["t", "tonne"],
1000 * kg,
namespace=_ns,
doc="Metric tonne",
)
###########################################################################
# AMOUNT OF SUBSTANCE
def_unit(
["mol", "mole"],
namespace=_ns,
prefixes=True,
doc="mole: amount of a chemical substance in SI.",
)
###########################################################################
# TEMPERATURE
def_unit(
["K", "Kelvin"],
namespace=_ns,
prefixes=True,
doc="Kelvin: temperature with a null point at absolute zero.",
)
def_unit(
["deg_C", "Celsius"],
namespace=_ns,
doc="Degrees Celsius",
format={"latex": r"{}^{\circ}C", "unicode": "°C", "fits": "Celsius"},
)
###########################################################################
# FORCE
def_unit(
["N", "Newton", "newton"],
kg * m * s**-2,
namespace=_ns,
prefixes=True,
doc="Newton: force",
)
##########################################################################
# ENERGY
def_unit(
["J", "Joule", "joule"],
N * m,
namespace=_ns,
prefixes=True,
doc="Joule: energy",
)
def_unit(
["eV", "electronvolt"],
_si.e.value * J,
namespace=_ns,
prefixes=True,
doc="Electron Volt",
)
##########################################################################
# PRESSURE
def_unit(
["Pa", "Pascal", "pascal"],
J * m**-3,
namespace=_ns,
prefixes=True,
doc="Pascal: pressure",
)
###########################################################################
# POWER
def_unit(
["W", "Watt", "watt"],
J / s,
namespace=_ns,
prefixes=True,
doc="Watt: power",
)
###########################################################################
# ELECTRICAL
def_unit(
["A", "ampere", "amp"],
namespace=_ns,
prefixes=True,
doc="ampere: base unit of electric current in SI",
)
def_unit(
["C", "coulomb"],
A * s,
namespace=_ns,
prefixes=True,
doc="coulomb: electric charge",
)
def_unit(
["V", "Volt", "volt"],
J * C**-1,
namespace=_ns,
prefixes=True,
doc="Volt: electric potential or electromotive force",
)
def_unit(
(["Ohm", "ohm"], ["Ohm"]),
V * A**-1,
namespace=_ns,
prefixes=True,
doc="Ohm: electrical resistance",
format={"latex": r"\Omega", "unicode": "Ω"},
)
def_unit(
["S", "Siemens", "siemens"],
A * V**-1,
namespace=_ns,
prefixes=True,
doc="Siemens: electrical conductance",
)
def_unit(
["F", "Farad", "farad"],
C * V**-1,
namespace=_ns,
prefixes=True,
doc="Farad: electrical capacitance",
)
###########################################################################
# MAGNETIC
def_unit(
["Wb", "Weber", "weber"],
V * s,
namespace=_ns,
prefixes=True,
doc="Weber: magnetic flux",
)
def_unit(
["T", "Tesla", "tesla"],
Wb * m**-2,
namespace=_ns,
prefixes=True,
doc="Tesla: magnetic flux density",
)
def_unit(
["H", "Henry", "henry"],
Wb * A**-1,
namespace=_ns,
prefixes=True,
doc="Henry: inductance",
)
###########################################################################
# ILLUMINATION
def_unit(
["cd", "candela"],
namespace=_ns,
prefixes=True,
doc="candela: base unit of luminous intensity in SI",
)
def_unit(
["lm", "lumen"],
cd * sr,
namespace=_ns,
prefixes=True,
doc="lumen: luminous flux",
)
def_unit(
["lx", "lux"],
lm * m**-2,
namespace=_ns,
prefixes=True,
doc="lux: luminous emittance",
)
###########################################################################
# RADIOACTIVITY
def_unit(
["Bq", "becquerel"],
1 / s,
namespace=_ns,
prefixes=False,
doc="becquerel: unit of radioactivity",
)
def_unit(
["Ci", "curie"],
Bq * 3.7e10,
namespace=_ns,
prefixes=False,
doc="curie: unit of radioactivity",
)
###########################################################################
# BASES
bases = {m, s, kg, A, cd, rad, K, mol}
###########################################################################
# CLEANUP
del UnitBase
del Unit
del def_unit
###########################################################################
# DOCSTRING
if __doc__ is not None:
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
__doc__ += _generate_unit_summary(globals())
|
90188c3e789729d2658473472a0a6efab3a552068ae8e39cbe9325a64c07a743 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for retrieving solar system
ephemerides from jplephem.
"""
import os.path
import re
from urllib.parse import urlparse
import erfa
import numpy as np
from astropy import units as u
from astropy.constants import c as speed_of_light
from astropy.utils import indent
from astropy.utils.data import download_file
from astropy.utils.decorators import classproperty, deprecated
from astropy.utils.state import ScienceState
from .builtin_frames import GCRS, ICRS, ITRS, TETE
from .builtin_frames.utils import get_jd12
from .representation import CartesianDifferential, CartesianRepresentation
from .sky_coordinate import SkyCoord
__all__ = [
"get_body",
"get_moon",
"get_body_barycentric",
"get_body_barycentric_posvel",
"solar_system_ephemeris",
]
DEFAULT_JPL_EPHEMERIS = "de430"
"""List of kernel pairs needed to calculate positions of a given object."""
BODY_NAME_TO_KERNEL_SPEC = {
"sun": [(0, 10)],
"mercury": [(0, 1), (1, 199)],
"venus": [(0, 2), (2, 299)],
"earth-moon-barycenter": [(0, 3)],
"earth": [(0, 3), (3, 399)],
"moon": [(0, 3), (3, 301)],
"mars": [(0, 4)],
"jupiter": [(0, 5)],
"saturn": [(0, 6)],
"uranus": [(0, 7)],
"neptune": [(0, 8)],
"pluto": [(0, 9)],
}
"""Indices to the plan94 routine for the given object."""
PLAN94_BODY_NAME_TO_PLANET_INDEX = {
"mercury": 1,
"venus": 2,
"earth-moon-barycenter": 3,
"mars": 4,
"jupiter": 5,
"saturn": 6,
"uranus": 7,
"neptune": 8,
}
_EPHEMERIS_NOTE = """
You can either give an explicit ephemeris or use a default, which is normally
a built-in ephemeris that does not require ephemeris files. To change
the default to be the JPL ephemeris::
>>> from astropy.coordinates import solar_system_ephemeris
>>> solar_system_ephemeris.set('jpl') # doctest: +SKIP
Use of any JPL ephemeris requires the jplephem package
(https://pypi.org/project/jplephem/).
If needed, the ephemeris file will be downloaded (and cached).
One can check which bodies are covered by a given ephemeris using::
>>> solar_system_ephemeris.bodies
('earth', 'sun', 'moon', 'mercury', 'venus', 'earth-moon-barycenter', 'mars', 'jupiter', 'saturn', 'uranus', 'neptune')
"""[
1:-1
]
class solar_system_ephemeris(ScienceState):
"""Default ephemerides for calculating positions of Solar-System bodies.
This can be one of the following:
- 'builtin': polynomial approximations to the orbital elements.
- 'dexxx[s]', for a JPL dynamical model, where xxx is the three digit
version number (e.g. de430), and the 's' is optional to specify the
'small' version of a kernel. The version number must correspond to an
ephemeris file available at:
https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/
- 'jpl': Alias for the default JPL ephemeris (currently, 'de430').
- URL: (str) The url to a SPK ephemeris in SPICE binary (.bsp) format.
- PATH: (str) File path to a SPK ephemeris in SPICE binary (.bsp) format.
- `None`: Ensure an Exception is raised without an explicit ephemeris.
The default is 'builtin', which uses the ``epv00`` and ``plan94``
routines from the ``erfa`` implementation of the Standards Of Fundamental
Astronomy library.
Notes
-----
Any file required will be downloaded (and cached) when the state is set.
The default Satellite Planet Kernel (SPK) file from NASA JPL (de430) is
~120MB, and covers years ~1550-2650 CE [1]_. The smaller de432s file is
~10MB, and covers years 1950-2050 [2]_ (and similarly for the newer de440
and de440s). Older versions of the JPL ephemerides (such as the widely
used de200) can be used via their URL [3]_.
.. [1] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de430-de431.txt
.. [2] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de432s.txt
.. [3] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/a_old_versions/
"""
_value = "builtin"
_kernel = None
@classmethod
def validate(cls, value):
# make no changes if value is None
if value is None:
return cls._value
# Set up Kernel; if the file is not in cache, this will download it.
cls.get_kernel(value)
return value
@classmethod
def get_kernel(cls, value):
# ScienceState only ensures the `_value` attribute is up to date,
# so we need to be sure any kernel returned is consistent.
if cls._kernel is None or cls._kernel.origin != value:
if cls._kernel is not None:
cls._kernel.daf.file.close()
cls._kernel = None
kernel = _get_kernel(value)
if kernel is not None:
kernel.origin = value
cls._kernel = kernel
return cls._kernel
@classproperty
def kernel(cls):
return cls.get_kernel(cls._value)
@classproperty
def bodies(cls):
if cls._value is None:
return None
if cls._value.lower() == "builtin":
return ("earth", "sun", "moon") + tuple(
PLAN94_BODY_NAME_TO_PLANET_INDEX.keys()
)
else:
return tuple(BODY_NAME_TO_KERNEL_SPEC.keys())
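    # Hedged usage sketch (not part of the source): as a ScienceState subclass,
    # the default ephemeris can be changed globally or within a context, with
    # any required kernel file downloaded and cached on first use, e.g.:
    #   >>> from astropy.coordinates import solar_system_ephemeris
    #   >>> solar_system_ephemeris.set('de432s')         # doctest: +SKIP
    #   >>> with solar_system_ephemeris.set('builtin'):  # doctest: +SKIP
    #   ...     ...  # computations here use the erfa-based ephemeris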
def _get_kernel(value):
"""
Try importing jplephem, download/retrieve from cache the Satellite Planet
Kernel corresponding to the given ephemeris.
"""
if value is None or value.lower() == "builtin":
return None
try:
from jplephem.spk import SPK
except ImportError:
raise ImportError(
"Solar system JPL ephemeris calculations require the jplephem package "
"(https://pypi.org/project/jplephem/)"
)
if value.lower() == "jpl":
# Get the default JPL ephemeris URL
value = DEFAULT_JPL_EPHEMERIS
if re.compile(r"de[0-9][0-9][0-9]s?").match(value.lower()):
value = (
"https://naif.jpl.nasa.gov/pub/naif/generic_kernels"
f"/spk/planets/{value.lower():s}.bsp"
)
elif os.path.isfile(value):
return SPK.open(value)
else:
try:
urlparse(value)
except Exception:
raise ValueError(
f"{value} was not one of the standard strings and "
"could not be parsed as a file path or URL"
)
return SPK.open(download_file(value, cache=True))
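# Hedged illustration (not part of the source): per the regex above, a name
# like 'de432s' expands to
# https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/de432s.bsp
# before being downloaded, cached and opened with jplephem's SPK reader:
#   >>> _get_kernel('de432s')  # doctest: +SKIP  (downloads ~10 MB on first use)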
def _get_body_barycentric_posvel(body, time, ephemeris=None, get_velocity=True):
"""Calculate the barycentric position (and velocity) of a solar system body.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
get_velocity : bool, optional
Whether or not to calculate the velocity as well as the position.
Returns
-------
position : `~astropy.coordinates.CartesianRepresentation` or tuple
Barycentric (ICRS) position or tuple of position and velocity.
Notes
-----
Whether or not velocities are calculated makes little difference for the
built-in ephemerides, but for most JPL ephemeris files, the execution time
roughly doubles.
"""
# If the ephemeris is to be taken from solar_system_ephemeris, or the one
# it already contains, use the kernel there. Otherwise, open the ephemeris,
# possibly downloading it, but make sure the file is closed at the end.
default_kernel = ephemeris is None or ephemeris is solar_system_ephemeris._value
kernel = None
try:
if default_kernel:
if solar_system_ephemeris.get() is None:
raise ValueError(_EPHEMERIS_NOTE)
kernel = solar_system_ephemeris.kernel
else:
kernel = _get_kernel(ephemeris)
jd1, jd2 = get_jd12(time, "tdb")
if kernel is None:
body = body.lower()
earth_pv_helio, earth_pv_bary = erfa.epv00(jd1, jd2)
if body == "earth":
body_pv_bary = earth_pv_bary
elif body == "moon":
# The moon98 documentation notes that it takes TT, but that TDB leads
# to errors smaller than the uncertainties in the algorithm.
# moon98 returns the astrometric position relative to the Earth.
moon_pv_geo = erfa.moon98(jd1, jd2)
body_pv_bary = erfa.pvppv(moon_pv_geo, earth_pv_bary)
else:
sun_pv_bary = erfa.pvmpv(earth_pv_bary, earth_pv_helio)
if body == "sun":
body_pv_bary = sun_pv_bary
else:
try:
body_index = PLAN94_BODY_NAME_TO_PLANET_INDEX[body]
except KeyError:
raise KeyError(
f"{body}'s position and velocity cannot be "
f"calculated with the '{ephemeris}' ephemeris."
)
body_pv_helio = erfa.plan94(jd1, jd2, body_index)
body_pv_bary = erfa.pvppv(body_pv_helio, sun_pv_bary)
body_pos_bary = CartesianRepresentation(
body_pv_bary["p"], unit=u.au, xyz_axis=-1, copy=False
)
if get_velocity:
body_vel_bary = CartesianRepresentation(
body_pv_bary["v"], unit=u.au / u.day, xyz_axis=-1, copy=False
)
else:
if isinstance(body, str):
# Look up kernel chain for JPL ephemeris, based on name
try:
kernel_spec = BODY_NAME_TO_KERNEL_SPEC[body.lower()]
except KeyError:
raise KeyError(
f"{body}'s position cannot be calculated with "
f"the {ephemeris} ephemeris."
)
else:
                # otherwise, assume the user knows what they're doing and
                # intentionally passed in a kernel chain
kernel_spec = body
# jplephem cannot handle multi-D arrays, so convert to 1D here.
jd1_shape = getattr(jd1, "shape", ())
if len(jd1_shape) > 1:
jd1, jd2 = jd1.ravel(), jd2.ravel()
# Note that we use the new jd1.shape here to create a 1D result array.
# It is reshaped below.
body_posvel_bary = np.zeros(
(2 if get_velocity else 1, 3) + getattr(jd1, "shape", ())
)
for pair in kernel_spec:
spk = kernel[pair]
if spk.data_type == 3:
# Type 3 kernels contain both position and velocity.
posvel = spk.compute(jd1, jd2)
if get_velocity:
body_posvel_bary += posvel.reshape(body_posvel_bary.shape)
else:
body_posvel_bary[0] += posvel[:4]
else:
# spk.generate first yields the position and then the
# derivative. If no velocities are desired, body_posvel_bary
# has only one element and thus the loop ends after a single
# iteration, avoiding the velocity calculation.
for body_p_or_v, p_or_v in zip(
body_posvel_bary, spk.generate(jd1, jd2)
):
body_p_or_v += p_or_v
body_posvel_bary.shape = body_posvel_bary.shape[:2] + jd1_shape
body_pos_bary = CartesianRepresentation(
body_posvel_bary[0], unit=u.km, copy=False
)
if get_velocity:
body_vel_bary = CartesianRepresentation(
body_posvel_bary[1], unit=u.km / u.day, copy=False
)
return (body_pos_bary, body_vel_bary) if get_velocity else body_pos_bary
finally:
if not default_kernel and kernel is not None:
kernel.daf.file.close()
def get_body_barycentric_posvel(body, time, ephemeris=None):
"""Calculate the barycentric position and velocity of a solar system body.
Parameters
----------
body : str or list of tuple
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
position, velocity : tuple of `~astropy.coordinates.CartesianRepresentation`
Tuple of barycentric (ICRS) position and velocity.
See Also
--------
get_body_barycentric : to calculate position only.
This is faster by about a factor two for JPL kernels, but has no
speed advantage for the built-in ephemeris.
Notes
-----
{_EPHEMERIS_NOTE}
"""
return _get_body_barycentric_posvel(body, time, ephemeris)
def get_body_barycentric(body, time, ephemeris=None):
"""Calculate the barycentric position of a solar system body.
Parameters
----------
body : str or list of tuple
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
position : `~astropy.coordinates.CartesianRepresentation`
Barycentric (ICRS) position of the body in cartesian coordinates
See Also
--------
get_body_barycentric_posvel : to calculate both position and velocity.
Notes
-----
{_EPHEMERIS_NOTE}
"""
return _get_body_barycentric_posvel(body, time, ephemeris, get_velocity=False)
def _get_apparent_body_position(body, time, ephemeris, obsgeoloc=None):
"""Calculate the apparent position of body ``body`` relative to Earth.
This corrects for the light-travel time to the object.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``~astropy.coordinates.solar_system_ephemeris.set``
obsgeoloc : `~astropy.coordinates.CartesianRepresentation`, optional
The GCRS position of the observer
Returns
-------
cartesian_position : `~astropy.coordinates.CartesianRepresentation`
Barycentric (ICRS) apparent position of the body in cartesian coordinates
Notes
-----
{_EPHEMERIS_NOTE}
"""
if ephemeris is None:
ephemeris = solar_system_ephemeris.get()
# Calculate position given approximate light travel time.
delta_light_travel_time = 20.0 * u.s
emitted_time = time
light_travel_time = 0.0 * u.s
earth_loc = get_body_barycentric("earth", time, ephemeris)
if obsgeoloc is not None:
earth_loc += obsgeoloc
while np.any(np.fabs(delta_light_travel_time) > 1.0e-8 * u.s):
body_loc = get_body_barycentric(body, emitted_time, ephemeris)
earth_distance = (body_loc - earth_loc).norm()
delta_light_travel_time = light_travel_time - earth_distance / speed_of_light
light_travel_time = earth_distance / speed_of_light
emitted_time = time - light_travel_time
return get_body_barycentric(body, emitted_time, ephemeris)
def get_body(body, time, location=None, ephemeris=None):
"""
Get a `~astropy.coordinates.SkyCoord` for a solar system body as observed
from a location on Earth in the `~astropy.coordinates.GCRS` reference
system.
Parameters
----------
body : str or list of tuple
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
location : `~astropy.coordinates.EarthLocation`, optional
Location of observer on the Earth. If not given, will be taken from
``time`` (if not present, a geocentric observer will be assumed).
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
Returns
-------
skycoord : `~astropy.coordinates.SkyCoord`
GCRS Coordinate for the body
Notes
-----
The coordinate returned is the apparent position, which is the position of
the body at time *t* minus the light travel time from the *body* to the
observing *location*.
{_EPHEMERIS_NOTE}
"""
if location is None:
location = time.location
if location is not None:
obsgeoloc, obsgeovel = location.get_gcrs_posvel(time)
else:
obsgeoloc, obsgeovel = None, None
cartrep = _get_apparent_body_position(body, time, ephemeris, obsgeoloc)
icrs = ICRS(cartrep)
gcrs = icrs.transform_to(
GCRS(obstime=time, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)
)
return SkyCoord(gcrs)
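# Hedged usage sketch (not in the original source); exact numbers depend on
# the time, observer location and ephemeris chosen:
#   >>> from astropy.coordinates import get_body
#   >>> from astropy.time import Time
#   >>> t = Time("2014-09-22 23:22")
#   >>> get_body('moon', t)  # geocentric observer   # doctest: +SKIP
#   <SkyCoord (GCRS: obstime=2014-09-22 23:22:00.000, ...): (ra, dec, distance) ...>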
@deprecated("5.3", alternative='get_body("moon")')
def get_moon(time, location=None, ephemeris=None):
"""
Get a `~astropy.coordinates.SkyCoord` for the Earth's Moon as observed
from a location on Earth in the `~astropy.coordinates.GCRS` reference
system.
Parameters
----------
time : `~astropy.time.Time`
Time of observation
location : `~astropy.coordinates.EarthLocation`
Location of observer on the Earth. If none is supplied, taken from
``time`` (if not present, a geocentric observer will be assumed).
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
Returns
-------
skycoord : `~astropy.coordinates.SkyCoord`
GCRS Coordinate for the Moon
Notes
-----
The coordinate returned is the apparent position, which is the position of
the moon at time *t* minus the light travel time from the moon to the
observing *location*.
{_EPHEMERIS_NOTE}
"""
return get_body("moon", time, location=location, ephemeris=ephemeris)
# Add note about the ephemeris choices to the docstrings of relevant functions.
# Note: sadly, one cannot use f-strings for docstrings, so we format explicitly.
for f in [
f
for f in locals().values()
if callable(f) and f.__doc__ is not None and "{_EPHEMERIS_NOTE}" in f.__doc__
]:
f.__doc__ = f.__doc__.format(_EPHEMERIS_NOTE=indent(_EPHEMERIS_NOTE)[4:])
deprecation_msg = """
The use of _apparent_position_in_true_coordinates is deprecated because
astropy now implements a True Equator True Equinox Frame (TETE), which
should be used instead.
"""
@deprecated("4.2", deprecation_msg)
def _apparent_position_in_true_coordinates(skycoord):
"""
Convert Skycoord in GCRS frame into one in which RA and Dec
are defined w.r.t to the true equinox and poles of the Earth.
"""
location = getattr(skycoord, "location", None)
if location is None:
gcrs_rep = skycoord.obsgeoloc.with_differentials(
{"s": CartesianDifferential.from_cartesian(skycoord.obsgeovel)}
)
location = (
GCRS(gcrs_rep, obstime=skycoord.obstime)
.transform_to(ITRS(obstime=skycoord.obstime))
.earth_location
)
tete_frame = TETE(obstime=skycoord.obstime, location=location)
return skycoord.transform_to(tete_frame)
|
ea2a66234766112c6761320e8010f1e37f33d23a0beafbe1d402e4c89afbe74a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/coordinates
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
This module contains formatting functions that are for internal use in
astropy.coordinates.angles. Mainly they are conversions from one format
of data to another.
"""
import threading
from warnings import warn
import numpy as np
from astropy import units as u
from astropy.utils import format_exception, parsing
from astropy.utils.decorators import deprecated
from .errors import (
IllegalHourError,
IllegalHourWarning,
IllegalMinuteError,
IllegalMinuteWarning,
IllegalSecondError,
IllegalSecondWarning,
)
class _AngleParser:
"""
Parses the various angle formats including:
* 01:02:30.43 degrees
* 1 2 0 hours
* 1°2′3″
* 1d2m3s
* -1h2m3s
* 1°2′3″N
This class should not be used directly. Use `parse_angle`
instead.
"""
# For safe multi-threaded operation all class (but not instance)
# members that carry state should be thread-local. They are stored
# in the following class member
_thread_local = threading.local()
def __init__(self):
# TODO: in principle, the parser should be invalidated if we change unit
# system (from CDS to FITS, say). Might want to keep a link to the
# unit_registry used, and regenerate the parser/lexer if it changes.
# Alternatively, perhaps one should not worry at all and just pre-
# generate the parser for each release (as done for unit formats).
# For some discussion of this problem, see
# https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
if "_parser" not in _AngleParser._thread_local.__dict__:
(
_AngleParser._thread_local._parser,
_AngleParser._thread_local._lexer,
) = self._make_parser()
@classmethod
def _get_simple_unit_names(cls):
simple_units = set(u.radian.find_equivalent_units(include_prefix_units=True))
simple_unit_names = set()
# We filter out degree and hourangle, since those are treated
# separately.
for unit in simple_units:
if unit != u.deg and unit != u.hourangle:
simple_unit_names.update(unit.names)
return sorted(simple_unit_names)
@classmethod
def _make_parser(cls):
# List of token names.
tokens = (
"SIGN",
"UINT",
"UFLOAT",
"COLON",
"DEGREE",
"HOUR",
"MINUTE",
"SECOND",
"SIMPLE_UNIT",
"EASTWEST",
"NORTHSOUTH",
)
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r"((\d+\.\d*)|(\.\d+))([eE][+-−]?\d+)?"
# The above includes Unicode "MINUS SIGN" \u2212. It is
# important to include the hyphen last, or the regex will
# treat this as a range.
t.value = float(t.value.replace("−", "-"))
return t
def t_UINT(t):
r"\d+"
t.value = int(t.value)
return t
def t_SIGN(t):
r"[+−-]"
# The above include Unicode "MINUS SIGN" \u2212. It is
# important to include the hyphen last, or the regex will
# treat this as a range.
if t.value == "+":
t.value = 1.0
else:
t.value = -1.0
return t
def t_EASTWEST(t):
r"[EW]$"
t.value = -1.0 if t.value == "W" else 1.0
return t
def t_NORTHSOUTH(t):
r"[NS]$"
# We cannot use lower-case letters otherwise we'll confuse
# s[outh] with s[econd]
t.value = -1.0 if t.value == "S" else 1.0
return t
def t_SIMPLE_UNIT(t):
t.value = u.Unit(t.value)
return t
t_SIMPLE_UNIT.__doc__ = "|".join(
f"(?:{x})" for x in cls._get_simple_unit_names()
)
t_COLON = ":"
t_DEGREE = r"d(eg(ree(s)?)?)?|°"
t_HOUR = r"hour(s)?|h(r)?|ʰ"
t_MINUTE = r"m(in(ute(s)?)?)?|′|\'|ᵐ"
t_SECOND = r"s(ec(ond(s)?)?)?|″|\"|ˢ"
# A string containing ignored characters (spaces)
t_ignore = " "
# Error handling rule
def t_error(t):
raise ValueError(f"Invalid character at col {t.lexpos}")
lexer = parsing.lex(lextab="angle_lextab", package="astropy/coordinates")
def p_angle(p):
"""
angle : sign hms eastwest
| sign dms dir
| sign arcsecond dir
| sign arcminute dir
| sign simple dir
"""
sign = p[1] * p[3]
value, unit = p[2]
if isinstance(value, tuple):
p[0] = ((sign * value[0],) + value[1:], unit)
else:
p[0] = (sign * value, unit)
def p_sign(p):
"""
sign : SIGN
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_eastwest(p):
"""
eastwest : EASTWEST
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_dir(p):
"""
dir : EASTWEST
| NORTHSOUTH
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_ufloat(p):
"""
ufloat : UFLOAT
| UINT
"""
p[0] = p[1]
def p_colon(p):
"""
colon : UINT COLON ufloat
| UINT COLON UINT COLON ufloat
"""
if len(p) == 4:
p[0] = (p[1], p[3])
elif len(p) == 6:
p[0] = (p[1], p[3], p[5])
def p_spaced(p):
"""
spaced : UINT ufloat
| UINT UINT ufloat
"""
if len(p) == 3:
p[0] = (p[1], p[2])
elif len(p) == 4:
p[0] = (p[1], p[2], p[3])
def p_generic(p):
"""
generic : colon
| spaced
| ufloat
"""
p[0] = p[1]
def p_hms(p):
"""
hms : UINT HOUR
| UINT HOUR ufloat
| UINT HOUR UINT MINUTE
| UINT HOUR UFLOAT MINUTE
| UINT HOUR UINT MINUTE ufloat
| UINT HOUR UINT MINUTE ufloat SECOND
| generic HOUR
"""
if len(p) == 3:
p[0] = (p[1], u.hourangle)
elif len(p) in (4, 5):
p[0] = ((p[1], p[3]), u.hourangle)
elif len(p) in (6, 7):
p[0] = ((p[1], p[3], p[5]), u.hourangle)
def p_dms(p):
"""
dms : UINT DEGREE
| UINT DEGREE ufloat
| UINT DEGREE UINT MINUTE
| UINT DEGREE UFLOAT MINUTE
| UINT DEGREE UINT MINUTE ufloat
| UINT DEGREE UINT MINUTE ufloat SECOND
| generic DEGREE
"""
if len(p) == 3:
p[0] = (p[1], u.degree)
elif len(p) in (4, 5):
p[0] = ((p[1], p[3]), u.degree)
elif len(p) in (6, 7):
p[0] = ((p[1], p[3], p[5]), u.degree)
def p_simple(p):
"""
simple : generic
| generic SIMPLE_UNIT
"""
if len(p) == 2:
p[0] = (p[1], None)
else:
p[0] = (p[1], p[2])
def p_arcsecond(p):
"""
arcsecond : generic SECOND
"""
p[0] = (p[1], u.arcsecond)
def p_arcminute(p):
"""
arcminute : generic MINUTE
"""
p[0] = (p[1], u.arcminute)
def p_error(p):
raise ValueError
parser = parsing.yacc(tabmodule="angle_parsetab", package="astropy/coordinates")
return parser, lexer
def parse(self, angle, unit, debug=False):
try:
found_angle, found_unit = self._thread_local._parser.parse(
angle, lexer=self._thread_local._lexer, debug=debug
)
except ValueError as e:
if str(e):
raise ValueError(f"{str(e)} in angle {angle!r}") from e
else:
raise ValueError(f"Syntax error parsing angle {angle!r}") from e
if unit is None and found_unit is None:
raise u.UnitsError("No unit specified")
return found_angle, found_unit
def _check_hour_range(hrs):
"""
Checks that the given value is in the range (-24, 24).
"""
if np.any(np.abs(hrs) == 24.0):
warn(IllegalHourWarning(hrs, "Treating as 24 hr"))
elif np.any(hrs < -24.0) or np.any(hrs > 24.0):
raise IllegalHourError(hrs)
def _check_minute_range(m):
"""
    Checks that the given value is in the range [-60, 60]. If the value
    is equal to 60, then a warning is raised.
"""
if np.any(m == 60.0):
warn(IllegalMinuteWarning(m, "Treating as 0 min, +1 hr/deg"))
elif np.any(m < -60.0) or np.any(m > 60.0):
# "Error: minutes not in range [-60,60) ({0}).".format(min))
raise IllegalMinuteError(m)
def _check_second_range(sec):
"""
    Checks that the given value is in the range [-60, 60]. If the value
    is equal to 60, then a warning is raised.
"""
if np.any(sec == 60.0):
warn(IllegalSecondWarning(sec, "Treating as 0 sec, +1 min"))
elif sec is None:
pass
elif np.any(sec < -60.0) or np.any(sec > 60.0):
# "Error: seconds not in range [-60,60) ({0}).".format(sec))
raise IllegalSecondError(sec)
def check_hms_ranges(h, m, s):
"""
Checks that the given hour, minute and second are all within
reasonable range.
"""
_check_hour_range(h)
_check_minute_range(m)
_check_second_range(s)
def parse_angle(angle, unit=None, debug=False):
"""
Parses an input string value into an angle value.
Parameters
----------
angle : str
A string representing the angle. May be in one of the following forms:
* 01:02:30.43 degrees
* 1 2 0 hours
* 1°2′3″
* 1d2m3s
* -1h2m3s
unit : `~astropy.units.UnitBase` instance, optional
The unit used to interpret the string. If ``unit`` is not
provided, the unit must be explicitly represented in the
string, either at the end or as number separators.
debug : bool, optional
If `True`, print debugging information from the parser.
Returns
-------
value, unit : tuple
``value`` is the value as a floating point number or three-part
tuple, and ``unit`` is a `Unit` instance which is either the
unit passed in or the one explicitly mentioned in the input
string.
"""
return _AngleParser().parse(angle, unit, debug=debug)
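# Hedged examples (not part of the source): the returned value is either a
# float or an (h|d, m, s) tuple, together with the unit inferred from the
# string (output shown approximately):
#   >>> parse_angle("1d2m3.4s")
#   ((1.0, 2, 3.4), Unit("deg"))
#   >>> parse_angle("-1h2m3s")
#   ((-1.0, 2, 3), Unit("hourangle"))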
def degrees_to_dms(d):
"""
Convert a floating-point degree value into a ``(degree, arcminute,
arcsecond)`` tuple.
"""
sign = np.copysign(1.0, d)
(df, d) = np.modf(np.abs(d)) # (degree fraction, degree)
(mf, m) = np.modf(df * 60.0) # (minute fraction, minute)
s = mf * 60.0
return np.floor(sign * d), sign * np.floor(m), sign * s
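# Worked example (not part of the source): the sign is carried on every
# component so that negative angles round-trip cleanly:
#   >>> degrees_to_dms(10.5)
#   (10.0, 30.0, 0.0)
#   >>> degrees_to_dms(-10.5)
#   (-10.0, -30.0, -0.0)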
@deprecated(
"dms_to_degrees (or creating an Angle with a tuple) has ambiguous "
"behavior when the degree value is 0",
alternative=(
"another way of creating angles instead (e.g. a less "
"ambiguous string like '-0d1m2.3s'"
),
)
def dms_to_degrees(d, m, s=None):
"""
Convert degrees, arcminute, arcsecond to a float degrees value.
"""
_check_minute_range(m)
_check_second_range(s)
# determine sign
sign = np.copysign(1.0, d)
try:
d = np.floor(np.abs(d))
if s is None:
m = np.abs(m)
s = 0
else:
m = np.floor(np.abs(m))
s = np.abs(s)
except ValueError as err:
raise ValueError(
format_exception(
"{func}: dms values ({1[0]},{2[1]},{3[2]}) could not be "
"converted to numbers.",
d,
m,
s,
)
) from err
return sign * (d + m / 60.0 + s / 3600.0)
@deprecated(
"hms_to_hours (or creating an Angle with a tuple) has ambiguous "
"behavior when the hour value is 0",
alternative=(
"another way of creating angles instead (e.g. a less "
"ambiguous string like '-0h1m2.3s'"
),
)
def hms_to_hours(h, m, s=None):
"""
Convert hour, minute, second to a float hour value.
"""
check_hms_ranges(h, m, s)
# determine sign
sign = np.copysign(1.0, h)
try:
h = np.floor(np.abs(h))
if s is None:
m = np.abs(m)
s = 0
else:
m = np.floor(np.abs(m))
s = np.abs(s)
except ValueError as err:
raise ValueError(
format_exception(
"{func}: HMS values ({1[0]},{2[1]},{3[2]}) could not be "
"converted to numbers.",
h,
m,
s,
)
) from err
return sign * (h + m / 60.0 + s / 3600.0)
def hms_to_degrees(h, m, s):
"""
Convert hour, minute, second to a float degrees value.
"""
return hms_to_hours(h, m, s) * 15.0
def hms_to_radians(h, m, s):
"""
Convert hour, minute, second to a float radians value.
"""
return u.degree.to(u.radian, hms_to_degrees(h, m, s))
def hms_to_dms(h, m, s):
"""
    Convert hours, minutes, seconds to a ``(degree, arcminute, arcsecond)``
    tuple.
"""
return degrees_to_dms(hms_to_degrees(h, m, s))
def hours_to_decimal(h):
"""
Convert any parseable hour value into a float value.
"""
from . import angles
return angles.Angle(h, unit=u.hourangle).hour
def hours_to_radians(h):
"""
Convert an angle in Hours to Radians.
"""
return u.hourangle.to(u.radian, h)
def hours_to_hms(h):
"""
    Convert a floating-point hour value into an ``(hour, minute,
second)`` tuple.
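
    Examples
    --------
    An illustrative sketch (doctest skipped because the elements are numpy
    floats whose repr varies between numpy versions).

    >>> hours_to_hms(3.5)  # doctest: +SKIP
    (3.0, 30.0, 0.0)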
"""
sign = np.copysign(1.0, h)
    (hf, h) = np.modf(np.abs(h))  # (hour fraction, hour)
(mf, m) = np.modf(hf * 60.0) # (minute fraction, minute)
s = mf * 60.0
return (np.floor(sign * h), sign * np.floor(m), sign * s)
def radians_to_degrees(r):
"""
Convert an angle in Radians to Degrees.
"""
return u.radian.to(u.degree, r)
def radians_to_hours(r):
"""
Convert an angle in Radians to Hours.
"""
return u.radian.to(u.hourangle, r)
def radians_to_hms(r):
"""
Convert an angle in Radians to an ``(hour, minute, second)`` tuple.
"""
hours = radians_to_hours(r)
return hours_to_hms(hours)
def radians_to_dms(r):
"""
    Convert an angle in Radians to a ``(degree, arcminute,
arcsecond)`` tuple.
"""
degrees = u.radian.to(u.degree, r)
return degrees_to_dms(degrees)
def sexagesimal_to_string(values, precision=None, pad=False, sep=(":",), fields=3):
"""
Given an already separated tuple of sexagesimal values, returns
a string.
See `hours_to_string` and `degrees_to_string` for a higher-level
interface to this functionality.
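
    Examples
    --------
    A minimal sketch of the formatting behaviour (assumed output, doctest
    skipped).

    >>> sexagesimal_to_string((12, 34, 56.7), precision=1, sep=":")  # doctest: +SKIP
    '12:34:56.7'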
"""
# Check to see if values[0] is negative, using np.copysign to handle -0
sign = np.copysign(1.0, values[0])
# If the coordinates are negative, we need to take the absolute values.
# We use np.abs because abs(-0) is -0
# TODO: Is this true? (MHvK, 2018-02-01: not on my system)
values = [np.abs(value) for value in values]
if pad:
if sign == -1:
pad = 3
else:
pad = 2
else:
pad = 0
if not isinstance(sep, tuple):
sep = tuple(sep)
if fields < 1 or fields > 3:
raise ValueError("fields must be 1, 2, or 3")
if not sep: # empty string, False, or None, etc.
sep = ("", "", "")
elif len(sep) == 1:
if fields == 3:
sep = sep + (sep[0], "")
elif fields == 2:
sep = sep + ("", "")
else:
sep = ("", "", "")
elif len(sep) == 2:
sep = sep + ("",)
elif len(sep) != 3:
raise ValueError(
"Invalid separator specification for converting angle to string."
)
# Simplify the expression based on the requested precision. For
# example, if the seconds will round up to 60, we should convert
# it to 0 and carry upwards. If the field is hidden (by the
# fields kwarg) we round up around the middle, 30.0.
if precision is None:
rounding_thresh = 60.0 - (10.0**-8)
else:
rounding_thresh = 60.0 - (10.0**-precision)
if fields == 3 and values[2] >= rounding_thresh:
values[2] = 0.0
values[1] += 1.0
elif fields < 3 and values[2] >= 30.0:
values[1] += 1.0
if fields >= 2 and values[1] >= 60.0:
values[1] = 0.0
values[0] += 1.0
elif fields < 2 and values[1] >= 30.0:
values[0] += 1.0
literal = []
last_value = ""
literal.append("{0:0{pad}.0f}{sep[0]}")
if fields >= 2:
literal.append("{1:02d}{sep[1]}")
if fields == 3:
if precision is None:
last_value = f"{abs(values[2]):.8f}"
last_value = last_value.rstrip("0").rstrip(".")
else:
last_value = "{0:.{precision}f}".format(abs(values[2]), precision=precision)
if len(last_value) == 1 or last_value[1] == ".":
last_value = "0" + last_value
literal.append("{last_value}{sep[2]}")
literal = "".join(literal)
return literal.format(
np.copysign(values[0], sign),
int(values[1]),
values[2],
sep=sep,
pad=pad,
last_value=last_value,
)
def hours_to_string(h, precision=5, pad=False, sep=("h", "m", "s"), fields=3):
"""
Takes a decimal hour value and returns a string formatted as hms with
separator specified by the 'sep' parameter.
``h`` must be a scalar.
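
    Examples
    --------
    A minimal sketch with the default separators and precision (assumed
    output, doctest skipped).

    >>> hours_to_string(1.5)  # doctest: +SKIP
    '1h30m00.00000s'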
"""
h, m, s = hours_to_hms(h)
return sexagesimal_to_string(
(h, m, s), precision=precision, pad=pad, sep=sep, fields=fields
)
def degrees_to_string(d, precision=5, pad=False, sep=":", fields=3):
"""
    Takes a decimal degree value and returns a string formatted as dms with
separator specified by the 'sep' parameter.
``d`` must be a scalar.
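
    Examples
    --------
    A minimal sketch (assumed output, doctest skipped).

    >>> degrees_to_string(-12.5, precision=1)  # doctest: +SKIP
    '-12:30:00.0'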
"""
d, m, s = degrees_to_dms(d)
return sexagesimal_to_string(
(d, m, s), precision=precision, pad=pad, sep=sep, fields=fields
)
|
632ad0b437cb70ae17479c99c821bec88dc1b1abb1234e1377a835b23ddc5ce4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module defines custom errors and exceptions used in astropy.coordinates.
"""
from astropy.utils.exceptions import AstropyWarning
__all__ = [
"RangeError",
"BoundsError",
"IllegalHourError",
"IllegalMinuteError",
"IllegalSecondError",
"ConvertError",
"IllegalHourWarning",
"IllegalMinuteWarning",
"IllegalSecondWarning",
"UnknownSiteException",
]
class RangeError(ValueError):
"""
Raised when some part of an angle is out of its valid range.
"""
class BoundsError(RangeError):
"""
Raised when an angle is outside of its user-specified bounds.
"""
class IllegalHourError(RangeError):
"""
Raised when an hour value is not in the range [0,24).
Parameters
----------
hour : int, float
Examples
--------
.. code-block:: python
if not 0 <= hr < 24:
raise IllegalHourError(hour)
"""
def __init__(self, hour):
self.hour = hour
def __str__(self):
return (
f"An invalid value for 'hours' was found ('{self.hour}'); must be in the"
" range [0,24)."
)
class IllegalHourWarning(AstropyWarning):
"""
Raised when an hour value is 24.
Parameters
----------
hour : int, float
"""
def __init__(self, hour, alternativeactionstr=None):
self.hour = hour
self.alternativeactionstr = alternativeactionstr
def __str__(self):
message = (
f"'hour' was found to be '{self.hour}', which is not in range (-24, 24)."
)
if self.alternativeactionstr is not None:
message += " " + self.alternativeactionstr
return message
class IllegalMinuteError(RangeError):
"""
    Raised when a minute value is not in the range [0,60).
Parameters
----------
minute : int, float
Examples
--------
.. code-block:: python
if not 0 <= min < 60:
raise IllegalMinuteError(minute)
"""
def __init__(self, minute):
self.minute = minute
def __str__(self):
return (
f"An invalid value for 'minute' was found ('{self.minute}'); should be in"
" the range [0,60)."
)
class IllegalMinuteWarning(AstropyWarning):
"""
Raised when a minute value is 60.
Parameters
----------
minute : int, float
"""
def __init__(self, minute, alternativeactionstr=None):
self.minute = minute
self.alternativeactionstr = alternativeactionstr
def __str__(self):
message = (
f"'minute' was found to be '{self.minute}', which is not in range [0,60)."
)
if self.alternativeactionstr is not None:
message += " " + self.alternativeactionstr
return message
class IllegalSecondError(RangeError):
"""
    Raised when a second value (time) is not in the range [0,60).
Parameters
----------
second : int, float
Examples
--------
.. code-block:: python
if not 0 <= sec < 60:
raise IllegalSecondError(second)
"""
def __init__(self, second):
self.second = second
def __str__(self):
return (
f"An invalid value for 'second' was found ('{self.second}'); should be in"
" the range [0,60)."
)
class IllegalSecondWarning(AstropyWarning):
"""
Raised when a second value is 60.
Parameters
----------
second : int, float
"""
def __init__(self, second, alternativeactionstr=None):
self.second = second
self.alternativeactionstr = alternativeactionstr
def __str__(self):
message = (
f"'second' was found to be '{self.second}', which is not in range [0,60)."
)
if self.alternativeactionstr is not None:
message += " " + self.alternativeactionstr
return message
# TODO: consider if this should be moved to `units`?
class UnitsError(ValueError):
"""
Raised if units are missing or invalid.
"""
class ConvertError(Exception):
"""
Raised if a coordinate system cannot be converted to another.
"""
class UnknownSiteException(KeyError):
def __init__(self, site, attribute, close_names=None):
message = (
f"Site '{site}' not in database. Use {attribute} to see available sites."
f" If '{site}' exists in the online astropy-data repository, use the"
" 'refresh_cache=True' option to download the latest version."
)
if close_names:
message += " Did you mean one of: '{}'?'".format("', '".join(close_names))
self.site = site
self.attribute = attribute
self.close_names = close_names
return super().__init__(message)
|
0ea9e67f77ea290fd3a3f6e2dff339bb29e4e5903b82df5b664cc4f2841c6deb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities used for constructing and inspecting rotation matrices.
"""
from functools import reduce
import numpy as np
from astropy import units as u
from astropy.utils import deprecated
from .angles import Angle
@deprecated("5.2", alternative="@")
def matrix_product(*matrices):
"""Matrix multiply all arguments together.
Arguments should have dimension 2 or larger. Larger dimensional objects
are interpreted as stacks of matrices residing in the last two dimensions.
This function mostly exists for readability: using `~numpy.matmul`
directly, one would have ``matmul(matmul(m1, m2), m3)``, etc. For even
better readability, one might consider using `~numpy.matrix` for the
arguments (so that one could write ``m1 * m2 * m3``), but then it is not
    possible to handle stacks of matrices. This function is deprecated in
    favor of the ``@`` operator: use ``m1 @ m2 @ m3`` instead.
"""
return reduce(np.matmul, matrices)
def matrix_transpose(matrix):
"""Transpose a matrix or stack of matrices by swapping the last two axes.
This function mostly exists for readability; seeing ``.swapaxes(-2, -1)``
it is not that obvious that one does a transpose. Note that one cannot
use `~numpy.ndarray.T`, as this transposes all axes and thus does not
work for stacks of matrices.
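
    Examples
    --------
    A minimal sketch (output shown is the usual numpy repr, doctest skipped).

    >>> import numpy as np
    >>> matrix_transpose(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))  # doctest: +SKIP
    array([[1, 4, 7],
           [2, 5, 8],
           [3, 6, 9]])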
"""
return matrix.swapaxes(-2, -1)
def rotation_matrix(angle, axis="z", unit=None):
"""
Generate matrices for rotation by some angle around some axis.
Parameters
----------
angle : angle-like
The amount of rotation the matrices should represent. Can be an array.
axis : str or array-like
Either ``'x'``, ``'y'``, ``'z'``, or a (x,y,z) specifying the axis to
rotate about. If ``'x'``, ``'y'``, or ``'z'``, the rotation sense is
counterclockwise looking down the + axis (e.g. positive rotations obey
left-hand-rule). If given as an array, the last dimension should be 3;
it will be broadcast against ``angle``.
unit : unit-like, optional
If ``angle`` does not have associated units, they are in this
        unit. If neither is provided, it is assumed to be degrees.
Returns
-------
    rmat : ndarray
A unitary rotation matrix.
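
    Examples
    --------
    A minimal sketch: rotating the x unit vector by 90 degrees about the z
    axis gives (approximately) the -y unit vector, consistent with the
    left-hand-rule sense described above (approximate output, doctest
    skipped).

    >>> import numpy as np
    >>> from astropy import units as u
    >>> rotation_matrix(90 * u.deg, axis="z") @ np.array([1.0, 0.0, 0.0])  # doctest: +SKIP
    array([ 6.1e-17, -1.0e+00,  0.0e+00])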
"""
if isinstance(angle, u.Quantity):
angle = angle.to_value(u.radian)
else:
if unit is None:
angle = np.deg2rad(angle)
else:
angle = u.Unit(unit).to(u.rad, angle)
s = np.sin(angle)
c = np.cos(angle)
# use optimized implementations for x/y/z
try:
i = "xyz".index(axis)
except TypeError:
axis = np.asarray(axis)
axis = axis / np.sqrt((axis * axis).sum(axis=-1, keepdims=True))
R = (
axis[..., np.newaxis]
* axis[..., np.newaxis, :]
* (1.0 - c)[..., np.newaxis, np.newaxis]
)
for i in range(0, 3):
R[..., i, i] += c
a1 = (i + 1) % 3
a2 = (i + 2) % 3
R[..., a1, a2] += axis[..., i] * s
R[..., a2, a1] -= axis[..., i] * s
else:
a1 = (i + 1) % 3
a2 = (i + 2) % 3
R = np.zeros(getattr(angle, "shape", ()) + (3, 3))
R[..., i, i] = 1.0
R[..., a1, a1] = c
R[..., a1, a2] = s
R[..., a2, a1] = -s
R[..., a2, a2] = c
return R
def angle_axis(matrix):
"""
Angle of rotation and rotation axis for a given rotation matrix.
Parameters
----------
matrix : array-like
A 3 x 3 unitary rotation matrix (or stack of matrices).
Returns
-------
angle : `~astropy.coordinates.Angle`
The angle of rotation.
axis : array
The (normalized) axis of rotation (with last dimension 3).
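
    Examples
    --------
    A minimal round-trip sketch using `rotation_matrix` (approximate output,
    doctest skipped).

    >>> from astropy import units as u
    >>> angle_axis(rotation_matrix(30 * u.deg, axis="x"))  # doctest: +SKIP
    (<Angle 0.52359878 rad>, array([1., 0., 0.]))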
"""
m = np.asanyarray(matrix)
if m.shape[-2:] != (3, 3):
raise ValueError("matrix is not 3x3")
axis = np.zeros(m.shape[:-1])
axis[..., 0] = m[..., 2, 1] - m[..., 1, 2]
axis[..., 1] = m[..., 0, 2] - m[..., 2, 0]
axis[..., 2] = m[..., 1, 0] - m[..., 0, 1]
r = np.sqrt((axis * axis).sum(-1, keepdims=True))
angle = np.arctan2(r[..., 0], m[..., 0, 0] + m[..., 1, 1] + m[..., 2, 2] - 1.0)
return Angle(angle, u.radian), -axis / r
def is_O3(matrix, atol=None):
"""Check whether a matrix is in the length-preserving group O(3).
Parameters
----------
matrix : (..., N, N) array-like
Must have attribute ``.shape`` and method ``.swapaxes()`` and not error
when using `~numpy.isclose`.
atol : float, optional
The allowed absolute difference.
If `None` it defaults to 1e-15 or 5 * epsilon of the matrix's dtype, if floating.
.. versionadded:: 5.3
Returns
-------
is_o3 : bool or array of bool
If the matrix has more than two axes, the O(3) check is performed on
slices along the last two axes -- (M, N, N) => (M, ) bool array.
Notes
-----
The orthogonal group O(3) preserves lengths, but is not guaranteed to keep
orientations. Rotations and reflections are in this group.
For more information, see https://en.wikipedia.org/wiki/Orthogonal_group
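
    Examples
    --------
    A minimal sketch: a rotation matrix is in O(3), while a scaling matrix is
    not (doctest skipped because the result is a numpy bool).

    >>> import numpy as np
    >>> from astropy import units as u
    >>> is_O3(rotation_matrix(30 * u.deg, axis="x"))  # doctest: +SKIP
    True
    >>> is_O3(2 * np.eye(3))  # doctest: +SKIP
    False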
"""
# matrix is in O(3) (rotations, proper and improper).
I = np.identity(matrix.shape[-1])
if atol is None:
if np.issubdtype(matrix.dtype, np.floating):
atol = np.finfo(matrix.dtype).eps * 5
else:
atol = 1e-15
is_o3 = np.all(
np.isclose(matrix @ matrix.swapaxes(-2, -1), I, atol=atol), axis=(-2, -1)
)
return is_o3
def is_rotation(matrix, allow_improper=False, atol=None):
"""Check whether a matrix is a rotation, proper or improper.
Parameters
----------
matrix : (..., N, N) array-like
Must have attribute ``.shape`` and method ``.swapaxes()`` and not error
when using `~numpy.isclose` and `~numpy.linalg.det`.
allow_improper : bool, optional
Whether to restrict check to the SO(3), the group of proper rotations,
or also allow improper rotations (with determinant -1).
The default (False) is only SO(3).
atol : float, optional
The allowed absolute difference.
If `None` it defaults to 1e-15 or 5 * epsilon of the matrix's dtype, if floating.
.. versionadded:: 5.3
Returns
-------
isrot : bool or array of bool
If the matrix has more than two axes, the checks are performed on
slices along the last two axes -- (M, N, N) => (M, ) bool array.
See Also
--------
    astropy.coordinates.matrix_utilities.is_O3 :
For the less restrictive check that a matrix is in the group O(3).
Notes
-----
The group SO(3) is the rotation group. It is O(3), with determinant 1.
Rotations with determinant -1 are improper rotations, combining both a
rotation and a reflection.
For more information, see https://en.wikipedia.org/wiki/Orthogonal_group
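
    Examples
    --------
    A minimal sketch: a reflection is in O(3) but is only accepted when
    improper rotations are allowed (doctest skipped because the result is a
    numpy bool).

    >>> import numpy as np
    >>> reflection = np.diag([1.0, 1.0, -1.0])
    >>> is_rotation(reflection)  # doctest: +SKIP
    False
    >>> is_rotation(reflection, allow_improper=True)  # doctest: +SKIP
    True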
"""
if atol is None:
if np.issubdtype(matrix.dtype, np.floating):
atol = np.finfo(matrix.dtype).eps * 5
else:
atol = 1e-15
# matrix is in O(3).
is_o3 = is_O3(matrix, atol=atol)
# determinant checks for rotation (proper and improper)
if allow_improper: # determinant can be +/- 1
is_det1 = np.isclose(np.abs(np.linalg.det(matrix)), 1.0, atol=atol)
else: # restrict to SO(3)
is_det1 = np.isclose(np.linalg.det(matrix), 1.0, atol=atol)
return is_o3 & is_det1
|
6eba8e142fec825332804db519dc07cd27ae88782c54f9b6e65027835ee0ace8 | """
In this module, we define the coordinate representation classes, which are
used to represent low-level cartesian, spherical, cylindrical, and other
coordinates.
"""
import abc
import functools
import inspect
import operator
import warnings
import numpy as np
from erfa import ufunc as erfa_ufunc
import astropy.units as u
from astropy.utils import ShapedLikeNDArray, classproperty
from astropy.utils.data_info import MixinInfo
from astropy.utils.exceptions import DuplicateRepresentationWarning
from .angles import Angle, Latitude, Longitude
from .distances import Distance
from .matrix_utilities import is_O3
__all__ = [
"BaseRepresentationOrDifferential",
"BaseRepresentation",
"CartesianRepresentation",
"SphericalRepresentation",
"UnitSphericalRepresentation",
"RadialRepresentation",
"PhysicsSphericalRepresentation",
"CylindricalRepresentation",
"BaseDifferential",
"CartesianDifferential",
"BaseSphericalDifferential",
"BaseSphericalCosLatDifferential",
"SphericalDifferential",
"SphericalCosLatDifferential",
"UnitSphericalDifferential",
"UnitSphericalCosLatDifferential",
"RadialDifferential",
"CylindricalDifferential",
"PhysicsSphericalDifferential",
]
# Module-level dict mapping representation string alias names to classes.
# This is populated by __init_subclass__ when called by Representation or
# Differential classes so that they are all registered automatically.
REPRESENTATION_CLASSES = {}
DIFFERENTIAL_CLASSES = {}
# set for tracking duplicates
DUPLICATE_REPRESENTATIONS = set()
# a hash for the content of the above two dicts, cached for speed.
_REPRDIFF_HASH = None
def _fqn_class(cls):
"""Get the fully qualified name of a class."""
return cls.__module__ + "." + cls.__qualname__
def get_reprdiff_cls_hash():
"""
Returns a hash value that should be invariable if the
`REPRESENTATION_CLASSES` and `DIFFERENTIAL_CLASSES` dictionaries have not
changed.
"""
global _REPRDIFF_HASH
if _REPRDIFF_HASH is None:
_REPRDIFF_HASH = hash(tuple(REPRESENTATION_CLASSES.items())) + hash(
tuple(DIFFERENTIAL_CLASSES.items())
)
return _REPRDIFF_HASH
def _invalidate_reprdiff_cls_hash():
global _REPRDIFF_HASH
_REPRDIFF_HASH = None
class BaseRepresentationOrDifferentialInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = {"unit"} # Indicates unit is read-only
_supports_indexing = False
@staticmethod
def default_format(val):
# Create numpy dtype so that numpy formatting will work.
components = val.components
values = tuple(getattr(val, component).value for component in components)
a = np.empty(
getattr(val, "shape", ()),
[(component, value.dtype) for component, value in zip(components, values)],
)
for component, value in zip(components, values):
a[component] = value
return str(a)
@property
def _represent_as_dict_attrs(self):
return self._parent.components
@property
def unit(self):
if self._parent is None:
return None
unit = self._parent._unitstr
return unit[1:-1] if unit.startswith("(") else unit
def new_like(self, reps, length, metadata_conflicts="warn", name=None):
"""
Return a new instance like ``reps`` with ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
reps : list
List of input representations or differentials.
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : `~astropy.coordinates.BaseRepresentation` or `~astropy.coordinates.BaseDifferential` subclass instance
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
reps, metadata_conflicts, name, ("meta", "description")
)
# Make a new representation or differential with the desired length
# using the _apply / __getitem__ machinery to effectively return
# rep0[[0, 0, ..., 0, 0]]. This will have the right shape, and
# include possible differentials.
indexes = np.zeros(length, dtype=np.int64)
out = reps[0][indexes]
# Use __setitem__ machinery to check whether all representations
# can represent themselves as this one without loss of information.
for rep in reps[1:]:
try:
out[0] = rep[0]
except Exception as err:
raise ValueError("input representations are inconsistent.") from err
# Set (merged) info attributes.
for attr in ("name", "meta", "description"):
if attr in attrs:
setattr(out.info, attr, attrs[attr])
return out
class BaseRepresentationOrDifferential(ShapedLikeNDArray):
"""3D coordinate representations and differentials.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D point or differential. The names are the
keys and the subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied; if `False`, they will be
broadcast together but not use new memory.
"""
# Ensure multiplication/division with ndarray or Quantity doesn't lead to
# object arrays.
__array_priority__ = 50000
info = BaseRepresentationOrDifferentialInfo()
def __init__(self, *args, **kwargs):
# make argument a list, so we can pop them off.
args = list(args)
components = self.components
if (
args
and isinstance(args[0], self.__class__)
and all(arg is None for arg in args[1:])
):
rep_or_diff = args[0]
copy = kwargs.pop("copy", True)
attrs = [getattr(rep_or_diff, component) for component in components]
if "info" in rep_or_diff.__dict__:
self.info = rep_or_diff.info
if kwargs:
raise TypeError(
"unexpected keyword arguments for case "
f"where class instance is passed in: {kwargs}"
)
else:
attrs = []
for component in components:
try:
attr = args.pop(0) if args else kwargs.pop(component)
except KeyError:
raise TypeError(
"__init__() missing 1 required positional "
f"argument: {component!r}"
) from None
if attr is None:
raise TypeError(
"__init__() missing 1 required positional argument:"
f" {component!r} (or first argument should be an instance of"
f" {self.__class__.__name__})."
)
attrs.append(attr)
copy = args.pop(0) if args else kwargs.pop("copy", True)
if args:
raise TypeError(f"unexpected arguments: {args}")
if kwargs:
for component in components:
if component in kwargs:
raise TypeError(
f"__init__() got multiple values for argument {component!r}"
)
raise TypeError(f"unexpected keyword arguments: {kwargs}")
# Pass attributes through the required initializing classes.
attrs = [
self.attr_classes[component](attr, copy=copy, subok=True)
for component, attr in zip(components, attrs)
]
try:
bc_attrs = np.broadcast_arrays(*attrs, subok=True)
except ValueError as err:
if len(components) <= 2:
c_str = " and ".join(components)
else:
c_str = ", ".join(components[:2]) + ", and " + components[2]
raise ValueError(f"Input parameters {c_str} cannot be broadcast") from err
# The output of np.broadcast_arrays() has limitations on writeability, so we perform
# additional handling to enable writeability in most situations. This is primarily
# relevant for allowing the changing of the wrap angle of longitude components.
#
# If the shape has changed for a given component, broadcasting is needed:
# If copy=True, we make a copy of the broadcasted array to ensure writeability.
# Note that array had already been copied prior to the broadcasting.
# TODO: Find a way to avoid the double copy.
# If copy=False, we use the broadcasted array, and writeability may still be
# limited.
# If the shape has not changed for a given component, we can proceed with using the
# non-broadcasted array, which avoids writeability issues from np.broadcast_arrays().
attrs = [
(bc_attr.copy() if copy else bc_attr)
if bc_attr.shape != attr.shape
else attr
for attr, bc_attr in zip(attrs, bc_attrs)
]
# Set private attributes for the attributes. (If not defined explicitly
# on the class, the metaclass will define properties to access these.)
for component, attr in zip(components, attrs):
setattr(self, "_" + component, attr)
@classmethod
def get_name(cls):
"""Name of the representation or differential.
In lower case, with any trailing 'representation' or 'differential'
removed. (E.g., 'spherical' for
`~astropy.coordinates.SphericalRepresentation` or
`~astropy.coordinates.SphericalDifferential`.)
"""
name = cls.__name__.lower()
if name.endswith("representation"):
name = name[:-14]
elif name.endswith("differential"):
name = name[:-12]
return name
# The two methods that any subclass has to define.
@classmethod
@abc.abstractmethod
def from_cartesian(cls, other):
"""Create a representation of this class from a supplied Cartesian one.
Parameters
----------
other : `~astropy.coordinates.CartesianRepresentation`
The representation to turn into this class
Returns
-------
representation : `~astropy.coordinates.BaseRepresentation` subclass instance
A new representation of this class's type.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@abc.abstractmethod
def to_cartesian(self):
"""Convert the representation to its Cartesian form.
Note that any differentials get dropped.
Also note that orientation information at the origin is *not* preserved by
conversions through Cartesian coordinates. For example, transforming
an angular position defined at distance=0 through cartesian coordinates
and back will lose the original angular coordinates::
>>> import astropy.units as u
>>> import astropy.coordinates as coord
>>> rep = coord.SphericalRepresentation(
... lon=15*u.deg,
... lat=-11*u.deg,
... distance=0*u.pc)
>>> rep.to_cartesian().represent_as(coord.SphericalRepresentation)
<SphericalRepresentation (lon, lat, distance) in (rad, rad, pc)
(0., 0., 0.)>
Returns
-------
cartrepr : `~astropy.coordinates.CartesianRepresentation`
The representation in Cartesian form.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@property
def components(self):
"""A tuple with the in-order names of the coordinate components."""
return tuple(self.attr_classes)
def __eq__(self, value):
"""Equality operator.
This implements strict equality and requires that the representation
classes are identical and that the representation data are exactly equal.
"""
if self.__class__ is not value.__class__:
raise TypeError(
"cannot compare: objects must have same class: "
f"{self.__class__.__name__} vs. {value.__class__.__name__}"
)
try:
np.broadcast(self, value)
except ValueError as exc:
raise ValueError(f"cannot compare: {exc}") from exc
out = True
for comp in self.components:
out &= getattr(self, "_" + comp) == getattr(value, "_" + comp)
return out
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new representation or differential with ``method`` applied
to the component data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays (e.g., ``x``, ``y``, and ``z`` for
`~astropy.coordinates.CartesianRepresentation`), with the results used
to create a new instance.
Internally, it is also used to apply functions to the components
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args : tuple
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
new = super().__new__(self.__class__)
for component in self.components:
setattr(new, "_" + component, apply_method(getattr(self, component)))
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
new.info = self.info
return new
def __setitem__(self, item, value):
if value.__class__ is not self.__class__:
raise TypeError(
"can only set from object of same class: "
f"{self.__class__.__name__} vs. {value.__class__.__name__}"
)
for component in self.components:
getattr(self, "_" + component)[item] = getattr(value, "_" + component)
@property
def shape(self):
"""The shape of the instance and underlying arrays.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
ValueError
If the new shape has the wrong total number of elements.
AttributeError
If the shape of any of the components cannot be changed without the
arrays being copied. For these cases, use the ``reshape`` method
(which copies any arrays that cannot be reshaped in-place).
"""
return getattr(self, self.components[0]).shape
@shape.setter
def shape(self, shape):
# We keep track of arrays that were already reshaped since we may have
# to return those to their original shape if a later shape-setting
# fails. (This can happen since coordinates are broadcast together.)
reshaped = []
oldshape = self.shape
for component in self.components:
val = getattr(self, component)
if val.size > 1:
try:
val.shape = shape
except Exception:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
# Required to support multiplication and division, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _scale_operation(self, op, *args):
raise NotImplementedError()
def __mul__(self, other):
return self._scale_operation(operator.mul, other)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self._scale_operation(operator.truediv, other)
def __neg__(self):
return self._scale_operation(operator.neg)
# Follow numpy convention and make an independent copy.
def __pos__(self):
return self.copy()
# Required to support addition and subtraction, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _combine_operation(self, op, other, reverse=False):
raise NotImplementedError()
def __add__(self, other):
return self._combine_operation(operator.add, other)
def __radd__(self, other):
return self._combine_operation(operator.add, other, reverse=True)
def __sub__(self, other):
return self._combine_operation(operator.sub, other)
def __rsub__(self, other):
return self._combine_operation(operator.sub, other, reverse=True)
# The following are used for repr and str
@property
def _values(self):
"""Turn the coordinates into a record array with the coordinate values.
The record array fields will have the component names.
"""
coo_items = [(c, getattr(self, c)) for c in self.components]
result = np.empty(self.shape, [(c, coo.dtype) for c, coo in coo_items])
for c, coo in coo_items:
result[c] = coo.value
return result
@property
def _units(self):
"""Return a dictionary with the units of the coordinate components."""
return {cmpnt: getattr(self, cmpnt).unit for cmpnt in self.components}
@property
def _unitstr(self):
units_set = set(self._units.values())
if len(units_set) == 1:
unitstr = units_set.pop().to_string()
else:
unitstr = "({})".format(
", ".join(
self._units[component].to_string() for component in self.components
)
)
return unitstr
def __str__(self):
return f"{np.array2string(self._values, separator=', ')} {self._unitstr:s}"
def __repr__(self):
prefixstr = " "
arrstr = np.array2string(self._values, prefix=prefixstr, separator=", ")
diffstr = ""
if getattr(self, "differentials", None):
diffstr = "\n (has differentials w.r.t.: {})".format(
", ".join([repr(key) for key in self.differentials.keys()])
)
unitstr = ("in " + self._unitstr) if self._unitstr else "[dimensionless]"
return (
f"<{self.__class__.__name__} ({', '.join(self.components)})"
f" {unitstr:s}\n{prefixstr}{arrstr}{diffstr}>"
)
def _make_getter(component):
"""Make an attribute getter for use in a property.
Parameters
----------
component : str
The name of the component that should be accessed. This assumes the
actual value is stored in an attribute of that name prefixed by '_'.
"""
# This has to be done in a function to ensure the reference to component
# is not lost/redirected.
component = "_" + component
def get_component(self):
return getattr(self, component)
return get_component
class RepresentationInfo(BaseRepresentationOrDifferentialInfo):
@property
def _represent_as_dict_attrs(self):
attrs = super()._represent_as_dict_attrs
if self._parent._differentials:
attrs += ("differentials",)
return attrs
def _represent_as_dict(self, attrs=None):
out = super()._represent_as_dict(attrs)
for key, value in out.pop("differentials", {}).items():
out[f"differentials.{key}"] = value
return out
def _construct_from_dict(self, map):
differentials = {}
for key in list(map.keys()):
if key.startswith("differentials."):
differentials[key[14:]] = map.pop(key)
map["differentials"] = differentials
return super()._construct_from_dict(map)
class BaseRepresentation(BaseRepresentationOrDifferential):
"""Base for representing a point in a 3D coordinate system.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D points. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
subclass instance, or a dictionary with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
Notes
-----
All representation classes should subclass this base representation class,
and define an ``attr_classes`` attribute, a `dict`
which maps component names to the class that creates them. They must also
define a ``to_cartesian`` method and a ``from_cartesian`` class method. By
default, transformations are done via the cartesian system, but classes
that want to define a smarter transformation path can overload the
``represent_as`` method. If one wants to use an associated differential
class, one should also define ``unit_vectors`` and ``scale_factors``
methods (see those methods for details).
"""
info = RepresentationInfo()
def __init_subclass__(cls, **kwargs):
# Register representation name (except for BaseRepresentation)
if cls.__name__ == "BaseRepresentation":
return
if not hasattr(cls, "attr_classes"):
raise NotImplementedError(
'Representations must have an "attr_classes" class attribute.'
)
repr_name = cls.get_name()
# first time a duplicate is added
# remove first entry and add both using their qualnames
if repr_name in REPRESENTATION_CLASSES:
DUPLICATE_REPRESENTATIONS.add(repr_name)
fqn_cls = _fqn_class(cls)
existing = REPRESENTATION_CLASSES[repr_name]
fqn_existing = _fqn_class(existing)
if fqn_cls == fqn_existing:
raise ValueError(f'Representation "{fqn_cls}" already defined')
msg = (
f'Representation "{repr_name}" already defined, removing it to avoid'
                f' confusion. Use qualnames "{fqn_cls}" and "{fqn_existing}" or class'
" instances directly"
)
warnings.warn(msg, DuplicateRepresentationWarning)
del REPRESENTATION_CLASSES[repr_name]
REPRESENTATION_CLASSES[fqn_existing] = existing
repr_name = fqn_cls
# further definitions with the same name, just add qualname
elif repr_name in DUPLICATE_REPRESENTATIONS:
fqn_cls = _fqn_class(cls)
warnings.warn(
f'Representation "{repr_name}" already defined, using qualname '
f'"{fqn_cls}".'
)
repr_name = fqn_cls
if repr_name in REPRESENTATION_CLASSES:
raise ValueError(f'Representation "{repr_name}" already defined')
REPRESENTATION_CLASSES[repr_name] = cls
_invalidate_reprdiff_cls_hash()
# define getters for any component that does not yet have one.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(
cls,
component,
property(
_make_getter(component),
doc=f"The '{component}' component of the points(s).",
),
)
super().__init_subclass__(**kwargs)
def __init__(self, *args, differentials=None, **kwargs):
# Handle any differentials passed in.
super().__init__(*args, **kwargs)
if differentials is None and args and isinstance(args[0], self.__class__):
differentials = args[0]._differentials
self._differentials = self._validate_differentials(differentials)
def _validate_differentials(self, differentials):
"""
Validate that the provided differentials are appropriate for this
representation and recast/reshape as necessary and then return.
Note that this does *not* set the differentials on
``self._differentials``, but rather leaves that for the caller.
"""
# Now handle the actual validation of any specified differential classes
if differentials is None:
differentials = dict()
elif isinstance(differentials, BaseDifferential):
# We can't handle auto-determining the key for this combo
if isinstance(differentials, RadialDifferential) and isinstance(
self, UnitSphericalRepresentation
):
raise ValueError(
"To attach a RadialDifferential to a UnitSphericalRepresentation,"
" you must supply a dictionary with an appropriate key."
)
key = differentials._get_deriv_key(self)
differentials = {key: differentials}
for key in differentials:
try:
diff = differentials[key]
except TypeError as err:
raise TypeError(
"'differentials' argument must be a dictionary-like object"
) from err
diff._check_base(self)
if isinstance(diff, RadialDifferential) and isinstance(
self, UnitSphericalRepresentation
):
# We trust the passing of a key for a RadialDifferential
# attached to a UnitSphericalRepresentation because it will not
# have a paired component name (UnitSphericalRepresentation has
# no .distance) to automatically determine the expected key
pass
else:
expected_key = diff._get_deriv_key(self)
if key != expected_key:
raise ValueError(
f"For differential object '{repr(diff)}', expected "
f"unit key = '{expected_key}' but received key = '{key}'"
)
# For now, we are very rigid: differentials must have the same shape
# as the representation. This makes it easier to handle __getitem__
# and any other shape-changing operations on representations that
# have associated differentials
if diff.shape != self.shape:
# TODO: message of IncompatibleShapeError is not customizable,
# so use a valueerror instead?
raise ValueError(
"Shape of differentials must be the same "
f"as the shape of the representation ({diff.shape} vs {self.shape})"
)
return differentials
def _raise_if_has_differentials(self, op_name):
"""
Used to raise a consistent exception for any operation that is not
supported when a representation has differentials attached.
"""
if self.differentials:
raise TypeError(
f"Operation '{op_name}' is not supported when "
f"differentials are attached to a {self.__class__.__name__}."
)
@classproperty
def _compatible_differentials(cls):
return [DIFFERENTIAL_CLASSES[cls.get_name()]]
@property
def differentials(self):
"""A dictionary of differential class instances.
The keys of this dictionary must be a string representation of the SI
unit with which the differential (derivative) is taken. For example, for
a velocity differential on a positional representation, the key would be
``'s'`` for seconds, indicating that the derivative is a time
derivative.
"""
return self._differentials
# We do not make unit_vectors and scale_factors abstract methods, since
# they are only necessary if one also defines an associated Differential.
# Also, doing so would break pre-differential representation subclasses.
def unit_vectors(self):
r"""Cartesian unit vectors in the direction of each component.
Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`,
a change in one component of :math:`\delta c` corresponds to a change
in representation of :math:`\delta c \times f_c \times \hat{e}_c`.
Returns
-------
unit_vectors : dict of `~astropy.coordinates.CartesianRepresentation`
The keys are the component names.
"""
raise NotImplementedError(f"{type(self)} has not implemented unit vectors")
def scale_factors(self):
r"""Scale factors for each component's direction.
Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`,
a change in one component of :math:`\delta c` corresponds to a change
in representation of :math:`\delta c \times f_c \times \hat{e}_c`.
Returns
-------
scale_factors : dict of `~astropy.units.Quantity`
The keys are the component names.
"""
raise NotImplementedError(f"{type(self)} has not implemented scale factors.")
def _re_represent_differentials(self, new_rep, differential_class):
"""Re-represent the differentials to the specified classes.
This returns a new dictionary with the same keys but with the
attached differentials converted to the new differential classes.
"""
if differential_class is None:
return dict()
if not self.differentials and differential_class:
raise ValueError("No differentials associated with this representation!")
elif (
len(self.differentials) == 1
and inspect.isclass(differential_class)
and issubclass(differential_class, BaseDifferential)
):
# TODO: is there a better way to do this?
differential_class = {
list(self.differentials.keys())[0]: differential_class
}
elif differential_class.keys() != self.differentials.keys():
raise ValueError(
"Desired differential classes must be passed in as a dictionary with"
" keys equal to a string representation of the unit of the derivative"
" for each differential stored with this "
f"representation object ({self.differentials})"
)
new_diffs = dict()
for k in self.differentials:
diff = self.differentials[k]
try:
new_diffs[k] = diff.represent_as(differential_class[k], base=self)
except Exception as err:
if differential_class[k] not in new_rep._compatible_differentials:
raise TypeError(
f"Desired differential class {differential_class[k]} is not "
"compatible with the desired "
f"representation class {new_rep.__class__}"
) from err
else:
raise
return new_diffs
def represent_as(self, other_class, differential_class=None):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via Cartesian coordinates.
Also note that orientation information at the origin is *not* preserved by
conversions through Cartesian coordinates. See the docstring for
:meth:`~astropy.coordinates.BaseRepresentationOrDifferential.to_cartesian`
for an example.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
differential_class : dict of `~astropy.coordinates.BaseDifferential`, optional
Classes in which the differentials should be represented.
Can be a single class if only a single differential is attached,
otherwise it should be a `dict` keyed by the same keys as the
differentials.
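
        Examples
        --------
        A minimal sketch converting a Cartesian point to spherical
        coordinates (assumed output, doctest skipped).

        >>> import astropy.units as u
        >>> rep = CartesianRepresentation(1 * u.kpc, 0 * u.kpc, 0 * u.kpc)
        >>> rep.represent_as(SphericalRepresentation)  # doctest: +SKIP
        <SphericalRepresentation (lon, lat, distance) in (rad, rad, kpc)
            (0., 0., 1.)>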
"""
if other_class is self.__class__ and not differential_class:
return self.without_differentials()
else:
if isinstance(other_class, str):
raise ValueError(
"Input to a representation's represent_as must be a class, not "
"a string. For strings, use frame objects."
)
if other_class is not self.__class__:
# The default is to convert via cartesian coordinates
new_rep = other_class.from_cartesian(self.to_cartesian())
else:
new_rep = self
new_rep._differentials = self._re_represent_differentials(
new_rep, differential_class
)
return new_rep
def transform(self, matrix):
"""Transform coordinates using a 3x3 matrix in a Cartesian basis.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
"""
# route transformation through Cartesian
difs_cls = {k: CartesianDifferential for k in self.differentials.keys()}
crep = self.represent_as(
CartesianRepresentation, differential_class=difs_cls
).transform(matrix)
# move back to original representation
difs_cls = {k: diff.__class__ for k, diff in self.differentials.items()}
rep = crep.represent_as(self.__class__, difs_cls)
return rep
def with_differentials(self, differentials):
"""
Create a new representation with the same positions as this
representation, but with these new differentials.
Differential keys that already exist in this object's differential dict
are overwritten.
Parameters
----------
differentials : sequence of `~astropy.coordinates.BaseDifferential` subclass instance
The differentials for the new representation to have.
Returns
-------
`~astropy.coordinates.BaseRepresentation` subclass instance
A copy of this representation, but with the ``differentials`` as
its differentials.
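
        Examples
        --------
        A minimal sketch attaching a velocity differential to a position
        (assumed output, doctest skipped).

        >>> import astropy.units as u
        >>> pos = CartesianRepresentation(1, 2, 3, unit=u.kpc)
        >>> vel = CartesianDifferential(4, 5, 6, unit=u.km / u.s)
        >>> list(pos.with_differentials(vel).differentials)  # doctest: +SKIP
        ['s']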
"""
if not differentials:
return self
args = [getattr(self, component) for component in self.components]
# We shallow copy the differentials dictionary so we don't update the
# current object's dictionary when adding new keys
new_rep = self.__class__(
*args, differentials=self.differentials.copy(), copy=False
)
new_rep._differentials.update(new_rep._validate_differentials(differentials))
return new_rep
def without_differentials(self):
"""Return a copy of the representation without attached differentials.
Returns
-------
`~astropy.coordinates.BaseRepresentation` subclass instance
A shallow copy of this representation, without any differentials.
If no differentials were present, no copy is made.
"""
if not self._differentials:
return self
args = [getattr(self, component) for component in self.components]
return self.__class__(*args, copy=False)
@classmethod
def from_representation(cls, representation):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
            The representation that should be converted to this class.
"""
return representation.represent_as(cls)
def __eq__(self, value):
"""Equality operator for BaseRepresentation.
This implements strict equality and requires that the representation
classes are identical, the differentials are identical, and that the
representation data are exactly equal.
"""
        # BaseRepresentationOrDifferential (checks classes and compares components)
out = super().__eq__(value)
# super() checks that the class is identical so can this even happen?
# (same class, different differentials ?)
if self._differentials.keys() != value._differentials.keys():
raise ValueError("cannot compare: objects must have same differentials")
for self_diff, value_diff in zip(
self._differentials.values(), value._differentials.values()
):
out &= self_diff == value_diff
return out
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new representation with ``method`` applied to the component
data.
This is not a simple inherit from ``BaseRepresentationOrDifferential``
because we need to call ``._apply()`` on any associated differential
classes.
See docstring for `BaseRepresentationOrDifferential._apply`.
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args : tuple
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
rep = super()._apply(method, *args, **kwargs)
rep._differentials = {
k: diff._apply(method, *args, **kwargs)
for k, diff in self._differentials.items()
}
return rep
def __setitem__(self, item, value):
if not isinstance(value, BaseRepresentation):
raise TypeError(
f"value must be a representation instance, not {type(value)}."
)
if not (
isinstance(value, self.__class__)
or len(value.attr_classes) == len(self.attr_classes)
):
raise ValueError(
f"value must be representable as {self.__class__.__name__} "
"without loss of information."
)
diff_classes = {}
if self._differentials:
if self._differentials.keys() != value._differentials.keys():
raise ValueError("value must have the same differentials.")
for key, self_diff in self._differentials.items():
diff_classes[key] = self_diff_cls = self_diff.__class__
value_diff_cls = value._differentials[key].__class__
if not (
                    issubclass(value_diff_cls, self_diff_cls)
or (
len(value_diff_cls.attr_classes)
== len(self_diff_cls.attr_classes)
)
):
raise ValueError(
f"value differential {key!r} must be representable as "
f"{self_diff.__class__.__name__} without loss of information."
)
value = value.represent_as(self.__class__, diff_classes)
super().__setitem__(item, value)
for key, differential in self._differentials.items():
differential[item] = value._differentials[key]
def _scale_operation(self, op, *args):
"""Scale all non-angular components, leaving angular ones unchanged.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.).
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
"""
results = []
for component, cls in self.attr_classes.items():
value = getattr(self, component)
if issubclass(cls, Angle):
results.append(value)
else:
results.append(op(value, *args))
# try/except catches anything that cannot initialize the class, such
# as operations that returned NotImplemented or a representation
# instead of a quantity (as would happen for, e.g., rep * rep).
try:
result = self.__class__(*results)
except Exception:
return NotImplemented
for key, differential in self.differentials.items():
diff_result = differential._scale_operation(op, *args, scaled_base=True)
result.differentials[key] = diff_result
return result
def _combine_operation(self, op, other, reverse=False):
"""Combine two representation.
By default, operate on the cartesian representations of both.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self.from_cartesian(result)
# We need to override this setter to support differentials
@BaseRepresentationOrDifferential.shape.setter
def shape(self, shape):
orig_shape = self.shape
# See: https://stackoverflow.com/questions/3336767/ for an example
BaseRepresentationOrDifferential.shape.fset(self, shape)
# also try to perform shape-setting on any associated differentials
try:
for k in self.differentials:
self.differentials[k].shape = shape
except Exception:
BaseRepresentationOrDifferential.shape.fset(self, orig_shape)
for k in self.differentials:
self.differentials[k].shape = orig_shape
raise
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Note that any associated differentials will be dropped during this
operation.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
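
        Examples
        --------
        A minimal sketch: for spherical coordinates only the non-angular
        ``distance`` component contributes (assumed output, doctest skipped).

        >>> import astropy.units as u
        >>> sph = SphericalRepresentation(lon=30 * u.deg, lat=10 * u.deg,
        ...                               distance=3 * u.kpc)
        >>> sph.norm()  # doctest: +SKIP
        <Quantity 3. kpc>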
"""
return np.sqrt(
sum(
getattr(self, component) ** 2
for component, cls in self.attr_classes.items()
if not issubclass(cls, Angle)
)
)
def mean(self, *args, **kwargs):
"""Vector mean.
Averaging is done by converting the representation to cartesian, and
taking the mean of the x, y, and z components. The result is converted
back to the same representation as the input.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
Returns
-------
mean : `~astropy.coordinates.BaseRepresentation` subclass instance
Vector mean, in the same representation as that of the input.
"""
self._raise_if_has_differentials("mean")
return self.from_cartesian(self.to_cartesian().mean(*args, **kwargs))
def sum(self, *args, **kwargs):
"""Vector sum.
Adding is done by converting the representation to cartesian, and
summing the x, y, and z components. The result is converted back to the
same representation as the input.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
Returns
-------
sum : `~astropy.coordinates.BaseRepresentation` subclass instance
Vector sum, in the same representation as that of the input.
"""
self._raise_if_has_differentials("sum")
return self.from_cartesian(self.to_cartesian().sum(*args, **kwargs))
def dot(self, other):
"""Dot product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation`
The representation to take the dot product with.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of the
cartesian representations of ``self`` and ``other``.
"""
return self.to_cartesian().dot(other)
def cross(self, other):
"""Vector cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to the type of representation of ``self``.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The representation to take the cross product with.
Returns
-------
cross_product : `~astropy.coordinates.BaseRepresentation` subclass instance
With vectors perpendicular to both ``self`` and ``other``, in the
same type of representation as ``self``.
"""
self._raise_if_has_differentials("cross")
return self.from_cartesian(self.to_cartesian().cross(other))
class CartesianRepresentation(BaseRepresentation):
"""
Representation of points in 3D cartesian coordinates.
Parameters
----------
x, y, z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the point(s). If ``x``, ``y``, and ``z``
have different shapes, they should be broadcastable. If not quantity,
``unit`` should be set. If only ``x`` is given, it is assumed that it
contains an array with the 3 coordinates stored along ``xyz_axis``.
unit : unit-like
        If given, the coordinates will be converted to this unit (or taken to
        be in this unit if not given).
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided rather than distinct ``x``, ``y``, and ``z`` (default: 0).
differentials : dict, `~astropy.coordinates.CartesianDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`~astropy.coordinates.CartesianDifferential` instance, or a dictionary of
`~astropy.coordinates.CartesianDifferential` s with keys set to a string representation of
the SI unit with which the differential (derivative) is taken. For
example, for a velocity differential on a positional representation, the
key would be ``'s'`` for seconds, indicating that the derivative is a
time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
attr_classes = {"x": u.Quantity, "y": u.Quantity, "z": u.Quantity}
_xyz = None
def __init__(
self, x, y=None, z=None, unit=None, xyz_axis=None, differentials=None, copy=True
):
if y is None and z is None:
if isinstance(x, np.ndarray) and x.dtype.kind not in "OV":
# Short-cut for 3-D array input.
x = u.Quantity(x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._xyz = x
if xyz_axis:
x = np.moveaxis(x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._x, self._y, self._z = x
self._differentials = self._validate_differentials(differentials)
return
elif (
isinstance(x, CartesianRepresentation)
and unit is None
and xyz_axis is None
):
if differentials is None:
differentials = x._differentials
return super().__init__(x, differentials=differentials, copy=copy)
else:
x, y, z = x
if xyz_axis is not None:
raise ValueError(
"xyz_axis should only be set if x, y, and z are in a single array"
" passed in through x, i.e., y and z should not be not given."
)
if y is None or z is None:
raise ValueError(
f"x, y, and z are required to instantiate {self.__class__.__name__}"
)
if unit is not None:
x = u.Quantity(x, unit, copy=copy, subok=True)
y = u.Quantity(y, unit, copy=copy, subok=True)
z = u.Quantity(z, unit, copy=copy, subok=True)
copy = False
super().__init__(x, y, z, copy=copy, differentials=differentials)
if not (
self._x.unit.is_equivalent(self._y.unit)
and self._x.unit.is_equivalent(self._z.unit)
):
raise u.UnitsError("x, y, and z should have matching physical types")
def unit_vectors(self):
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
o = np.broadcast_to(0.0 * u.one, self.shape, subok=True)
return {
"x": CartesianRepresentation(l, o, o, copy=False),
"y": CartesianRepresentation(o, l, o, copy=False),
"z": CartesianRepresentation(o, o, l, copy=False),
}
def scale_factors(self):
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"x": l, "y": l, "z": l}
def get_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
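Examples
--------
A small, illustrative sketch (the values are arbitrary):
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep = CartesianRepresentation([1, 2] * u.pc, [3, 4] * u.pc, [5, 6] * u.pc)
>>> rep.get_xyz().shape
(3, 2)
>>> rep.get_xyz(xyz_axis=-1).shape
(2, 3)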
"""
if self._xyz is not None:
if self._xyz_axis == xyz_axis:
return self._xyz
else:
return np.moveaxis(self._xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return np.stack([self._x, self._y, self._z], axis=xyz_axis)
xyz = property(get_xyz)
@classmethod
def from_cartesian(cls, other):
return other
def to_cartesian(self):
return self
def transform(self, matrix):
"""
Transform the cartesian coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : ndarray
A 3x3 transformation matrix, such as a rotation matrix.
Examples
--------
We can start off by creating a cartesian representation object:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep = CartesianRepresentation([1, 2] * u.pc,
... [2, 3] * u.pc,
... [3, 4] * u.pc)
We now create a rotation matrix around the z axis:
>>> from astropy.coordinates.matrix_utilities import rotation_matrix
>>> rotation = rotation_matrix(30 * u.deg, axis='z')
Finally, we can apply this transformation:
>>> rep_new = rep.transform(rotation)
>>> rep_new.xyz # doctest: +FLOAT_CMP
<Quantity [[ 1.8660254 , 3.23205081],
[ 1.23205081, 1.59807621],
[ 3. , 4. ]] pc>
"""
# erfa rxp: Multiply a p-vector by an r-matrix.
p = erfa_ufunc.rxp(matrix, self.get_xyz(xyz_axis=-1))
# transformed representation
rep = self.__class__(p, xyz_axis=-1, copy=False)
# Handle differentials attached to this representation
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
return rep.with_differentials(new_diffs)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
try:
other_c = other.to_cartesian()
except Exception:
return NotImplemented
first, second = (self, other_c) if not reverse else (other_c, self)
return self.__class__(
*(
op(getattr(first, component), getattr(second, component))
for component in first.components
)
)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Note that any associated differentials will be dropped during this
operation.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
# erfa pm: Modulus of p-vector.
return erfa_ufunc.pm(self.get_xyz(xyz_axis=-1))
def mean(self, *args, **kwargs):
"""Vector mean.
Returns a new CartesianRepresentation instance with the means of the
x, y, and z components.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("mean")
return self._apply("mean", *args, **kwargs)
def sum(self, *args, **kwargs):
"""Vector sum.
Returns a new CartesianRepresentation instance with the sums of the
x, y, and z components.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("sum")
return self._apply("sum", *args, **kwargs)
def dot(self, other):
"""Dot product of two representations.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
If not already cartesian, it is converted.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of ``self``
and ``other``.
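Examples
--------
As a brief illustration (arbitrary values):
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep1 = CartesianRepresentation(1 * u.pc, 2 * u.pc, 3 * u.pc)
>>> rep2 = CartesianRepresentation(4 * u.pc, 5 * u.pc, 6 * u.pc)
>>> rep1.dot(rep2)  # doctest: +FLOAT_CMP
<Quantity 32. pc2>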
"""
try:
other_c = other.to_cartesian()
except Exception as err:
raise TypeError(
"can only take dot product with another "
f"representation, not a {type(other)} instance."
) from err
# erfa pdp: p-vector inner (=scalar=dot) product.
return erfa_ufunc.pdp(self.get_xyz(xyz_axis=-1), other_c.get_xyz(xyz_axis=-1))
def cross(self, other):
"""Cross product of two representations.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
If not already cartesian, it is converted.
Returns
-------
cross_product : `~astropy.coordinates.CartesianRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
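Examples
--------
A short sketch using vectors along the x and y axes:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> x_hat = CartesianRepresentation(1, 0, 0, unit=u.kpc)
>>> y_hat = CartesianRepresentation(0, 1, 0, unit=u.kpc)
>>> x_hat.cross(y_hat).xyz  # doctest: +FLOAT_CMP
<Quantity [0., 0., 1.] kpc2>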
"""
self._raise_if_has_differentials("cross")
try:
other_c = other.to_cartesian()
except Exception as err:
raise TypeError(
"cannot only take cross product with another "
f"representation, not a {type(other)} instance."
) from err
# erfa pxp: p-vector outer (=vector=cross) product.
sxo = erfa_ufunc.pxp(self.get_xyz(xyz_axis=-1), other_c.get_xyz(xyz_axis=-1))
return self.__class__(sxo, xyz_axis=-1)
class UnitSphericalRepresentation(BaseRepresentation):
"""
Representation of points on a unit sphere.
Parameters
----------
lon, lat : `~astropy.units.Quantity` ['angle'] or str
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
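Examples
--------
A brief, illustrative instantiation; the corresponding Cartesian vector
always has unit length:
>>> from astropy import units as u
>>> from astropy.coordinates import UnitSphericalRepresentation
>>> usph = UnitSphericalRepresentation(30 * u.deg, 45 * u.deg)
>>> usph.to_cartesian().norm()  # doctest: +FLOAT_CMP
<Quantity 1.>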
"""
attr_classes = {"lon": Longitude, "lat": Latitude}
@classproperty
def _dimensional_representation(cls):
return SphericalRepresentation
def __init__(self, lon, lat=None, differentials=None, copy=True):
super().__init__(lon, lat, differentials=differentials, copy=copy)
@classproperty
def _compatible_differentials(cls):
return [
UnitSphericalDifferential,
UnitSphericalCosLatDifferential,
SphericalDifferential,
SphericalCosLatDifferential,
RadialDifferential,
]
# Could let the metaclass define these automatically, but good to have
# a bit clearer docstrings.
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return {
"lon": CartesianRepresentation(-sinlon, coslon, 0.0, copy=False),
"lat": CartesianRepresentation(
-sinlat * coslon, -sinlat * sinlon, coslat, copy=False
),
}
def scale_factors(self, omit_coslat=False):
sf_lat = np.broadcast_to(1.0 / u.radian, self.shape, subok=True)
sf_lon = sf_lat if omit_coslat else np.cos(self.lat) / u.radian
return {"lon": sf_lon, "lat": sf_lat}
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# erfa s2c: Convert [unit]spherical coordinates to Cartesian.
p = erfa_ufunc.s2c(self.lon, self.lat)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
p = cart.get_xyz(xyz_axis=-1)
# erfa c2s: P-vector to [unit]spherical coordinates.
return cls(*erfa_ufunc.c2s(p), copy=False)
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
# TODO! for differential_class. This cannot (currently) be implemented
# like in the other Representations since `_re_represent_differentials`
# keeps differentials' unit keys, but this can result in a mismatch
# between the UnitSpherical expected key (e.g. "s") and that expected
# in the other class (here "s / m"). For more info, see PR #11467
if inspect.isclass(other_class) and not differential_class:
if issubclass(other_class, PhysicsSphericalRepresentation):
return other_class(
phi=self.lon, theta=90 * u.deg - self.lat, r=1.0, copy=False
)
elif issubclass(other_class, SphericalRepresentation):
return other_class(lon=self.lon, lat=self.lat, distance=1.0, copy=False)
return super().represent_as(other_class, differential_class)
def transform(self, matrix):
r"""Transform the unit-spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
Returns
-------
`~astropy.coordinates.UnitSphericalRepresentation` or `~astropy.coordinates.SphericalRepresentation`
If ``matrix`` is O(3) -- :math:`M \cdot M^T = I` -- like a rotation,
then the result is a `~astropy.coordinates.UnitSphericalRepresentation`.
All other matrices will change the distance, so the dimensional
representation is used instead.
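Examples
--------
An illustrative check of the two branches (values are arbitrary):
>>> import numpy as np
>>> from astropy import units as u
>>> from astropy.coordinates import UnitSphericalRepresentation
>>> from astropy.coordinates.matrix_utilities import rotation_matrix
>>> usph = UnitSphericalRepresentation(10 * u.deg, 20 * u.deg)
>>> type(usph.transform(rotation_matrix(30 * u.deg, axis='z'))).__name__
'UnitSphericalRepresentation'
>>> type(usph.transform(2 * np.eye(3))).__name__
'SphericalRepresentation'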
"""
# the transformation matrix does not need to be a rotation matrix,
# so the unit-distance is not guaranteed. For speed, we check if the
# matrix is in O(3) and preserves lengths.
if np.all(is_O3(matrix)): # remain in unit-rep
xyz = erfa_ufunc.s2c(self.lon, self.lat)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat = erfa_ufunc.c2s(p)
rep = self.__class__(lon=lon, lat=lat)
# handle differentials
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
rep = rep.with_differentials(new_diffs)
else: # switch to dimensional representation
rep = self._dimensional_representation(
lon=self.lon, lat=self.lat, distance=1, differentials=self.differentials
).transform(matrix)
return rep
def _scale_operation(self, op, *args):
return self._dimensional_representation(
lon=self.lon, lat=self.lat, distance=1.0, differentials=self.differentials
)._scale_operation(op, *args)
def __neg__(self):
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super().__neg__()
result = self.__class__(self.lon + 180.0 * u.deg, -self.lat, copy=False)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(operator.pos, operator.neg), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units, which is
always unity for vectors on the unit sphere.
Returns
-------
norm : `~astropy.units.Quantity` ['dimensionless']
Dimensionless ones, with the same shape as the representation.
"""
return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled, copy=False)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self._dimensional_representation.from_cartesian(result)
def mean(self, *args, **kwargs):
"""Vector mean.
The representation is converted to cartesian, the means of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("mean")
return self._dimensional_representation.from_cartesian(
self.to_cartesian().mean(*args, **kwargs)
)
def sum(self, *args, **kwargs):
"""Vector sum.
The representation is converted to cartesian, the sums of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("sum")
return self._dimensional_representation.from_cartesian(
self.to_cartesian().sum(*args, **kwargs)
)
def cross(self, other):
"""Cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to `~astropy.coordinates.SphericalRepresentation`.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The representation to take the cross product with.
Returns
-------
cross_product : `~astropy.coordinates.SphericalRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
"""
self._raise_if_has_differentials("cross")
return self._dimensional_representation.from_cartesian(
self.to_cartesian().cross(other)
)
class RadialRepresentation(BaseRepresentation):
"""
Representation of the distance of points from the origin.
Note that this is mostly intended as an internal helper representation.
It can do little else than be used as a scale in multiplication.
Parameters
----------
distance : `~astropy.units.Quantity` ['length']
The distance of the point(s) from the origin.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
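Examples
--------
A small illustration, constructing the radial coordinate of a point:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation, RadialRepresentation
>>> cart = CartesianRepresentation(3 * u.pc, 4 * u.pc, 0 * u.pc)
>>> RadialRepresentation.from_cartesian(cart).distance  # doctest: +FLOAT_CMP
<Quantity 5. pc>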
"""
attr_classes = {"distance": u.Quantity}
def __init__(self, distance, differentials=None, copy=True):
super().__init__(distance, differentials=differentials, copy=copy)
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
"""Cartesian unit vectors are undefined for radial representation."""
raise NotImplementedError(
f"Cartesian unit vectors are undefined for {self.__class__} instances"
)
def scale_factors(self):
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"distance": l}
def to_cartesian(self):
"""Cannot convert radial representation to cartesian."""
raise NotImplementedError(
f"cannot convert {self.__class__} instance to cartesian."
)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to radial coordinate.
"""
return cls(distance=cart.norm(), copy=False)
def __mul__(self, other):
if isinstance(other, BaseRepresentation):
return self.distance * other
else:
return super().__mul__(other)
def norm(self):
"""Vector norm.
Just the distance itself.
Returns
-------
norm : `~astropy.units.Quantity`
The distance itself, with the same shape as the representation.
"""
return self.distance
def _combine_operation(self, op, other, reverse=False):
return NotImplemented
def transform(self, matrix):
"""Radial representations cannot be transformed by a Cartesian matrix.
Parameters
----------
matrix : array-like
The transformation matrix in a Cartesian basis.
Must be a multiplication: a diagonal matrix with identical elements.
Must have shape (..., 3, 3), where the last two axes hold the 3x3
matrix and any leading axes must be broadcastable against the shape
of this representation.
Raises
------
ValueError
If the matrix is not a multiplication.
"""
scl = matrix[..., 0, 0]
# check that the matrix is a scaled identity matrix on the last 2 axes.
if np.any(matrix != scl[..., np.newaxis, np.newaxis] * np.identity(3)):
raise ValueError(
"Radial representations can only be "
"transformed by a scaled identity matrix"
)
return self * scl
def _spherical_op_funcs(op, *args):
"""For given operator, return functions that adjust lon, lat, distance."""
if op is operator.neg:
return lambda x: x + 180 * u.deg, operator.neg, operator.pos
try:
scale_sign = np.sign(args[0])
except Exception:
# This should always work, even if perhaps we get a negative distance.
return operator.pos, operator.pos, lambda x: op(x, *args)
scale = abs(args[0])
return (
lambda x: x + 180 * u.deg * np.signbit(scale_sign),
lambda x: x * scale_sign,
lambda x: op(x, scale),
)
class SphericalRepresentation(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates.
Parameters
----------
lon, lat : `~astropy.units.Quantity` ['angle']
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
distance : `~astropy.units.Quantity` ['length']
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
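Examples
--------
A brief illustrative sketch; note that a distance with a length unit is
converted to a `~astropy.coordinates.Distance`:
>>> from astropy import units as u
>>> from astropy.coordinates import SphericalRepresentation
>>> sph = SphericalRepresentation(lon=30 * u.deg, lat=0 * u.deg, distance=2.0 * u.kpc)
>>> sph.distance  # doctest: +FLOAT_CMP
<Distance 2. kpc>
>>> sph.to_cartesian().z  # doctest: +FLOAT_CMP
<Quantity 0. kpc>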
"""
attr_classes = {"lon": Longitude, "lat": Latitude, "distance": u.Quantity}
_unit_representation = UnitSphericalRepresentation
def __init__(self, lon, lat=None, distance=None, differentials=None, copy=True):
super().__init__(lon, lat, distance, copy=copy, differentials=differentials)
if (
not isinstance(self._distance, Distance)
and self._distance.unit.physical_type == "length"
):
try:
self._distance = Distance(self._distance, copy=False)
except ValueError as e:
if e.args[0].startswith("distance must be >= 0"):
raise ValueError(
"Distance must be >= 0. To allow negative distance values, you"
" must explicitly pass in a `Distance` object with the the "
"argument 'allow_negative=True'."
) from e
else:
raise
@classproperty
def _compatible_differentials(cls):
return [
UnitSphericalDifferential,
UnitSphericalCosLatDifferential,
SphericalDifferential,
SphericalCosLatDifferential,
RadialDifferential,
]
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return {
"lon": CartesianRepresentation(-sinlon, coslon, 0.0, copy=False),
"lat": CartesianRepresentation(
-sinlat * coslon, -sinlat * sinlon, coslat, copy=False
),
"distance": CartesianRepresentation(
coslat * coslon, coslat * sinlon, sinlat, copy=False
),
}
def scale_factors(self, omit_coslat=False):
sf_lat = self.distance / u.radian
sf_lon = sf_lat if omit_coslat else sf_lat * np.cos(self.lat)
sf_distance = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"lon": sf_lon, "lat": sf_lat, "distance": sf_distance}
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
if inspect.isclass(other_class):
if issubclass(other_class, PhysicsSphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
phi=self.lon,
theta=90 * u.deg - self.lat,
r=self.distance,
differentials=diffs,
copy=False,
)
elif issubclass(other_class, UnitSphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
lon=self.lon, lat=self.lat, differentials=diffs, copy=False
)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.distance, Distance):
d = self.distance.view(u.Quantity)
else:
d = self.distance
# erfa s2p: Convert spherical polar coordinates to p-vector.
p = erfa_ufunc.s2p(self.lon, self.lat, d)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
p = cart.get_xyz(xyz_axis=-1)
# erfa p2s: P-vector to spherical polar coordinates.
return cls(*erfa_ufunc.p2s(p), copy=False)
def transform(self, matrix):
"""Transform the spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
"""
xyz = erfa_ufunc.s2c(self.lon, self.lat)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat, ur = erfa_ufunc.p2s(p)
rep = self.__class__(lon=lon, lat=lat, distance=self.distance * ur)
# handle differentials
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
return rep.with_differentials(new_diffs)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the distance.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.distance)
def _scale_operation(self, op, *args):
# TODO: expand special-casing to UnitSpherical and RadialDifferential.
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super()._scale_operation(op, *args)
lon_op, lat_op, distance_op = _spherical_op_funcs(op, *args)
result = self.__class__(
lon_op(self.lon), lat_op(self.lat), distance_op(self.distance), copy=False
)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(operator.pos, lat_op, distance_op), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
class PhysicsSphericalRepresentation(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates (using the physics
convention of using ``phi`` and ``theta`` for azimuth and inclination
from the pole).
Parameters
----------
phi, theta : `~astropy.units.Quantity` or str
The azimuth and inclination of the point(s), in angular units. The
inclination should be between 0 and 180 degrees, and the azimuth will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`. If ``copy`` is False, `phi`
will be changed in place if it is not between 0 and 360 degrees.
r : `~astropy.units.Quantity`
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `~astropy.coordinates.PhysicsSphericalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`~astropy.coordinates.PhysicsSphericalDifferential` instance, or a dictionary of
differential instances with keys set to a string representation of the
SI unit with which the differential (derivative) is taken. For example,
for a velocity differential on a positional representation, the key
would be ``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
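Examples
--------
An illustrative point on the positive z axis (``theta`` is measured from
the pole, not from the equator):
>>> from astropy import units as u
>>> from astropy.coordinates import PhysicsSphericalRepresentation
>>> psph = PhysicsSphericalRepresentation(phi=0 * u.deg, theta=0 * u.deg, r=1 * u.kpc)
>>> psph.to_cartesian().z  # doctest: +FLOAT_CMP
<Quantity 1. kpc>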
"""
attr_classes = {"phi": Angle, "theta": Angle, "r": u.Quantity}
def __init__(self, phi, theta=None, r=None, differentials=None, copy=True):
super().__init__(phi, theta, r, copy=copy, differentials=differentials)
# Wrap/validate phi/theta
# Note that _phi already holds our own copy if copy=True.
self._phi.wrap_at(360 * u.deg, inplace=True)
if np.any(self._theta < 0.0 * u.deg) or np.any(self._theta > 180.0 * u.deg):
raise ValueError(
"Inclination angle(s) must be within 0 deg <= angle <= 180 deg, "
f"got {theta.to(u.degree)}"
)
if self._r.unit.physical_type == "length":
self._r = self._r.view(Distance)
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def theta(self):
"""
The inclination of the point(s), measured from the pole.
"""
return self._theta
@property
def r(self):
"""
The distance from the origin to the point(s).
"""
return self._r
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
return {
"phi": CartesianRepresentation(-sinphi, cosphi, 0.0, copy=False),
"theta": CartesianRepresentation(
costheta * cosphi, costheta * sinphi, -sintheta, copy=False
),
"r": CartesianRepresentation(
sintheta * cosphi, sintheta * sinphi, costheta, copy=False
),
}
def scale_factors(self):
r = self.r / u.radian
sintheta = np.sin(self.theta)
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"phi": r * sintheta, "theta": r, "r": l}
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
if inspect.isclass(other_class):
if issubclass(other_class, SphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
lon=self.phi,
lat=90 * u.deg - self.theta,
distance=self.r,
differentials=diffs,
copy=False,
)
elif issubclass(other_class, UnitSphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
lon=self.phi,
lat=90 * u.deg - self.theta,
differentials=diffs,
copy=False,
)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.r, Distance):
d = self.r.view(u.Quantity)
else:
d = self.r
x = d * np.sin(self.theta) * np.cos(self.phi)
y = d * np.sin(self.theta) * np.sin(self.phi)
z = d * np.cos(self.theta)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
phi = np.arctan2(cart.y, cart.x)
theta = np.arctan2(s, cart.z)
return cls(phi=phi, theta=theta, r=r, copy=False)
def transform(self, matrix):
"""Transform the spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
"""
# apply transformation in unit-spherical coordinates
xyz = erfa_ufunc.s2c(self.phi, 90 * u.deg - self.theta)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat, ur = erfa_ufunc.p2s(p) # `ur` is transformed unit-`r`
# create transformed physics-spherical representation,
# reapplying the distance scaling
rep = self.__class__(phi=lon, theta=90 * u.deg - lat, r=self.r * ur)
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
return rep.with_differentials(new_diffs)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the radius.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.r)
def _scale_operation(self, op, *args):
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super()._scale_operation(op, *args)
phi_op, adjust_theta_sign, r_op = _spherical_op_funcs(op, *args)
# Also run phi_op on theta to ensure theta remains between 0 and 180:
# any time the scale is negative, we do -theta + 180 degrees.
result = self.__class__(
phi_op(self.phi),
phi_op(adjust_theta_sign(self.theta)),
r_op(self.r),
copy=False,
)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(operator.pos, adjust_theta_sign, r_op), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
class CylindricalRepresentation(BaseRepresentation):
"""
Representation of points in 3D cylindrical coordinates.
Parameters
----------
rho : `~astropy.units.Quantity`
The distance from the z axis to the point(s).
phi : `~astropy.units.Quantity` or str
The azimuth of the point(s), in angular units, which will be wrapped
to an angle between 0 and 360 degrees. This can also be an instance of
`~astropy.coordinates.Angle`.
z : `~astropy.units.Quantity`
The z coordinate(s) of the point(s).
differentials : dict, `~astropy.coordinates.CylindricalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`~astropy.coordinates.CylindricalDifferential` instance, or a dictionary of differential
instances with keys set to a string representation of the SI unit with
which the differential (derivative) is taken. For example, for a
velocity differential on a positional representation, the key would be
``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
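Examples
--------
A short illustrative conversion to Cartesian coordinates:
>>> from astropy import units as u
>>> from astropy.coordinates import CylindricalRepresentation
>>> cyl = CylindricalRepresentation(rho=1 * u.kpc, phi=90 * u.deg, z=2 * u.kpc)
>>> cyl.to_cartesian().y  # doctest: +FLOAT_CMP
<Quantity 1. kpc>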
"""
attr_classes = {"rho": u.Quantity, "phi": Angle, "z": u.Quantity}
def __init__(self, rho, phi=None, z=None, differentials=None, copy=True):
super().__init__(rho, phi, z, copy=copy, differentials=differentials)
if not self._rho.unit.is_equivalent(self._z.unit):
raise u.UnitsError("rho and z should have matching physical types")
@property
def rho(self):
"""
The distance of the point(s) from the z-axis.
"""
return self._rho
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def z(self):
"""
The height of the point(s).
"""
return self._z
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
l = np.broadcast_to(1.0, self.shape)
return {
"rho": CartesianRepresentation(cosphi, sinphi, 0, copy=False),
"phi": CartesianRepresentation(-sinphi, cosphi, 0, copy=False),
"z": CartesianRepresentation(0, 0, l, unit=u.one, copy=False),
}
def scale_factors(self):
rho = self.rho / u.radian
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"rho": l, "phi": rho, "z": l}
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to cylindrical polar
coordinates.
"""
rho = np.hypot(cart.x, cart.y)
phi = np.arctan2(cart.y, cart.x)
z = cart.z
return cls(rho=rho, phi=phi, z=z, copy=False)
def to_cartesian(self):
"""
Converts cylindrical polar coordinates to 3D rectangular cartesian
coordinates.
"""
x = self.rho * np.cos(self.phi)
y = self.rho * np.sin(self.phi)
z = self.z
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
def _scale_operation(self, op, *args):
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super()._scale_operation(op, *args)
phi_op, _, rho_op = _spherical_op_funcs(op, *args)
z_op = lambda x: op(x, *args)
result = self.__class__(
rho_op(self.rho), phi_op(self.phi), z_op(self.z), copy=False
)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(rho_op, operator.pos, z_op), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
class BaseDifferential(BaseRepresentationOrDifferential):
r"""A base class representing differentials of representations.
These represent differences or derivatives along each component.
E.g., for physics spherical coordinates, these would be
:math:`\delta r, \delta \theta, \delta \phi`.
Parameters
----------
d_comp1, d_comp2, d_comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D differentials. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
Notes
-----
All differential representation classes should subclass this base class,
and define an ``base_representation`` attribute with the class of the
regular `~astropy.coordinates.BaseRepresentation` for which differential
coordinates are provided. This will set up a default ``attr_classes``
instance with names equal to the base component names prefixed by ``d_``,
and all classes set to `~astropy.units.Quantity`, plus properties to access
those, and a default ``__init__`` for initialization.
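Examples
--------
A sketch of how a custom differential class could be declared (the class
name here is hypothetical; ``+SKIP`` avoids re-registering it on repeated
runs):
>>> from astropy.coordinates import CylindricalRepresentation
>>> class MyCylindricalDifferential(BaseDifferential):  # doctest: +SKIP
...     base_representation = CylindricalRepresentation
>>> sorted(MyCylindricalDifferential.attr_classes)  # doctest: +SKIP
['d_phi', 'd_rho', 'd_z']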
"""
def __init_subclass__(cls, **kwargs):
"""Set default ``attr_classes`` and component getters on a Differential.
For these, the components are those of the base representation prefixed
by 'd_', and the class is `~astropy.units.Quantity`.
"""
# Don't do anything for base helper classes.
if cls.__name__ in (
"BaseDifferential",
"BaseSphericalDifferential",
"BaseSphericalCosLatDifferential",
):
return
if not hasattr(cls, "base_representation"):
raise NotImplementedError(
"Differential representations must have a"
'"base_representation" class attribute.'
)
# If not defined explicitly, create attr_classes.
if not hasattr(cls, "attr_classes"):
base_attr_classes = cls.base_representation.attr_classes
cls.attr_classes = {"d_" + c: u.Quantity for c in base_attr_classes}
repr_name = cls.get_name()
if repr_name in DIFFERENTIAL_CLASSES:
raise ValueError(f"Differential class {repr_name} already defined")
DIFFERENTIAL_CLASSES[repr_name] = cls
_invalidate_reprdiff_cls_hash()
# If not defined explicitly, create properties for the components.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(
cls,
component,
property(
_make_getter(component),
doc=f"Component '{component}' of the Differential.",
),
)
super().__init_subclass__(**kwargs)
@classmethod
def _check_base(cls, base):
if cls not in base._compatible_differentials:
raise TypeError(
f"Differential class {cls} is not compatible with the "
f"base (representation) class {base.__class__}"
)
def _get_deriv_key(self, base):
"""Given a base (representation instance), determine the unit of the
derivative by removing the representation unit from the component units
of this differential.
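Examples
--------
An illustration of how the ``'s'`` key for velocity differentials arises:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation, CartesianDifferential
>>> base = CartesianRepresentation([1, 2, 3] * u.kpc)
>>> diff = CartesianDifferential([1, 2, 3] * u.km / u.s)
>>> diff._get_deriv_key(base)
's'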
"""
# This check is just a last resort so we don't return a strange unit key
# from accidentally passing in the wrong base.
self._check_base(base)
for name in base.components:
comp = getattr(base, name)
d_comp = getattr(self, f"d_{name}", None)
if d_comp is not None:
d_unit = comp.unit / d_comp.unit
# This is quite a bit faster than using to_system() or going
# through Quantity()
d_unit_si = d_unit.decompose(u.si.bases)
d_unit_si._scale = 1 # remove the scale from the unit
return str(d_unit_si)
else:
raise RuntimeError(
"Invalid representation-differential units! This likely happened "
"because either the representation or the associated differential "
"have non-standard units. Check that the input positional data have "
"positional units, and the input velocity data have velocity units, "
"or are both dimensionless."
)
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `~astropy.coordinates.CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors()
def to_cartesian(self, base):
"""Convert the differential to 3D rectangular cartesian coordinates.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Returns
-------
`~astropy.coordinates.CartesianDifferential`
This object, converted.
"""
base_e, base_sf = self._get_base_vectors(base)
return functools.reduce(
operator.add,
(
getattr(self, d_c) * base_sf[c] * base_e[c]
for d_c, c in zip(self.components, base.components)
),
)
@classmethod
def from_cartesian(cls, other, base):
"""Convert the differential from 3D rectangular cartesian coordinates to
the desired class.
Parameters
----------
other
The object to convert into this differential.
base : `~astropy.coordinates.BaseRepresentation`
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Will be converted to ``cls.base_representation`` if needed.
Returns
-------
`~astropy.coordinates.BaseDifferential` subclass instance
A new differential object that is this class' type.
"""
base = base.represent_as(cls.base_representation)
base_e, base_sf = cls._get_base_vectors(base)
return cls(
*(other.dot(e / base_sf[component]) for component, e in base_e.items()),
copy=False,
)
def represent_as(self, other_class, base):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via cartesian coordinates.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
base : instance of ``self.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
"""
if other_class is self.__class__:
return self
# The default is to convert via cartesian coordinates.
self_cartesian = self.to_cartesian(base)
if issubclass(other_class, BaseDifferential):
return other_class.from_cartesian(self_cartesian, base)
else:
return other_class.from_cartesian(self_cartesian)
@classmethod
def from_representation(cls, representation, base):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
The representation that should be converted to this class.
base : instance of ``cls.base_representation``
The base relative to which the differentials will be defined. If
the representation is a differential itself, the base will be
converted to its ``base_representation`` to help convert it.
"""
if isinstance(representation, BaseDifferential):
cartesian = representation.to_cartesian(
base.represent_as(representation.base_representation)
)
else:
cartesian = representation.to_cartesian()
return cls.from_cartesian(cartesian, base)
def transform(self, matrix, base, transformed_base):
"""Transform differential using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base : instance of ``cls.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
transformed_base : instance of ``cls.base_representation``
Base relative to which the transformed differentials are defined.
If the other class is a differential representation, the base will
be converted to its ``base_representation``.
"""
# route transformation through Cartesian
cdiff = self.represent_as(CartesianDifferential, base=base).transform(matrix)
# move back to original representation
diff = cdiff.represent_as(self.__class__, transformed_base)
return diff
def _scale_operation(self, op, *args, scaled_base=False):
"""Scale all components.
Parameters
----------
op : `~operator` callable
Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.).
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
scaled_base : bool, optional
Whether the base was scaled the same way. This affects whether
differential components should be scaled. For instance, a differential
in longitude should not be scaled if its spherical base is scaled
in radius.
"""
scaled_attrs = [op(getattr(self, c), *args) for c in self.components]
return self.__class__(*scaled_attrs, copy=False)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If ``other`` is a representation,
it will be used as a base for which to evaluate the differential,
and the result is a new representation.
Parameters
----------
op : `~operator` callable
Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if isinstance(self, type(other)):
first, second = (self, other) if not reverse else (other, self)
return self.__class__(
*[op(getattr(first, c), getattr(second, c)) for c in self.components]
)
else:
try:
self_cartesian = self.to_cartesian(other)
except TypeError:
return NotImplemented
return other._combine_operation(op, self_cartesian, not reverse)
def __sub__(self, other):
# avoid "differential - representation".
if isinstance(other, BaseRepresentation):
return NotImplemented
return super().__sub__(other)
def norm(self, base=None):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Parameters
----------
base : instance of ``self.base_representation``
Base relative to which the differentials are defined. This is
required to calculate the physical size of the differential for
all but Cartesian differentials or radial differentials.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
# RadialDifferential overrides this function, so there is no handling here
if not isinstance(self, CartesianDifferential) and base is None:
raise ValueError(
"`base` must be provided to calculate the norm of a"
f" {type(self).__name__}"
)
return self.to_cartesian(base).norm()
class CartesianDifferential(BaseDifferential):
"""Differentials in of points in 3D cartesian coordinates.
Parameters
----------
d_x, d_y, d_z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the differentials. If ``d_x``, ``d_y``,
and ``d_z`` have different shapes, they should be broadcastable. If not
quantities, ``unit`` should be set. If only ``d_x`` is given, it is
assumed that it contains an array with the 3 coordinates stored along
``xyz_axis``.
unit : `~astropy.units.Unit` or str
If given, the differentials will be converted to this unit (or taken to
be in this unit if not given).
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided instead of distinct ``d_x``, ``d_y``, and ``d_z`` (default: 0).
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
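Examples
--------
A brief illustrative sketch:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianDifferential
>>> d = CartesianDifferential([1., 2., 3.] * u.km / u.s)
>>> d.d_z  # doctest: +FLOAT_CMP
<Quantity 3. km / s>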
"""
base_representation = CartesianRepresentation
_d_xyz = None
def __init__(self, d_x, d_y=None, d_z=None, unit=None, xyz_axis=None, copy=True):
if d_y is None and d_z is None:
if isinstance(d_x, np.ndarray) and d_x.dtype.kind not in "OV":
# Short-cut for 3-D array input.
d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._d_xyz = d_x
if xyz_axis:
d_x = np.moveaxis(d_x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._d_x, self._d_y, self._d_z = d_x
return
else:
d_x, d_y, d_z = d_x
if xyz_axis is not None:
raise ValueError(
"xyz_axis should only be set if d_x, d_y, and d_z are in a single array"
" passed in through d_x, i.e., d_y and d_z should not be not given."
)
if d_y is None or d_z is None:
raise ValueError(
"d_x, d_y, and d_z are required to instantiate"
f" {self.__class__.__name__}"
)
if unit is not None:
d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
d_y = u.Quantity(d_y, unit, copy=copy, subok=True)
d_z = u.Quantity(d_z, unit, copy=copy, subok=True)
copy = False
super().__init__(d_x, d_y, d_z, copy=copy)
if not (
self._d_x.unit.is_equivalent(self._d_y.unit)
and self._d_x.unit.is_equivalent(self._d_z.unit)
):
raise u.UnitsError("d_x, d_y and d_z should have equivalent units.")
def to_cartesian(self, base=None):
return CartesianRepresentation(*[getattr(self, c) for c in self.components])
@classmethod
def from_cartesian(cls, other, base=None):
return cls(*[getattr(other, c) for c in other.components])
def transform(self, matrix, base=None, transformed_base=None):
"""Transform differentials using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base, transformed_base : `~astropy.coordinates.CartesianRepresentation` or None, optional
Not used in the Cartesian transformation.
"""
# erfa rxp: Multiply a p-vector by an r-matrix.
p = erfa_ufunc.rxp(matrix, self.get_d_xyz(xyz_axis=-1))
return self.__class__(p, xyz_axis=-1, copy=False)
def get_d_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
d_xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
"""
if self._d_xyz is not None:
if self._xyz_axis == xyz_axis:
return self._d_xyz
else:
return np.moveaxis(self._d_xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _d_xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return np.stack([self._d_x, self._d_y, self._d_z], axis=xyz_axis)
d_xyz = property(get_d_xyz)
class BaseSphericalDifferential(BaseDifferential):
def _d_lon_coslat(self, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon * np.cos(base.lat)
@classmethod
def _get_d_lon(cls, d_lon_coslat, base):
"""Convert longitude differential d_lon_coslat to d_lon.
Parameters
----------
d_lon_coslat : `~astropy.units.Quantity`
Longitude differential that includes ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon_coslat / np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
`~astropy.coordinates.RadialDifferential`), they will be combined
appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (
isinstance(other, BaseSphericalDifferential)
and not isinstance(self, type(other))
or isinstance(other, RadialDifferential)
):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {
c: op(getattr(first, c, 0.0), getattr(second, c, 0.0))
for c in all_components
}
return SphericalDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
class UnitSphericalDifferential(BaseSphericalDifferential):
"""Differential(s) of points on a unit sphere.
Parameters
----------
d_lon, d_lat : `~astropy.units.Quantity`
The longitude and latitude of the differentials.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = UnitSphericalRepresentation
@classproperty
def _dimensional_differential(cls):
return SphericalDifferential
def __init__(self, d_lon, d_lat=None, copy=True):
super().__init__(d_lon, d_lat, copy=copy)
if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon and d_lat should have equivalent units.")
@classmethod
def from_cartesian(cls, other, base):
# Go via the dimensional equivalent, so that the longitude and latitude
# differentials correctly take into account the norm of the base.
dimensional = cls._dimensional_differential.from_cartesian(other, base)
return dimensional.represent_as(cls)
def to_cartesian(self, base):
if isinstance(base, SphericalRepresentation):
scale = base.distance
elif isinstance(base, PhysicsSphericalRepresentation):
scale = base.r
else:
return super().to_cartesian(base)
base = base.represent_as(UnitSphericalRepresentation)
return scale * super().to_cartesian(base)
def represent_as(self, other_class, base=None):
# Only have enough information to represent other unit-spherical.
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if isinstance(representation, SphericalDifferential):
return cls(representation.d_lon, representation.d_lat)
elif isinstance(
representation,
(SphericalCosLatDifferential, UnitSphericalCosLatDifferential),
):
d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
return cls(d_lon, representation.d_lat)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_phi, -representation.d_theta)
return super().from_representation(representation, base)
def transform(self, matrix, base, transformed_base):
"""Transform differential using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base : instance of ``cls.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
transformed_base : instance of ``cls.base_representation``
Base relative to which the transformed differentials are defined.
If the other class is a differential representation, the base will
be converted to its ``base_representation``.
"""
# the transformation matrix does not need to be a rotation matrix,
# so the unit-distance is not guaranteed. For speed, we check if the
# matrix is in O(3) and preserves lengths.
if np.all(is_O3(matrix)): # remain in unit-rep
# TODO! implement without Cartesian intermediate step.
# some of this can be moved to the parent class.
diff = super().transform(matrix, base, transformed_base)
else: # switch to dimensional representation
du = self.d_lon.unit / base.lon.unit # derivative unit
diff = self._dimensional_differential(
d_lon=self.d_lon, d_lat=self.d_lat, d_distance=0 * du
).transform(matrix, base, transformed_base)
return diff
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.copy()
else:
return super()._scale_operation(op, *args)
class SphericalDifferential(BaseSphericalDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon, d_lat : `~astropy.units.Quantity`
The differential longitude and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
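Examples
--------
A short illustrative sketch of a proper-motion-like differential:
>>> from astropy import units as u
>>> from astropy.coordinates import SphericalDifferential, UnitSphericalDifferential
>>> d = SphericalDifferential(1.0 * u.mas / u.yr, 2.0 * u.mas / u.yr, 3.0 * u.km / u.s)
>>> d.represent_as(UnitSphericalDifferential).d_lat  # doctest: +FLOAT_CMP
<Quantity 2. mas / yr>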
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalDifferential
def __init__(self, d_lon, d_lat=None, d_distance=None, copy=True):
super().__init__(d_lon, d_lat, d_distance, copy=copy)
if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon and d_lat should have equivalent units.")
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_lon, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat, self.d_distance)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self.d_lon, -self.d_lat, self.d_distance)
else:
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if isinstance(representation, SphericalCosLatDifferential):
d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
return cls(d_lon, representation.d_lat, representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(
representation.d_phi, -representation.d_theta, representation.d_r
)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(self.d_lon, self.d_lat, op(self.d_distance, *args))
else:
return super()._scale_operation(op, *args)
class BaseSphericalCosLatDifferential(BaseDifferential):
"""Differentials from points on a spherical base representation.
With cos(lat) assumed to be included in the longitude differential.
"""
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from (unit)spherical base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `~astropy.coordinates.CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates. The scale factor for
longitude does not include the cos(lat) factor.
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors(omit_coslat=True)
def _d_lon(self, base):
"""Convert longitude differential with cos(lat) to one without.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon_coslat / np.cos(base.lat)
@classmethod
def _get_d_lon_coslat(cls, d_lon, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
d_lon : `~astropy.units.Quantity`
Value of the longitude differential without ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon * np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
        `~astropy.coordinates.RadialDifferential`), they will be combined
appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (
isinstance(other, BaseSphericalCosLatDifferential)
and not isinstance(self, type(other))
or isinstance(other, RadialDifferential)
):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {
c: op(getattr(first, c, 0.0), getattr(second, c, 0.0))
for c in all_components
}
return SphericalCosLatDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
class UnitSphericalCosLatDifferential(BaseSphericalCosLatDifferential):
"""Differential(s) of points on a unit sphere.
Parameters
----------
d_lon_coslat, d_lat : `~astropy.units.Quantity`
The longitude and latitude of the differentials.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = UnitSphericalRepresentation
attr_classes = {"d_lon_coslat": u.Quantity, "d_lat": u.Quantity}
@classproperty
def _dimensional_differential(cls):
return SphericalCosLatDifferential
def __init__(self, d_lon_coslat, d_lat=None, copy=True):
super().__init__(d_lon_coslat, d_lat, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon_coslat and d_lat should have equivalent units.")
@classmethod
def from_cartesian(cls, other, base):
# Go via the dimensional equivalent, so that the longitude and latitude
# differentials correctly take into account the norm of the base.
dimensional = cls._dimensional_differential.from_cartesian(other, base)
return dimensional.represent_as(cls)
def to_cartesian(self, base):
if isinstance(base, SphericalRepresentation):
scale = base.distance
elif isinstance(base, PhysicsSphericalRepresentation):
scale = base.r
else:
return super().to_cartesian(base)
base = base.represent_as(UnitSphericalRepresentation)
return scale * super().to_cartesian(base)
def represent_as(self, other_class, base=None):
# Only have enough information to represent other unit-spherical.
if issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# All spherical differentials can be done without going to Cartesian,
# though w/o CosLat needs base for the latitude.
if isinstance(representation, SphericalCosLatDifferential):
return cls(representation.d_lon_coslat, representation.d_lat)
elif isinstance(
representation, (SphericalDifferential, UnitSphericalDifferential)
):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta)
return super().from_representation(representation, base)
def transform(self, matrix, base, transformed_base):
"""Transform differential using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base : instance of ``cls.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
transformed_base : instance of ``cls.base_representation``
Base relative to which the transformed differentials are defined.
If the other class is a differential representation, the base will
be converted to its ``base_representation``.
"""
# the transformation matrix does not need to be a rotation matrix,
# so the unit-distance is not guaranteed. For speed, we check if the
# matrix is in O(3) and preserves lengths.
if np.all(is_O3(matrix)): # remain in unit-rep
# TODO! implement without Cartesian intermediate step.
diff = super().transform(matrix, base, transformed_base)
else: # switch to dimensional representation
du = self.d_lat.unit / base.lat.unit # derivative unit
diff = self._dimensional_differential(
d_lon_coslat=self.d_lon_coslat, d_lat=self.d_lat, d_distance=0 * du
).transform(matrix, base, transformed_base)
return diff
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.copy()
else:
return super()._scale_operation(op, *args)
class SphericalCosLatDifferential(BaseSphericalCosLatDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon_coslat, d_lat : `~astropy.units.Quantity`
The differential longitude (with cos(lat) included) and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalCosLatDifferential
attr_classes = {
"d_lon_coslat": u.Quantity,
"d_lat": u.Quantity,
"d_distance": u.Quantity,
}
def __init__(self, d_lon_coslat, d_lat=None, d_distance=None, copy=True):
super().__init__(d_lon_coslat, d_lat, d_distance, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon_coslat and d_lat should have equivalent units.")
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though some need base for the latitude to remove cos(lat).
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self.d_lon_coslat, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalDifferential):
return other_class(self._d_lon(base), self.d_lat, self.d_distance)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self._d_lon(base), -self.d_lat, self.d_distance)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat.
if isinstance(representation, SphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat, representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta, representation.d_r)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(
self.d_lon_coslat, self.d_lat, op(self.d_distance, *args)
)
else:
return super()._scale_operation(op, *args)
class RadialDifferential(BaseDifferential):
"""Differential(s) of radial distances.
Parameters
----------
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = RadialRepresentation
def to_cartesian(self, base):
unit_vec = base.represent_as(UnitSphericalRepresentation).to_cartesian()
return self.d_distance * unit_vec
def norm(self, base=None):
return self.d_distance
@classmethod
def from_cartesian(cls, other, base):
return cls(
other.dot(base.represent_as(UnitSphericalRepresentation)), copy=False
)
@classmethod
def from_representation(cls, representation, base=None):
if isinstance(
representation, (SphericalDifferential, SphericalCosLatDifferential)
):
return cls(representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_r)
else:
return super().from_representation(representation, base)
def _combine_operation(self, op, other, reverse=False):
if isinstance(other, self.base_representation):
if reverse:
first, second = other.distance, self.d_distance
else:
first, second = self.d_distance, other.distance
return other.__class__(op(first, second), copy=False)
elif isinstance(
other, (BaseSphericalDifferential, BaseSphericalCosLatDifferential)
):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {
c: op(getattr(first, c, 0.0), getattr(second, c, 0.0))
for c in all_components
}
return SphericalDifferential(**result_args)
else:
return super()._combine_operation(op, other, reverse)
class PhysicsSphericalDifferential(BaseDifferential):
"""Differential(s) of 3D spherical coordinates using physics convention.
Parameters
----------
d_phi, d_theta : `~astropy.units.Quantity`
The differential azimuth and inclination.
d_r : `~astropy.units.Quantity`
The differential radial distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
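
    Examples
    --------
    A minimal usage sketch (the numerical values here are placeholders)::

        import astropy.units as u

        diff = PhysicsSphericalDifferential(d_phi=1 * u.mas / u.yr,
                                            d_theta=2 * u.mas / u.yr,
                                            d_r=3 * u.km / u.s)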
"""
base_representation = PhysicsSphericalRepresentation
def __init__(self, d_phi, d_theta=None, d_r=None, copy=True):
super().__init__(d_phi, d_theta, d_r, copy=copy)
if not self._d_phi.unit.is_equivalent(self._d_theta.unit):
raise u.UnitsError("d_phi and d_theta should have equivalent units.")
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude. For those, explicitly
# do the equivalent of self._d_lon_coslat in SphericalDifferential.
if issubclass(other_class, SphericalDifferential):
return other_class(self.d_phi, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_phi, -self.d_theta)
elif issubclass(other_class, SphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_r)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat. For that case,
# do the equivalent of cls._d_lon in SphericalDifferential.
if isinstance(representation, SphericalDifferential):
return cls(
representation.d_lon, -representation.d_lat, representation.d_distance
)
elif isinstance(representation, SphericalCosLatDifferential):
cls._check_base(base)
d_phi = representation.d_lon_coslat / np.sin(base.theta)
return cls(d_phi, -representation.d_lat, representation.d_distance)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(self.d_phi, self.d_theta, op(self.d_r, *args))
else:
return super()._scale_operation(op, *args)
class CylindricalDifferential(BaseDifferential):
"""Differential(s) of points in cylindrical coordinates.
Parameters
----------
d_rho : `~astropy.units.Quantity` ['speed']
The differential cylindrical radius.
d_phi : `~astropy.units.Quantity` ['angular speed']
The differential azimuth.
d_z : `~astropy.units.Quantity` ['speed']
The differential height.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
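
    Examples
    --------
    A minimal usage sketch (the numerical values here are placeholders)::

        import astropy.units as u

        diff = CylindricalDifferential(d_rho=1 * u.km / u.s,
                                       d_phi=2 * u.mas / u.yr,
                                       d_z=3 * u.km / u.s)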
"""
base_representation = CylindricalRepresentation
    def __init__(self, d_rho, d_phi=None, d_z=None, copy=True):
super().__init__(d_rho, d_phi, d_z, copy=copy)
if not self._d_rho.unit.is_equivalent(self._d_z.unit):
raise u.UnitsError("d_rho and d_z should have equivalent units.")
|
36d0da89fd7fcba13154762f7f0de1365686b9deb2ae4369616c6f61c8b741a6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains a general framework for defining graphs of transformations
between coordinates, suitable for either spatial coordinates or more generalized
coordinate systems.
The fundamental idea is that each class is a node in the transformation graph,
and transitions from one node to another are defined as functions (or methods)
wrapped in transformation objects.
This module also includes more specific transformation classes for
celestial/spatial coordinate frames, generally focused around matrix-style
transformations that are typically how the algorithms are defined.
"""
import heapq
import inspect
import subprocess
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from contextlib import contextmanager, suppress
from inspect import signature
from warnings import warn
import numpy as np
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
__all__ = [
"TransformGraph",
"CoordinateTransform",
"FunctionTransform",
"BaseAffineTransform",
"AffineTransform",
"StaticMatrixTransform",
"DynamicMatrixTransform",
"FunctionTransformWithFiniteDifference",
"CompositeTransform",
]
def frame_attrs_from_set(frame_set):
"""
A `dict` of all the attributes of all frame classes in this
`~astropy.coordinates.TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = {}
for frame_cls in frame_set:
result.update(frame_cls.frame_attributes)
return result
def frame_comps_from_set(frame_set):
"""
    A `set` of all component names ever defined within any frame class in
this `~astropy.coordinates.TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = set()
for frame_cls in frame_set:
rep_info = frame_cls._frame_specific_representation_info
for mappings in rep_info.values():
for rep_map in mappings:
result.update([rep_map.framename])
return result
class TransformGraph:
"""
A graph representing the paths between coordinate frames.
"""
def __init__(self):
self._graph = defaultdict(dict)
self.invalidate_cache() # generates cache entries
@property
def _cached_names(self):
if self._cached_names_dct is None:
self._cached_names_dct = dct = {}
for c in self.frame_set:
nm = getattr(c, "name", None)
if nm is not None:
if not isinstance(nm, list):
nm = [nm]
for name in nm:
dct[name] = c
return self._cached_names_dct
@property
def frame_set(self):
"""
A `set` of all the frame classes present in this TransformGraph.
"""
if self._cached_frame_set is None:
self._cached_frame_set = set()
for a in self._graph:
self._cached_frame_set.add(a)
for b in self._graph[a]:
self._cached_frame_set.add(b)
return self._cached_frame_set.copy()
@property
def frame_attributes(self):
"""
A `dict` of all the attributes of all frame classes in this TransformGraph.
"""
if self._cached_frame_attributes is None:
self._cached_frame_attributes = frame_attrs_from_set(self.frame_set)
return self._cached_frame_attributes
@property
def frame_component_names(self):
"""
        A `set` of all component names ever defined within any frame class in
this TransformGraph.
"""
if self._cached_component_names is None:
self._cached_component_names = frame_comps_from_set(self.frame_set)
return self._cached_component_names
def invalidate_cache(self):
"""
Invalidates the cache that stores optimizations for traversing the
transform graph. This is called automatically when transforms
are added or removed, but will need to be called manually if
weights on transforms are modified inplace.
"""
self._cached_names_dct = None
self._cached_frame_set = None
self._cached_frame_attributes = None
self._cached_component_names = None
self._shortestpaths = {}
self._composite_cache = {}
def add_transform(self, fromsys, tosys, transform):
"""Add a new coordinate transformation to the graph.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
transform : `~astropy.coordinates.CoordinateTransform`
The transformation object. Typically a
`~astropy.coordinates.CoordinateTransform` object, although it may
be some other callable that is called with the same signature.
Raises
------
TypeError
If ``fromsys`` or ``tosys`` are not classes or ``transform`` is
not callable.
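
        Examples
        --------
        A minimal sketch, where ``Frame1`` and ``Frame2`` are hypothetical
        `~astropy.coordinates.BaseCoordinateFrame` subclasses::

            graph = TransformGraph()

            def frame1_to_frame2(fromcoord, toframe):
                return toframe.realize_frame(fromcoord.data)

            graph.add_transform(
                Frame1, Frame2, FunctionTransform(frame1_to_frame2, Frame1, Frame2)
            )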
"""
if not inspect.isclass(fromsys):
raise TypeError("fromsys must be a class")
if not inspect.isclass(tosys):
raise TypeError("tosys must be a class")
if not callable(transform):
raise TypeError("transform must be callable")
frame_set = self.frame_set.copy()
frame_set.add(fromsys)
frame_set.add(tosys)
# Now we check to see if any attributes on the proposed frames override
# *any* component names, which we can't allow for some of the logic in
# the SkyCoord initializer to work
attrs = set(frame_attrs_from_set(frame_set).keys())
comps = frame_comps_from_set(frame_set)
invalid_attrs = attrs.intersection(comps)
if invalid_attrs:
invalid_frames = set()
for attr in invalid_attrs:
if attr in fromsys.frame_attributes:
invalid_frames.update([fromsys])
if attr in tosys.frame_attributes:
invalid_frames.update([tosys])
raise ValueError(
f"Frame(s) {list(invalid_frames)} contain invalid attribute names:"
f" {invalid_attrs}\nFrame attributes can not conflict with *any* of"
" the frame data component names (see"
" `frame_transform_graph.frame_component_names`)."
)
self._graph[fromsys][tosys] = transform
self.invalidate_cache()
def remove_transform(self, fromsys, tosys, transform):
"""
Removes a coordinate transform from the graph.
Parameters
----------
fromsys : class or None
The coordinate frame *class* to start from. If `None`,
``transform`` will be searched for and removed (``tosys`` must
also be `None`).
tosys : class or None
The coordinate frame *class* to transform into. If `None`,
``transform`` will be searched for and removed (``fromsys`` must
also be `None`).
transform : callable or None
The transformation object to be removed or `None`. If `None`
and ``tosys`` and ``fromsys`` are supplied, there will be no
check to ensure the correct object is removed.
"""
if fromsys is None or tosys is None:
if not (tosys is None and fromsys is None):
raise ValueError("fromsys and tosys must both be None if either are")
if transform is None:
raise ValueError("cannot give all Nones to remove_transform")
# search for the requested transform by brute force and remove it
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
if agraph[b] is transform:
del agraph[b]
fromsys = a
break
# If the transform was found, need to break out of the outer for loop too
if fromsys:
break
else:
raise ValueError(f"Could not find transform {transform} in the graph")
else:
if transform is None:
self._graph[fromsys].pop(tosys, None)
else:
curr = self._graph[fromsys].get(tosys, None)
if curr is transform:
self._graph[fromsys].pop(tosys)
else:
raise ValueError(
f"Current transform from {fromsys} to {tosys} is not"
f" {transform}"
)
# Remove the subgraph if it is now empty
if self._graph[fromsys] == {}:
self._graph.pop(fromsys)
self.invalidate_cache()
def find_shortest_path(self, fromsys, tosys):
"""
Computes the shortest distance along the transform graph from
one system to another.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
path : list of class or None
The path from ``fromsys`` to ``tosys`` as an in-order sequence
of classes. This list includes *both* ``fromsys`` and
``tosys``. Is `None` if there is no possible path.
distance : float or int
The total distance/priority from ``fromsys`` to ``tosys``. If
priorities are not set this is the number of transforms
needed. Is ``inf`` if there is no possible path.
"""
inf = float("inf")
# special-case the 0 or 1-path
if tosys is fromsys:
if tosys not in self._graph[fromsys]:
# Means there's no transform necessary to go from it to itself.
return [tosys], 0
if tosys in self._graph[fromsys]:
# this will also catch the case where tosys is fromsys, but has
# a defined transform.
t = self._graph[fromsys][tosys]
return [fromsys, tosys], float(t.priority if hasattr(t, "priority") else 1)
# otherwise, need to construct the path:
if fromsys in self._shortestpaths:
# already have a cached result
fpaths = self._shortestpaths[fromsys]
if tosys in fpaths:
return fpaths[tosys]
else:
return None, inf
# use Dijkstra's algorithm to find shortest path in all other cases
nodes = []
# first make the list of nodes
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
if fromsys not in nodes or tosys not in nodes:
# fromsys or tosys are isolated or not registered, so there's
# certainly no way to get from one to the other
return None, inf
edgeweights = {}
# construct another graph that is a dict of dicts of priorities
# (used as edge weights in Dijkstra's algorithm)
for a in self._graph:
edgeweights[a] = aew = {}
agraph = self._graph[a]
for b in agraph:
aew[b] = float(getattr(agraph[b], "priority", 1))
# entries in q are [distance, count, nodeobj, pathlist]
# count is needed because in py 3.x, tie-breaking fails on the nodes.
# this way, insertion order is preserved if the weights are the same
q = [[inf, i, n, []] for i, n in enumerate(nodes) if n is not fromsys]
q.insert(0, [0, -1, fromsys, []])
# this dict will store the distance to node from ``fromsys`` and the path
result = {}
# definitely starts as a valid heap because of the insert line; from the
# node to itself is always the shortest distance
while len(q) > 0:
d, orderi, n, path = heapq.heappop(q)
if d == inf:
# everything left is unreachable from fromsys, just copy them to
# the results and jump out of the loop
result[n] = (None, d)
for d, orderi, n, path in q:
result[n] = (None, d)
break
else:
result[n] = (path, d)
path.append(n)
if n not in edgeweights:
# this is a system that can be transformed to, but not from.
continue
for n2 in edgeweights[n]:
if n2 not in result: # already visited
# find where n2 is in the heap
for i in range(len(q)):
if q[i][2] == n2:
break
else:
raise ValueError(
"n2 not in heap - this should be impossible!"
)
newd = d + edgeweights[n][n2]
if newd < q[i][0]:
q[i][0] = newd
q[i][3] = list(path)
heapq.heapify(q)
# cache for later use
self._shortestpaths[fromsys] = result
return result[tosys]
def get_transform(self, fromsys, tosys):
"""Generates and returns the CompositeTransform for a transformation
between two coordinate systems.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
trans : `~astropy.coordinates.CompositeTransform` or None
If there is a path from ``fromsys`` to ``tosys``, this is a
transform object for that path. If no path could be found, this is
`None`.
Notes
-----
A `~astropy.coordinates.CompositeTransform` is always returned, because
`~astropy.coordinates.CompositeTransform` is slightly more adaptable in
the way it can be called than other transform classes. Specifically, it
takes care of intermediate steps of transformations in a way that is
consistent with 1-hop transformations.
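
        Examples
        --------
        A minimal sketch, where ``Frame1`` and ``Frame2`` are hypothetical
        frame classes already connected in ``graph`` and ``coord1`` is a
        ``Frame1`` instance with data::

            trans = graph.get_transform(Frame1, Frame2)
            if trans is not None:
                coord2 = trans(coord1, Frame2())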
"""
if not inspect.isclass(fromsys):
raise TypeError("fromsys is not a class")
if not inspect.isclass(tosys):
raise TypeError("tosys is not a class")
path, distance = self.find_shortest_path(fromsys, tosys)
if path is None:
return None
transforms = []
currsys = fromsys
for p in path[1:]: # first element is fromsys so we skip it
transforms.append(self._graph[currsys][p])
currsys = p
fttuple = (fromsys, tosys)
if fttuple not in self._composite_cache:
comptrans = CompositeTransform(
transforms, fromsys, tosys, register_graph=False
)
self._composite_cache[fttuple] = comptrans
return self._composite_cache[fttuple]
def lookup_name(self, name):
"""
Tries to locate the coordinate class with the provided alias.
Parameters
----------
name : str
The alias to look up.
Returns
-------
`BaseCoordinateFrame` subclass
The coordinate class corresponding to the ``name`` or `None` if
no such class exists.
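
        Examples
        --------
        For instance, using the built-in graph exposed as
        ``astropy.coordinates.frame_transform_graph``, where the ``'icrs'``
        alias is registered::

            from astropy.coordinates import frame_transform_graph

            frame_transform_graph.lookup_name('icrs')  # returns the ICRS class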
"""
return self._cached_names.get(name, None)
def get_names(self):
"""
Returns all available transform names. They will all be
valid arguments to `lookup_name`.
Returns
-------
nms : list
The aliases for coordinate systems.
"""
return list(self._cached_names.keys())
def to_dot_graph(
self,
priorities=True,
addnodes=[],
savefn=None,
savelayout="plain",
saveformat=None,
color_edges=True,
):
"""
Converts this transform graph to the graphviz_ DOT format.
Optionally saves it (requires `graphviz`_ be installed and on your path).
.. _graphviz: http://www.graphviz.org/
Parameters
----------
priorities : bool
If `True`, show the priority values for each transform. Otherwise,
            they will not be included in the graph.
addnodes : sequence of str
Additional coordinate systems to add (this can include systems
already in the transform graph, but they will only appear once).
savefn : None or str
The file name to save this graph to or `None` to not save
to a file.
savelayout : str
The graphviz program to use to layout the graph (see
graphviz_ for details) or 'plain' to just save the DOT graph
content. Ignored if ``savefn`` is `None`.
saveformat : str
            The graphviz output format (e.g., the ``-Txxx`` option for
            the command-line program - see graphviz docs for details).
Ignored if ``savefn`` is `None`.
color_edges : bool
Color the edges between two nodes (frames) based on the type of
transform. ``FunctionTransform``: red, ``StaticMatrixTransform``:
blue, ``DynamicMatrixTransform``: green.
Returns
-------
dotgraph : str
A string with the DOT format graph.
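
        Examples
        --------
        For example, to render and save the graph (assuming the graphviz
        ``dot`` program is installed and on your path)::

            graph.to_dot_graph(savefn='transforms.svg', savelayout='dot',
                               saveformat='svg')

        or, to just obtain the DOT source as a string::

            dot_source = graph.to_dot_graph()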
"""
nodes = []
# find the node names
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
for node in addnodes:
if node not in nodes:
nodes.append(node)
nodenames = []
invclsaliases = {
f: [k for k, v in self._cached_names.items() if v == f]
for f in self.frame_set
}
for n in nodes:
if n in invclsaliases:
aliases = "`\\n`".join(invclsaliases[n])
nodenames.append(
'{0} [shape=oval label="{0}\\n`{1}`"]'.format(n.__name__, aliases)
)
else:
nodenames.append(n.__name__ + "[ shape=oval ]")
edgenames = []
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, "priority") else 1
color = trans_to_color[transform.__class__] if color_edges else "black"
edgenames.append((a.__name__, b.__name__, pri, color))
# generate simple dot format graph
lines = ["digraph AstropyCoordinateTransformGraph {"]
lines.append("graph [rankdir=LR]")
lines.append("; ".join(nodenames) + ";")
for enm1, enm2, weights, color in edgenames:
labelstr_fmt = "[ {0} {1} ]"
if priorities:
priority_part = f'label = "{weights}"'
else:
priority_part = ""
color_part = f'color = "{color}"'
labelstr = labelstr_fmt.format(priority_part, color_part)
lines.append(f"{enm1} -> {enm2}{labelstr};")
lines.append("")
lines.append("overlap=false")
lines.append("}")
dotgraph = "\n".join(lines)
if savefn is not None:
if savelayout == "plain":
with open(savefn, "w") as f:
f.write(dotgraph)
else:
args = [savelayout]
if saveformat is not None:
args.append("-T" + saveformat)
                proc = subprocess.Popen(
                    args,
                    stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    encoding="utf-8",  # communicate() with str, not bytes
                )
stdout, stderr = proc.communicate(dotgraph)
if proc.returncode != 0:
raise OSError("problem running graphviz: \n" + stderr)
with open(savefn, "w") as f:
f.write(stdout)
return dotgraph
def to_networkx_graph(self):
"""
Converts this transform graph into a networkx graph.
.. note::
You must have the `networkx <https://networkx.github.io/>`_
package installed for this to work.
Returns
-------
nxgraph : ``networkx.Graph``
This `~astropy.coordinates.TransformGraph` as a
`networkx.Graph <https://networkx.github.io/documentation/stable/reference/classes/graph.html>`_.
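
        Examples
        --------
        A short sketch (requires the optional ``networkx`` package, and
        ``networkx.draw_networkx`` additionally needs matplotlib)::

            import networkx as nx

            nxgraph = graph.to_networkx_graph()
            nx.draw_networkx(nxgraph)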
"""
import networkx as nx
nxgraph = nx.Graph()
# first make the nodes
for a in self._graph:
if a not in nxgraph:
nxgraph.add_node(a)
for b in self._graph[a]:
if b not in nxgraph:
nxgraph.add_node(b)
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, "priority") else 1
color = trans_to_color[transform.__class__]
nxgraph.add_edge(a, b, weight=pri, color=color)
return nxgraph
def transform(self, transcls, fromsys, tosys, priority=1, **kwargs):
"""A function decorator for defining transformations.
.. note::
If decorating a static method of a class, ``@staticmethod``
should be added *above* this decorator.
Parameters
----------
transcls : class
The class of the transformation object to create.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
            The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Additional keyword arguments are passed into the ``transcls``
constructor.
Returns
-------
deco : function
A function that can be called on another function as a decorator
(see example).
Notes
-----
This decorator assumes the first argument of the ``transcls``
initializer accepts a callable, and that the second and third are
``fromsys`` and ``tosys``. If this is not true, you should just
initialize the class manually and use
`~astropy.coordinates.TransformGraph.add_transform` instead of this
decorator.
Examples
--------
::
graph = TransformGraph()
class Frame1(BaseCoordinateFrame):
...
class Frame2(BaseCoordinateFrame):
...
@graph.transform(FunctionTransform, Frame1, Frame2)
def f1_to_f2(f1_obj):
... do something with f1_obj ...
return f2_obj
"""
def deco(func):
# this doesn't do anything directly with the transform because
# ``register_graph=self`` stores it in the transform graph
# automatically
transcls(
func, fromsys, tosys, priority=priority, register_graph=self, **kwargs
)
return func
return deco
def _add_merged_transform(self, fromsys, tosys, *furthersys, priority=1):
"""
Add a single-step transform that encapsulates a multi-step transformation path,
using the transforms that already exist in the graph.
The created transform internally calls the existing transforms. If all of the
transforms are affine, the merged transform is
`~astropy.coordinates.DynamicMatrixTransform` (if there are no
origin shifts) or `~astropy.coordinates.AffineTransform`
(otherwise). If at least one of the transforms is not affine, the merged
transform is
`~astropy.coordinates.FunctionTransformWithFiniteDifference`.
This method is primarily useful for defining loopback transformations
(i.e., where ``fromsys`` and the final ``tosys`` are the same).
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform to.
*furthersys : class
Additional coordinate frame classes to transform to in order.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Notes
-----
Even though the created transform is a single step in the graph, it
will still internally call the constituent transforms. Thus, there is
no performance benefit for using this created transform.
For Astropy's built-in frames, loopback transformations typically use
`~astropy.coordinates.ICRS` to be safe. Transforming through an inertial
frame ensures that changes in observation time and observer
location/velocity are properly accounted for.
An error will be raised if a direct transform between ``fromsys`` and
        ``tosys`` already exists.
"""
frames = [fromsys, tosys, *furthersys]
lastsys = frames[-1]
full_path = self.get_transform(fromsys, lastsys)
transforms = [
self.get_transform(frame_a, frame_b)
for frame_a, frame_b in zip(frames[:-1], frames[1:])
]
if None in transforms:
raise ValueError("This transformation path is not possible")
if len(full_path.transforms) == 1:
raise ValueError(
f"A direct transform for {fromsys.__name__}->{lastsys.__name__} already"
" exists"
)
self.add_transform(
fromsys,
lastsys,
CompositeTransform(
transforms, fromsys, lastsys, priority=priority
)._as_single_transform(),
)
@contextmanager
def impose_finite_difference_dt(self, dt):
"""
Context manager to impose a finite-difference time step on all applicable transformations.
For each transformation in this transformation graph that has the attribute
``finite_difference_dt``, that attribute is set to the provided value. The only standard
transformation with this attribute is
`~astropy.coordinates.FunctionTransformWithFiniteDifference`.
Parameters
----------
dt : `~astropy.units.Quantity` ['time'] or callable
If a quantity, this is the size of the differential used to do the finite difference.
If a callable, should accept ``(fromcoord, toframe)`` and return the ``dt`` value.
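
        Examples
        --------
        A minimal sketch, where ``graph`` is a
        `~astropy.coordinates.TransformGraph` instance::

            from astropy import units as u

            with graph.impose_finite_difference_dt(1 * u.year):
                pass  # transformations done here use a 1-year time step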
"""
key = "finite_difference_dt"
saved_settings = []
try:
for to_frames in self._graph.values():
for transform in to_frames.values():
if hasattr(transform, key):
old_setting = (transform, key, getattr(transform, key))
saved_settings.append(old_setting)
setattr(transform, key, dt)
yield
finally:
for setting in saved_settings:
setattr(*setting)
# <-------------------Define the builtin transform classes-------------------->
class CoordinateTransform(metaclass=ABCMeta):
"""
An object that transforms a coordinate from one system to another.
Subclasses must implement `__call__` with the provided signature.
They should also call this superclass's ``__init__`` in their
``__init__``.
Parameters
----------
fromsys : `~astropy.coordinates.BaseCoordinateFrame` subclass
The coordinate frame class to start from.
tosys : `~astropy.coordinates.BaseCoordinateFrame` subclass
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
"""
def __init__(self, fromsys, tosys, priority=1, register_graph=None):
if not inspect.isclass(fromsys):
raise TypeError("fromsys must be a class")
if not inspect.isclass(tosys):
raise TypeError("tosys must be a class")
self.fromsys = fromsys
self.tosys = tosys
self.priority = float(priority)
if register_graph:
# this will do the type-checking when it adds to the graph
self.register(register_graph)
else:
if not inspect.isclass(fromsys) or not inspect.isclass(tosys):
raise TypeError("fromsys and tosys must be classes")
self.overlapping_frame_attr_names = overlap = []
if hasattr(fromsys, "frame_attributes") and hasattr(tosys, "frame_attributes"):
# the if statement is there so that non-frame things might be usable
# if it makes sense
for from_nm in fromsys.frame_attributes:
if from_nm in tosys.frame_attributes:
overlap.append(from_nm)
def register(self, graph):
"""
Add this transformation to the requested Transformation graph,
replacing anything already connecting these two coordinates.
Parameters
----------
graph : `~astropy.coordinates.TransformGraph` object
The graph to register this transformation with.
"""
graph.add_transform(self.fromsys, self.tosys, self)
def unregister(self, graph):
"""
Remove this transformation from the requested transformation
graph.
Parameters
----------
graph : a TransformGraph object
The graph to unregister this transformation from.
Raises
------
ValueError
If this is not currently in the transform graph.
"""
graph.remove_transform(self.fromsys, self.tosys, self)
@abstractmethod
def __call__(self, fromcoord, toframe):
"""
Does the actual coordinate transformation from the ``fromsys`` class to
the ``tosys`` class.
Parameters
----------
fromcoord : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
An object of class matching ``fromsys`` that is to be transformed.
toframe : object
An object that has the attributes necessary to fully specify the
frame. That is, it must have attributes with names that match the
keys of the dictionary ``tosys.frame_attributes``.
Typically this is of class ``tosys``, but it *might* be
some other class as long as it has the appropriate attributes.
Returns
-------
tocoord : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
The new coordinate after the transform has been applied.
"""
class FunctionTransform(CoordinateTransform):
"""
A coordinate transformation defined by a function that accepts a
coordinate object and returns the transformed coordinate object.
Parameters
----------
func : callable
The transformation function. Should have a call signature
        ``func(fromcoord, toframe)``. Note that, unlike
`CoordinateTransform.__call__`, ``toframe`` is assumed to be of type
``tosys`` for this function.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``func`` is not callable.
ValueError
If ``func`` cannot accept two arguments.
"""
def __init__(self, func, fromsys, tosys, priority=1, register_graph=None):
if not callable(func):
raise TypeError("func must be callable")
with suppress(TypeError):
sig = signature(func)
kinds = [x.kind for x in sig.parameters.values()]
            # count the parameters that can be filled positionally (``len`` on a
            # generator raises TypeError, which would silently skip this check)
            if (
                sum(kind in (inspect.Parameter.POSITIONAL_ONLY,
                             inspect.Parameter.POSITIONAL_OR_KEYWORD)
                    for kind in kinds) != 2
                and inspect.Parameter.VAR_POSITIONAL not in kinds
            ):
raise ValueError("provided function does not accept two arguments")
self.func = func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def __call__(self, fromcoord, toframe):
res = self.func(fromcoord, toframe)
if not isinstance(res, self.tosys):
raise TypeError(
f"the transformation function yielded {res} but "
f"should have been of type {self.tosys}"
)
if fromcoord.data.differentials and not res.data.differentials:
warn(
"Applied a FunctionTransform to a coordinate frame with "
"differentials, but the FunctionTransform does not handle "
"differentials, so they have been dropped.",
AstropyWarning,
)
return res
class FunctionTransformWithFiniteDifference(FunctionTransform):
r"""Transormation based on functions using finite difference for velocities.
A coordinate transformation that works like a
`~astropy.coordinates.FunctionTransform`, but computes velocity shifts
based on the finite-difference relative to one of the frame attributes.
Note that the transform function should *not* change the differential at
all in this case, as any differentials will be overridden.
When a differential is in the from coordinate, the finite difference
    calculation has two components. The first part is simply the existing
    differential, re-oriented (using finite-difference techniques) to
    point in the direction the velocity vector has in the *new* frame. The
second component is the "induced" velocity. That is, the velocity
intrinsic to the frame itself, estimated by shifting the frame using the
``finite_difference_frameattr_name`` frame attribute a small amount
(``finite_difference_dt``) in time and re-calculating the position.
Parameters
----------
finite_difference_frameattr_name : str or None
The name of the frame attribute on the frames to use for the finite
difference. Both the to and the from frame will be checked for this
attribute, but only one needs to have it. If None, no velocity
component induced from the frame itself will be included - only the
re-orientation of any existing differential.
finite_difference_dt : `~astropy.units.Quantity` ['time'] or callable
If a quantity, this is the size of the differential used to do the
finite difference. If a callable, should accept
``(fromcoord, toframe)`` and return the ``dt`` value.
symmetric_finite_difference : bool
If True, the finite difference is computed as
        :math:`\frac{x(t + \Delta t / 2) - x(t - \Delta t / 2)}{\Delta t}`, or
if False, :math:`\frac{x(t + \Delta t) - x(t)}{\Delta t}`. The latter
case has slightly better performance (and more stable finite difference
behavior).
All other parameters are identical to the initializer for
`~astropy.coordinates.FunctionTransform`.
"""
def __init__(
self,
func,
fromsys,
tosys,
priority=1,
register_graph=None,
finite_difference_frameattr_name="obstime",
finite_difference_dt=1 * u.second,
symmetric_finite_difference=True,
):
super().__init__(func, fromsys, tosys, priority, register_graph)
self.finite_difference_frameattr_name = finite_difference_frameattr_name
self.finite_difference_dt = finite_difference_dt
self.symmetric_finite_difference = symmetric_finite_difference
@property
def finite_difference_frameattr_name(self):
return self._finite_difference_frameattr_name
@finite_difference_frameattr_name.setter
def finite_difference_frameattr_name(self, value):
if value is None:
self._diff_attr_in_fromsys = self._diff_attr_in_tosys = False
else:
diff_attr_in_fromsys = value in self.fromsys.frame_attributes
diff_attr_in_tosys = value in self.tosys.frame_attributes
if diff_attr_in_fromsys or diff_attr_in_tosys:
self._diff_attr_in_fromsys = diff_attr_in_fromsys
self._diff_attr_in_tosys = diff_attr_in_tosys
else:
raise ValueError(
f"Frame attribute name {value} is not a frame attribute of"
f" {self.fromsys} or {self.tosys}"
)
self._finite_difference_frameattr_name = value
def __call__(self, fromcoord, toframe):
from .representation import CartesianDifferential, CartesianRepresentation
supcall = self.func
if fromcoord.data.differentials:
# this is the finite difference case
if callable(self.finite_difference_dt):
dt = self.finite_difference_dt(fromcoord, toframe)
else:
dt = self.finite_difference_dt
halfdt = dt / 2
from_diffless = fromcoord.realize_frame(
fromcoord.data.without_differentials()
)
reprwithoutdiff = supcall(from_diffless, toframe)
# first we use the existing differential to compute an offset due to
# the already-existing velocity, but in the new frame
fromcoord_cart = fromcoord.cartesian
if self.symmetric_finite_difference:
fwdxyz = (
fromcoord_cart.xyz
+ fromcoord_cart.differentials["s"].d_xyz * halfdt
)
fwd = supcall(
fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe
)
backxyz = (
fromcoord_cart.xyz
- fromcoord_cart.differentials["s"].d_xyz * halfdt
)
back = supcall(
fromcoord.realize_frame(CartesianRepresentation(backxyz)), toframe
)
else:
fwdxyz = (
fromcoord_cart.xyz + fromcoord_cart.differentials["s"].d_xyz * dt
)
fwd = supcall(
fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe
)
back = reprwithoutdiff
diffxyz = (fwd.cartesian - back.cartesian).xyz / dt
# now we compute the "induced" velocities due to any movement in
# the frame itself over time
attrname = self.finite_difference_frameattr_name
if attrname is not None:
if self.symmetric_finite_difference:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + halfdt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + halfdt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) - halfdt}
from_diffless_back = from_diffless.replicate(**kws)
else:
from_diffless_back = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) - halfdt}
back_frame = toframe.replicate_without_data(**kws)
else:
back_frame = toframe
back = supcall(from_diffless_back, back_frame)
else:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + dt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + dt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
back = reprwithoutdiff
diffxyz += (fwd.cartesian - back.cartesian).xyz / dt
newdiff = CartesianDifferential(diffxyz)
reprwithdiff = reprwithoutdiff.data.to_cartesian().with_differentials(
newdiff
)
return reprwithoutdiff.realize_frame(reprwithdiff)
else:
return supcall(fromcoord, toframe)
class BaseAffineTransform(CoordinateTransform):
"""Base class for common functionality between the ``AffineTransform``-type
subclasses.
This base class is needed because `~astropy.coordinates.AffineTransform`
and the matrix transform classes share the ``__call__()`` method, but
differ in how they generate the affine parameters.
`~astropy.coordinates.StaticMatrixTransform` passes in a matrix stored as a
class attribute, and both of the matrix transforms pass in ``None`` for the
offset. Hence, user subclasses would likely want to subclass this (rather
than `~astropy.coordinates.AffineTransform`) if they want to provide
alternative transformations using this machinery.
"""
def _apply_transform(self, fromcoord, matrix, offset):
from .representation import (
CartesianDifferential,
RadialDifferential,
SphericalCosLatDifferential,
SphericalDifferential,
UnitSphericalRepresentation,
)
data = fromcoord.data
has_velocity = "s" in data.differentials
# Bail out if no transform is actually requested
if matrix is None and offset is None:
return data
# list of unit differentials
_unit_diffs = (
SphericalDifferential._unit_differential,
SphericalCosLatDifferential._unit_differential,
)
unit_vel_diff = has_velocity and isinstance(
data.differentials["s"], _unit_diffs
)
rad_vel_diff = has_velocity and isinstance(
data.differentials["s"], RadialDifferential
)
# Some initial checking to short-circuit doing any re-representation if
# we're going to fail anyways:
if isinstance(data, UnitSphericalRepresentation) and offset is not None:
raise TypeError(
"Position information stored on coordinate frame "
"is insufficient to do a full-space position "
"transformation (representation class: {data.__class__})"
)
elif (
has_velocity
and (unit_vel_diff or rad_vel_diff)
and offset is not None
and "s" in offset.differentials
):
# Coordinate has a velocity, but it is not a full-space velocity
# that we need to do a velocity offset
raise TypeError(
"Velocity information stored on coordinate frame is insufficient to do"
" a full-space velocity transformation (differential class:"
f" {data.differentials['s'].__class__})"
)
elif len(data.differentials) > 1:
# We should never get here because the frame initializer shouldn't
# allow more differentials, but this just adds protection for
# subclasses that somehow skip the checks
raise ValueError(
"Representation passed to AffineTransform contains multiple associated"
" differentials. Only a single differential with velocity units is"
f" presently supported (differentials: {data.differentials})."
)
# If the representation is a UnitSphericalRepresentation, and this is
# just a MatrixTransform, we have to try to turn the differential into a
# Unit version of the differential (if no radial velocity) or a
# sphericaldifferential with zero proper motion (if only a radial
# velocity) so that the matrix operation works
if (
has_velocity
and isinstance(data, UnitSphericalRepresentation)
and not unit_vel_diff
and not rad_vel_diff
):
# retrieve just velocity differential
unit_diff = data.differentials["s"].represent_as(
data.differentials["s"]._unit_differential, data
)
data = data.with_differentials({"s": unit_diff}) # updates key
# If it's a RadialDifferential, we flat-out ignore the differentials
# This is because, by this point (past the validation above), we can
# only possibly be doing a rotation-only transformation, and that
# won't change the radial differential. We later add it back in
elif rad_vel_diff:
data = data.without_differentials()
# Convert the representation and differentials to cartesian without
# having them attached to a frame
rep = data.to_cartesian()
diffs = {
k: diff.represent_as(CartesianDifferential, data)
for k, diff in data.differentials.items()
}
rep = rep.with_differentials(diffs)
# Only do transform if matrix is specified. This is for speed in
# transformations that only specify an offset (e.g., LSR)
if matrix is not None:
# Note: this applies to both representation and differentials
rep = rep.transform(matrix)
# TODO: if we decide to allow arithmetic between representations that
# contain differentials, this can be tidied up
if offset is not None:
newrep = rep.without_differentials() + offset.without_differentials()
else:
newrep = rep.without_differentials()
# We need a velocity (time derivative) and, for now, are strict: the
# representation can only contain a velocity differential and no others.
if has_velocity and not rad_vel_diff:
veldiff = rep.differentials["s"] # already in Cartesian form
if offset is not None and "s" in offset.differentials:
veldiff = veldiff + offset.differentials["s"]
newrep = newrep.with_differentials({"s": veldiff})
if isinstance(fromcoord.data, UnitSphericalRepresentation):
# Special-case this because otherwise the return object will think
# it has a valid distance with the default return (a
# CartesianRepresentation instance)
if has_velocity and not unit_vel_diff and not rad_vel_diff:
# We have to first represent as the Unit types we converted to,
# then put the d_distance information back in to the
# differentials and re-represent as their original forms
newdiff = newrep.differentials["s"]
_unit_cls = fromcoord.data.differentials["s"]._unit_differential
newdiff = newdiff.represent_as(_unit_cls, newrep)
kwargs = {comp: getattr(newdiff, comp) for comp in newdiff.components}
kwargs["d_distance"] = fromcoord.data.differentials["s"].d_distance
diffs = {
"s": fromcoord.data.differentials["s"].__class__(
copy=False, **kwargs
)
}
elif has_velocity and unit_vel_diff:
newdiff = newrep.differentials["s"].represent_as(
fromcoord.data.differentials["s"].__class__, newrep
)
diffs = {"s": newdiff}
else:
diffs = newrep.differentials
newrep = newrep.represent_as(fromcoord.data.__class__) # drops diffs
newrep = newrep.with_differentials(diffs)
elif has_velocity and unit_vel_diff:
# Here, we're in the case where the representation is not
# UnitSpherical, but the differential *is* one of the UnitSpherical
# types. We have to convert back to that differential class or the
# resulting frame will think it has a valid radial_velocity. This
# can probably be cleaned up: we currently have to go through the
# dimensional version of the differential before representing as the
# unit differential so that the units work out (the distance length
# unit shouldn't appear in the resulting proper motions)
diff_cls = fromcoord.data.differentials["s"].__class__
newrep = newrep.represent_as(
fromcoord.data.__class__, diff_cls._dimensional_differential
)
newrep = newrep.represent_as(fromcoord.data.__class__, diff_cls)
# We pulled the radial differential off of the representation
# earlier, so now we need to put it back. But, in order to do that, we
# have to turn the representation into a repr that is compatible with
# having a RadialDifferential
if has_velocity and rad_vel_diff:
newrep = newrep.represent_as(fromcoord.data.__class__)
newrep = newrep.with_differentials({"s": fromcoord.data.differentials["s"]})
return newrep
def __call__(self, fromcoord, toframe):
params = self._affine_params(fromcoord, toframe)
newrep = self._apply_transform(fromcoord, *params)
return toframe.realize_frame(newrep)
@abstractmethod
def _affine_params(self, fromcoord, toframe):
pass
class AffineTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a 3 x 3
cartesian transformation matrix and a tuple of displacement vectors.
See `~astropy.coordinates.Galactocentric` for
an example.
Parameters
----------
transform_func : callable
A callable that has the signature ``transform_func(fromcoord, toframe)``
and returns: a (3, 3) matrix that operates on ``fromcoord`` in a
Cartesian representation, and a ``CartesianRepresentation`` with
(optionally) an attached velocity ``CartesianDifferential`` to represent
a translation and offset in velocity to apply after the matrix
operation.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``transform_func`` is not callable
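
    Examples
    --------
    A minimal sketch of a ``transform_func``; the identity matrix and the
    offset used here are placeholders rather than a real transformation::

        import numpy as np
        from astropy import units as u
        from astropy.coordinates import (CartesianDifferential,
                                         CartesianRepresentation)

        def frame1_to_frame2(fromcoord, toframe):
            matrix = np.eye(3)
            offset = CartesianRepresentation(
                [1, 0, 0] * u.kpc,
                differentials=CartesianDifferential([0, 0, 0] * u.km / u.s),
            )
            return matrix, offset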
"""
def __init__(self, transform_func, fromsys, tosys, priority=1, register_graph=None):
if not callable(transform_func):
raise TypeError("transform_func is not callable")
self.transform_func = transform_func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.transform_func(fromcoord, toframe)
class StaticMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation defined as a 3 x 3 cartesian
transformation matrix.
This is distinct from DynamicMatrixTransform in that this kind of matrix is
independent of frame attributes. That is, it depends *only* on the class of
the frame.
Parameters
----------
matrix : array-like or callable
A 3 x 3 matrix for transforming 3-vectors. In most cases will
be unitary (although this is not strictly required). If a callable,
will be called *with no arguments* to get the matrix.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
ValueError
If the matrix is not 3 x 3
"""
def __init__(self, matrix, fromsys, tosys, priority=1, register_graph=None):
if callable(matrix):
matrix = matrix()
self.matrix = np.array(matrix)
if self.matrix.shape != (3, 3):
raise ValueError("Provided matrix is not 3 x 3")
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.matrix, None
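# Illustrative sketch (not part of astropy): applying matrix ``m1`` and then
# matrix ``m2`` to a 3-vector is equivalent to applying the single matrix
# ``m2 @ m1``.  This is the ordering used by CompositeTransform._combine_statics
# (defined below) when it collapses consecutive StaticMatrixTransforms.
def _example_static_matrix_collapse():
    import numpy as np
    from astropy import units as u
    from astropy.coordinates.matrix_utilities import rotation_matrix
    m1 = rotation_matrix(30 * u.deg, "z")
    m2 = rotation_matrix(45 * u.deg, "x")
    vec = np.array([1.0, 2.0, 3.0])
    assert np.allclose(m2 @ (m1 @ vec), (m2 @ m1) @ vec)
    return m2 @ m1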
class DynamicMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a
3 x 3 cartesian transformation matrix.
This is similar to, but distinct from StaticMatrixTransform, in that the
matrix for this class might depend on frame attributes.
Parameters
----------
matrix_func : callable
A callable that has the signature ``matrix_func(fromcoord, toframe)`` and
returns a 3 x 3 matrix that converts ``fromcoord`` in a cartesian
representation to the new coordinate system.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``matrix_func`` is not callable
"""
def __init__(self, matrix_func, fromsys, tosys, priority=1, register_graph=None):
if not callable(matrix_func):
raise TypeError("matrix_func is not callable")
self.matrix_func = matrix_func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.matrix_func(fromcoord, toframe), None
class CompositeTransform(CoordinateTransform):
"""
A transformation constructed by combining together a series of single-step
transformations.
Note that the intermediate frame objects are constructed using any frame
attributes in ``toframe`` or ``fromframe`` that overlap with the intermediate
frame (``toframe`` favored over ``fromframe`` if there's a conflict). Any frame
attributes that are not present use the defaults.
Parameters
----------
transforms : sequence of `~astropy.coordinates.CoordinateTransform` objects
The sequence of transformations to apply.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
collapse_static_mats : bool
If `True`, consecutive `~astropy.coordinates.StaticMatrixTransform` objects
will be collapsed into a single transformation to speed up the
calculation.
"""
def __init__(
self,
transforms,
fromsys,
tosys,
priority=1,
register_graph=None,
collapse_static_mats=True,
):
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
if collapse_static_mats:
transforms = self._combine_statics(transforms)
self.transforms = tuple(transforms)
def _combine_statics(self, transforms):
"""
Combine consecutive StaticMatrixTransforms into single transforms
and return the resulting list of transforms.
"""
newtrans = []
for currtrans in transforms:
lasttrans = newtrans[-1] if len(newtrans) > 0 else None
if isinstance(lasttrans, StaticMatrixTransform) and isinstance(
currtrans, StaticMatrixTransform
):
newtrans[-1] = StaticMatrixTransform(
currtrans.matrix @ lasttrans.matrix,
lasttrans.fromsys,
currtrans.tosys,
)
else:
newtrans.append(currtrans)
return newtrans
def __call__(self, fromcoord, toframe):
curr_coord = fromcoord
for t in self.transforms:
# build an intermediate frame with attributes taken from either
# `toframe`, or if not there, `fromcoord`, or if not there, use
# the defaults
# TODO: caching this information when creating the transform may
# speed things up a lot
frattrs = {}
for inter_frame_attr_nm in t.tosys.frame_attributes:
if hasattr(toframe, inter_frame_attr_nm):
attr = getattr(toframe, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
elif hasattr(fromcoord, inter_frame_attr_nm):
attr = getattr(fromcoord, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
curr_toframe = t.tosys(**frattrs)
curr_coord = t(curr_coord, curr_toframe)
# this is safe even in the case where self.transforms is empty, because
# coordinate objects are immutable, so copying is not needed
return curr_coord
def _as_single_transform(self):
"""
Return an encapsulated version of the composite transform so that it appears to
be a single transform.
The returned transform internally calls the constituent transforms. If all of
the transforms are affine, the merged transform is
`~astropy.coordinates.DynamicMatrixTransform` (if there are no
origin shifts) or `~astropy.coordinates.AffineTransform`
(otherwise). If at least one of the transforms is not affine, the merged
transform is
`~astropy.coordinates.FunctionTransformWithFiniteDifference`.
"""
# Create a list of the transforms including flattening any constituent CompositeTransform
transforms = [
t if not isinstance(t, CompositeTransform) else t._as_single_transform()
for t in self.transforms
]
if all(isinstance(t, BaseAffineTransform) for t in transforms):
# Check if there may be an origin shift
fixed_origin = all(
isinstance(t, (StaticMatrixTransform, DynamicMatrixTransform))
for t in transforms
)
# Dynamically define the transformation function
def single_transform(from_coo, to_frame):
if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame
return None if fixed_origin else (None, None)
# Create a merged attribute dictionary for any intermediate frames
# For any attributes shared by the "from"/"to" frames, the "to" frame takes
# precedence because this is the same choice implemented in __call__()
merged_attr = {
name: getattr(from_coo, name) for name in from_coo.frame_attributes
}
merged_attr.update(
{
name: getattr(to_frame, name)
for name in to_frame.frame_attributes
}
)
affine_params = (None, None)
# Step through each transform step (frame A -> frame B)
for i, t in enumerate(transforms):
# Extract the relevant attributes for frame A
if i == 0:
# If frame A is actually the initial frame, preserve its attributes
a_attr = {
name: getattr(from_coo, name)
for name in from_coo.frame_attributes
}
else:
a_attr = {
k: v
for k, v in merged_attr.items()
if k in t.fromsys.frame_attributes
}
# Extract the relevant attributes for frame B
b_attr = {
k: v
for k, v in merged_attr.items()
if k in t.tosys.frame_attributes
}
# Obtain the affine parameters for the transform
# Note that we insert some dummy data into frame A because the transformation
# machinery requires there to be data present. Removing that limitation
# is a possible TODO, but some care would need to be taken because some affine
# transforms have branching code depending on the presence of differentials.
next_affine_params = t._affine_params(
t.fromsys(from_coo.data, **a_attr), t.tosys(**b_attr)
)
# Combine the affine parameters with the running set
affine_params = _combine_affine_params(
affine_params, next_affine_params
)
# If there is no origin shift, return only the matrix
return affine_params[0] if fixed_origin else affine_params
# The return type depends on whether there is any origin shift
transform_type = DynamicMatrixTransform if fixed_origin else AffineTransform
else:
# Dynamically define the transformation function
def single_transform(from_coo, to_frame):
if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame
return to_frame.realize_frame(from_coo.data)
return self(from_coo, to_frame)
transform_type = FunctionTransformWithFiniteDifference
return transform_type(
single_transform, self.fromsys, self.tosys, priority=self.priority
)
def _combine_affine_params(params, next_params):
"""
Combine two sets of affine parameters.
The parameters for an affine transformation are a 3 x 3 Cartesian
transformation matrix and a displacement vector, which can include an
attached velocity. Either type of parameter can be ``None``.
"""
M, vec = params
next_M, next_vec = next_params
# Multiply the transformation matrices if they both exist
if M is not None and next_M is not None:
new_M = next_M @ M
else:
new_M = M if M is not None else next_M
if vec is not None:
# Transform the first displacement vector by the second transformation matrix
if next_M is not None:
vec = vec.transform(next_M)
# Calculate the new displacement vector
if next_vec is not None:
if "s" in vec.differentials and "s" in next_vec.differentials:
# Adding vectors with velocities takes more steps
# TODO: Add support in representation.py
new_vec_velocity = vec.differentials["s"] + next_vec.differentials["s"]
new_vec = vec.without_differentials() + next_vec.without_differentials()
new_vec = new_vec.with_differentials({"s": new_vec_velocity})
else:
new_vec = vec + next_vec
else:
new_vec = vec
else:
new_vec = next_vec
return new_M, new_vec
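# Illustrative numpy-only check (not part of astropy): chaining the affine map
# x -> M1 @ x + v1 with x -> M2 @ x + v2 gives x -> (M2 @ M1) @ x + (M2 @ v1 + v2),
# which is the combination rule implemented above (there with Representation
# objects and optional velocity differentials instead of bare arrays).
def _example_combine_affine_params_check():
    rng = np.random.default_rng(0)
    M1, M2 = rng.normal(size=(2, 3, 3))
    v1, v2 = rng.normal(size=(2, 3))
    x = rng.normal(size=3)
    stepwise = M2 @ (M1 @ x + v1) + v2
    combined = (M2 @ M1) @ x + (M2 @ v1 + v2)
    assert np.allclose(stepwise, combined)
    return M2 @ M1, M2 @ v1 + v2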
# map class names to colorblind-safe colors
trans_to_color = {}
trans_to_color[AffineTransform] = "#555555" # gray
trans_to_color[FunctionTransform] = "#783001" # dark red-ish/brown
trans_to_color[FunctionTransformWithFiniteDifference] = "#d95f02" # red-ish
trans_to_color[StaticMatrixTransform] = "#7570b3" # blue-ish
trans_to_color[DynamicMatrixTransform] = "#1b9e77" # green-ish
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions implementing some of the
algorithms contained within Jean Meeus, 'Astronomical Algorithms',
second edition, 1998, Willmann-Bell.
"""
import erfa
import numpy as np
from numpy.polynomial.polynomial import polyval
from astropy import units as u
from astropy.utils import deprecated
from . import ICRS, GeocentricTrueEcliptic, SkyCoord
from .builtin_frames.utils import get_jd12
__all__ = ["calc_moon"]
# Meeus 1998: table 47.A
# D M M' F l r
_MOON_L_R = (
(0, 0, 1, 0, 6288774, -20905355),
(2, 0, -1, 0, 1274027, -3699111),
(2, 0, 0, 0, 658314, -2955968),
(0, 0, 2, 0, 213618, -569925),
(0, 1, 0, 0, -185116, 48888),
(0, 0, 0, 2, -114332, -3149),
(2, 0, -2, 0, 58793, 246158),
(2, -1, -1, 0, 57066, -152138),
(2, 0, 1, 0, 53322, -170733),
(2, -1, 0, 0, 45758, -204586),
(0, 1, -1, 0, -40923, -129620),
(1, 0, 0, 0, -34720, 108743),
(0, 1, 1, 0, -30383, 104755),
(2, 0, 0, -2, 15327, 10321),
(0, 0, 1, 2, -12528, 0),
(0, 0, 1, -2, 10980, 79661),
(4, 0, -1, 0, 10675, -34782),
(0, 0, 3, 0, 10034, -23210),
(4, 0, -2, 0, 8548, -21636),
(2, 1, -1, 0, -7888, 24208),
(2, 1, 0, 0, -6766, 30824),
(1, 0, -1, 0, -5163, -8379),
(1, 1, 0, 0, 4987, -16675),
(2, -1, 1, 0, 4036, -12831),
(2, 0, 2, 0, 3994, -10445),
(4, 0, 0, 0, 3861, -11650),
(2, 0, -3, 0, 3665, 14403),
(0, 1, -2, 0, -2689, -7003),
(2, 0, -1, 2, -2602, 0),
(2, -1, -2, 0, 2390, 10056),
(1, 0, 1, 0, -2348, 6322),
(2, -2, 0, 0, 2236, -9884),
(0, 1, 2, 0, -2120, 5751),
(0, 2, 0, 0, -2069, 0),
(2, -2, -1, 0, 2048, -4950),
(2, 0, 1, -2, -1773, 4130),
(2, 0, 0, 2, -1595, 0),
(4, -1, -1, 0, 1215, -3958),
(0, 0, 2, 2, -1110, 0),
(3, 0, -1, 0, -892, 3258),
(2, 1, 1, 0, -810, 2616),
(4, -1, -2, 0, 759, -1897),
(0, 2, -1, 0, -713, -2117),
(2, 2, -1, 0, -700, 2354),
(2, 1, -2, 0, 691, 0),
(2, -1, 0, -2, 596, 0),
(4, 0, 1, 0, 549, -1423),
(0, 0, 4, 0, 537, -1117),
(4, -1, 0, 0, 520, -1571),
(1, 0, -2, 0, -487, -1739),
(2, 1, 0, -2, -399, 0),
(0, 0, 2, -2, -381, -4421),
(1, 1, 1, 0, 351, 0),
(3, 0, -2, 0, -340, 0),
(4, 0, -3, 0, 330, 0),
(2, -1, 2, 0, 327, 0),
(0, 2, 1, 0, -323, 1165),
(1, 1, -1, 0, 299, 0),
(2, 0, 3, 0, 294, 0),
(2, 0, -1, -2, 0, 8752),
)
# Meeus 1998: table 47.B
# D M M' F b
_MOON_B = (
(0, 0, 0, 1, 5128122),
(0, 0, 1, 1, 280602),
(0, 0, 1, -1, 277693),
(2, 0, 0, -1, 173237),
(2, 0, -1, 1, 55413),
(2, 0, -1, -1, 46271),
(2, 0, 0, 1, 32573),
(0, 0, 2, 1, 17198),
(2, 0, 1, -1, 9266),
(0, 0, 2, -1, 8822),
(2, -1, 0, -1, 8216),
(2, 0, -2, -1, 4324),
(2, 0, 1, 1, 4200),
(2, 1, 0, -1, -3359),
(2, -1, -1, 1, 2463),
(2, -1, 0, 1, 2211),
(2, -1, -1, -1, 2065),
(0, 1, -1, -1, -1870),
(4, 0, -1, -1, 1828),
(0, 1, 0, 1, -1794),
(0, 0, 0, 3, -1749),
(0, 1, -1, 1, -1565),
(1, 0, 0, 1, -1491),
(0, 1, 1, 1, -1475),
(0, 1, 1, -1, -1410),
(0, 1, 0, -1, -1344),
(1, 0, 0, -1, -1335),
(0, 0, 3, 1, 1107),
(4, 0, 0, -1, 1021),
(4, 0, -1, 1, 833),
# second column
(0, 0, 1, -3, 777),
(4, 0, -2, 1, 671),
(2, 0, 0, -3, 607),
(2, 0, 2, -1, 596),
(2, -1, 1, -1, 491),
(2, 0, -2, 1, -451),
(0, 0, 3, -1, 439),
(2, 0, 2, 1, 422),
(2, 0, -3, -1, 421),
(2, 1, -1, 1, -366),
(2, 1, 0, 1, -351),
(4, 0, 0, 1, 331),
(2, -1, 1, 1, 315),
(2, -2, 0, -1, 302),
(0, 0, 1, 3, -283),
(2, 1, 1, -1, -229),
(1, 1, 0, -1, 223),
(1, 1, 0, 1, 223),
(0, 1, -2, -1, -220),
(2, 1, -1, -1, -220),
(1, 0, 1, 1, -185),
(2, -1, -2, -1, 181),
(0, 1, 2, 1, -177),
(4, 0, -2, -1, 176),
(4, -1, -1, -1, 166),
(1, 0, 1, -1, -164),
(4, 0, 1, -1, 132),
(1, 0, -1, -1, -119),
(4, -1, 0, -1, 115),
(2, -2, 0, 1, 107),
)
"""
Coefficients of polynomials for various terms:
Lc : Mean longitude of the Moon, w.r.t. the mean equinox of date
D : Mean elongation of the Moon
M : Sun's mean anomaly
Mc : Moon's mean anomaly
F : Moon's argument of latitude (mean distance of the Moon from its ascending node).
"""
_coLc = (2.18316448e02, 4.81267881e05, -1.57860000e-03, 1.85583502e-06, -1.53388349e-08)
_coD = (2.97850192e02, 4.45267111e05, -1.88190000e-03, 1.83194472e-06, -8.84447000e-09)
_coM = (3.57529109e02, 3.59990503e04, -1.53600000e-04, 4.08329931e-08)
_coMc = (1.34963396e02, 4.77198868e05, 8.74140000e-03, 1.43474081e-05, -6.79717238e-08)
_coF = (9.32720950e01, 4.83202018e05, -3.65390000e-03, -2.83607487e-07, 1.15833246e-09)
_coA1 = (119.75, 131.849)
_coA2 = (53.09, 479264.290)
_coA3 = (313.45, 481266.484)
_coE = (1.0, -0.002516, -0.0000074)
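# Illustrative sketch (not part of astropy): the coefficient tuples above are
# evaluated with ``polyval`` at T, the number of Julian centuries (TDB) since
# J2000.0, to give the Meeus mean elements in degrees -- exactly what calc_moon
# does below.  The date used here is an arbitrary example.
def _example_mean_elements():
    from astropy.time import Time
    T = (Time("2015-06-16", scale="tdb").jyear - 2000.0) / 100.0
    return {
        "D": polyval(T, _coD) % 360.0,  # mean elongation of the Moon (deg)
        "M": polyval(T, _coM) % 360.0,  # Sun's mean anomaly (deg)
        "Mc": polyval(T, _coMc) % 360.0,  # Moon's mean anomaly (deg)
        "F": polyval(T, _coF) % 360.0,  # Moon's argument of latitude (deg)
    }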
@deprecated(
since="5.0",
alternative="astropy.coordinates.get_body('moon')",
message=(
"The private calc_moon function has been deprecated, as its functionality is"
" now available in ERFA. Note that the coordinate system was not interpreted"
" quite correctly, leading to small inaccuracies. Please use the public"
" get_body() function instead."
),
)
def calc_moon(t):
"""
Lunar position model ELP2000-82 of Chapront-Touzé and Chapront (1983, 124, 50).
This is the simplified version of Jean Meeus, Astronomical Algorithms,
second edition, 1998, Willmann-Bell. Meeus claims approximate accuracy of 10"
in longitude and 4" in latitude, with no specified time range.
Tests against JPL ephemerides show accuracy of 10 arcseconds and 50 km over the
date range CE 1950-2050.
Parameters
----------
t : `~astropy.time.Time`
Time of observation.
Returns
-------
skycoord : `~astropy.coordinates.SkyCoord`
ICRS Coordinate for the body
"""
# number of centuries since J2000.0.
# This should strictly speaking be in Ephemeris Time, but TDB or TT
# will introduce error smaller than intrinsic accuracy of algorithm.
T = (t.tdb.jyear - 2000.0) / 100.0
# constants that are needed for all calculations
Lc = u.Quantity(polyval(T, _coLc), u.deg)
D = u.Quantity(polyval(T, _coD), u.deg)
M = u.Quantity(polyval(T, _coM), u.deg)
Mc = u.Quantity(polyval(T, _coMc), u.deg)
F = u.Quantity(polyval(T, _coF), u.deg)
A1 = u.Quantity(polyval(T, _coA1), u.deg)
A2 = u.Quantity(polyval(T, _coA2), u.deg)
A3 = u.Quantity(polyval(T, _coA3), u.deg)
E = polyval(T, _coE)
suml = sumr = 0.0
for DNum, MNum, McNum, FNum, LFac, RFac in _MOON_L_R:
corr = E ** abs(MNum)
suml += LFac * corr * np.sin(D * DNum + M * MNum + Mc * McNum + F * FNum)
sumr += RFac * corr * np.cos(D * DNum + M * MNum + Mc * McNum + F * FNum)
sumb = 0.0
for DNum, MNum, McNum, FNum, BFac in _MOON_B:
corr = E ** abs(MNum)
sumb += BFac * corr * np.sin(D * DNum + M * MNum + Mc * McNum + F * FNum)
suml += 3958 * np.sin(A1) + 1962 * np.sin(Lc - F) + 318 * np.sin(A2)
sumb += (
-2235 * np.sin(Lc)
+ 382 * np.sin(A3)
+ 175 * np.sin(A1 - F)
+ 175 * np.sin(A1 + F)
+ 127 * np.sin(Lc - Mc)
- 115 * np.sin(Lc + Mc)
)
# ensure units
suml = suml * u.microdegree
sumb = sumb * u.microdegree
# nutation of longitude
jd1, jd2 = get_jd12(t, "tt")
nut, _ = erfa.nut06a(jd1, jd2)
nut = nut * u.rad
# calculate ecliptic coordinates
lon = Lc + suml + nut
lat = sumb
dist = (385000.56 + sumr / 1000) * u.km
# Meeus algorithm gives GeocentricTrueEcliptic coordinates
ecliptic_coo = GeocentricTrueEcliptic(lon, lat, distance=dist, obstime=t, equinox=t)
return SkyCoord(ecliptic_coo.transform_to(ICRS()))
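# Minimal usage sketch (not part of astropy).  calc_moon is deprecated; the
# modern equivalent is astropy.coordinates.get_body("moon", t).  The date below
# is an arbitrary example; the returned Quantity is the 3-D offset between the
# two position estimates.
def _example_calc_moon():
    import warnings
    from astropy.coordinates import get_body
    from astropy.time import Time
    t = Time("2015-06-16")
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")  # calc_moon emits a deprecation warning
        meeus = calc_moon(t)
    erfa_moon = get_body("moon", t).transform_to(meeus.frame)
    return (meeus.cartesian - erfa_moon.cartesian).norm()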
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import collections
import json
import socket
import urllib.error
import urllib.parse
import urllib.request
from warnings import warn
import erfa
import numpy as np
from astropy import constants as consts
from astropy import units as u
from astropy.units.quantity import QuantityInfoBase
from astropy.utils import data
from astropy.utils.decorators import format_doc
from astropy.utils.exceptions import AstropyUserWarning
from .angles import Angle, Latitude, Longitude
from .errors import UnknownSiteException
from .matrix_utilities import matrix_transpose
from .representation import (
BaseRepresentation,
CartesianDifferential,
CartesianRepresentation,
)
__all__ = [
"EarthLocation",
"BaseGeodeticRepresentation",
"WGS84GeodeticRepresentation",
"WGS72GeodeticRepresentation",
"GRS80GeodeticRepresentation",
]
GeodeticLocation = collections.namedtuple("GeodeticLocation", ["lon", "lat", "height"])
ELLIPSOIDS = {}
"""Available ellipsoids (defined in erfam.h, with numbers exposed in erfa)."""
# Note: they get filled by the creation of the geodetic classes.
OMEGA_EARTH = (1.002_737_811_911_354_48 * u.cycle / u.day).to(
1 / u.s, u.dimensionless_angles()
)
"""
Rotational velocity of Earth, following SOFA's pvtob.
In UT1 seconds, this would be 2 pi / (24 * 3600), but we need the value
in SI seconds, so multiply by the ratio of stellar to solar day.
See Explanatory Supplement to the Astronomical Almanac, ed. P. Kenneth
Seidelmann (1992), University Science Books. The constant is the
conventional, exact one (IERS conventions 2003); see
http://hpiers.obspm.fr/eop-pc/index.php?index=constants.
"""
def _check_ellipsoid(ellipsoid=None, default="WGS84"):
if ellipsoid is None:
ellipsoid = default
if ellipsoid not in ELLIPSOIDS:
raise ValueError(f"Ellipsoid {ellipsoid} not among known ones ({ELLIPSOIDS})")
return ellipsoid
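# Illustrative numeric check (not part of astropy): OMEGA_EARTH, defined above,
# is the familiar Earth rotation rate of about 7.292115e-5 rad/s, i.e.
# 2*pi/86400 scaled by the stellar-to-solar-day ratio quoted in its note.
def _example_omega_earth_check():
    expected = 1.002_737_811_911_354_48 * 2 * np.pi / 86400 / u.s
    assert np.isclose(OMEGA_EARTH.to_value(1 / u.s), expected.to_value(1 / u.s))
    return OMEGA_EARTH.to(1 / u.s)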
def _get_json_result(url, err_str, use_google):
# need to do this here to prevent a series of complicated circular imports
from .name_resolve import NameResolveError
try:
# Retrieve JSON response from Google maps API
resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout)
resp_data = json.loads(resp.read().decode("utf8"))
except urllib.error.URLError as e:
# This catches a timeout error, see:
# http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python
if isinstance(e.reason, socket.timeout):
raise NameResolveError(err_str.format(msg="connection timed out")) from e
else:
raise NameResolveError(err_str.format(msg=e.reason)) from e
except socket.timeout:
# There are some cases where urllib2 does not catch socket.timeout
# especially while receiving response data on an already previously
# working request
raise NameResolveError(err_str.format(msg="connection timed out"))
if use_google:
results = resp_data.get("results", [])
if resp_data.get("status", None) != "OK":
raise NameResolveError(
err_str.format(msg="unknown failure with Google API")
)
else: # OpenStreetMap returns a list
results = resp_data
if not results:
raise NameResolveError(err_str.format(msg="no results returned"))
return results
class EarthLocationInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ("x", "y", "z", "ellipsoid")
def _construct_from_dict(self, map):
# Need to pop ellipsoid off and update post-instantiation. This is
# on the to-fix list in #4261.
ellipsoid = map.pop("ellipsoid")
out = self._parent_cls(**map)
out.ellipsoid = ellipsoid
return out
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new EarthLocation instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : EarthLocation (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Very similar to QuantityInfo.new_like, but the creation of the
# map is different enough that this needs its own routine.
# Get merged info attributes shape, dtype, format, description.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "format", "description")
)
# The above raises an error if the dtypes do not match, but returns
# just the string representation, which is not useful, so remove.
attrs.pop("dtype")
# Make empty EarthLocation using the dtype and unit of the last column.
# Use zeros so we do not get problems for possible conversion to
# geodetic coordinates.
shape = (length,) + attrs.pop("shape")
data = u.Quantity(
np.zeros(shape=shape, dtype=cols[0].dtype), unit=cols[0].unit, copy=False
)
# Get arguments needed to reconstruct class
map = {
key: (data[key] if key in "xyz" else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs
}
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
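# Illustrative sketch (not part of astropy): ``new_like`` above is what lets
# EarthLocation act as a mixin column in table operations such as vstack.  The
# coordinates are arbitrary example values.
def _example_earthlocation_table_column():
    from astropy.table import Table, vstack
    loc = EarthLocation.from_geodetic(lon=[10, 20] * u.deg, lat=[45, 50] * u.deg)
    stacked = vstack([Table({"site": loc}), Table({"site": loc})])
    return stacked["site"]  # still an EarthLocation, now with four rows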
class EarthLocation(u.Quantity):
"""
Location on the Earth.
Initialization is first attempted assuming geocentric (x, y, z) coordinates
are given; if that fails, another attempt is made assuming geodetic
coordinates (longitude, latitude, height above a reference ellipsoid).
When using the geodetic forms, longitudes are measured increasing to the
east, so west longitudes are negative. Internally, the coordinates are
stored as geocentric.
To ensure a specific type of coordinates is used, use the corresponding
class methods (`from_geocentric` and `from_geodetic`) or initialize the
arguments with names (``x``, ``y``, ``z`` for geocentric; ``lon``, ``lat``,
``height`` for geodetic). See the class methods for details.
Notes
-----
This class fits into the coordinates transformation framework in that it
encodes a position on the `~astropy.coordinates.ITRS` frame. To get a
proper `~astropy.coordinates.ITRS` object from this object, use the ``itrs``
property.
"""
_ellipsoid = "WGS84"
_location_dtype = np.dtype({"names": ["x", "y", "z"], "formats": [np.float64] * 3})
_array_dtype = np.dtype((np.float64, (3,)))
_site_registry = None
info = EarthLocationInfo()
def __new__(cls, *args, **kwargs):
# TODO: needs copy argument and better dealing with inputs.
if len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], EarthLocation):
return args[0].copy()
try:
self = cls.from_geocentric(*args, **kwargs)
except (u.UnitsError, TypeError) as exc_geocentric:
try:
self = cls.from_geodetic(*args, **kwargs)
except Exception as exc_geodetic:
raise TypeError(
"Coordinates could not be parsed as either "
"geocentric or geodetic, with respective "
f'exceptions "{exc_geocentric}" and "{exc_geodetic}"'
)
return self
@classmethod
def from_geocentric(cls, x, y, z, unit=None):
"""
Location on Earth, initialized from geocentric coordinates.
Parameters
----------
x, y, z : `~astropy.units.Quantity` or array-like
Cartesian coordinates. If not quantities, ``unit`` should be given.
unit : unit-like or None
Physical unit of the coordinate values. If ``x``, ``y``, and/or
``z`` are quantities, they will be converted to this unit.
Raises
------
astropy.units.UnitsError
If the units on ``x``, ``y``, and ``z`` do not match or an invalid
unit is given.
ValueError
If the shapes of ``x``, ``y``, and ``z`` do not match.
TypeError
If ``x`` is not a `~astropy.units.Quantity` and no unit is given.
"""
if unit is None:
try:
unit = x.unit
except AttributeError:
raise TypeError(
"Geocentric coordinates should be Quantities "
"unless an explicit unit is given."
) from None
else:
unit = u.Unit(unit)
if unit.physical_type != "length":
raise u.UnitsError("Geocentric coordinates should be in units of length.")
try:
x = u.Quantity(x, unit, copy=False)
y = u.Quantity(y, unit, copy=False)
z = u.Quantity(z, unit, copy=False)
except u.UnitsError:
raise u.UnitsError("Geocentric coordinate units should all be consistent.")
x, y, z = np.broadcast_arrays(x, y, z)
struc = np.empty(x.shape, cls._location_dtype)
struc["x"], struc["y"], struc["z"] = x, y, z
return super().__new__(cls, struc, unit, copy=False)
@classmethod
def from_geodetic(cls, lon, lat, height=0.0, ellipsoid=None):
"""
Location on Earth, initialized from geodetic coordinates.
Parameters
----------
lon : `~astropy.coordinates.Longitude` or float
Earth East longitude. Can be anything that initialises an
`~astropy.coordinates.Angle` object (if float, in degrees).
lat : `~astropy.coordinates.Latitude` or float
Earth latitude. Can be anything that initialises an
`~astropy.coordinates.Latitude` object (if float, in degrees).
height : `~astropy.units.Quantity` ['length'] or float, optional
Height above reference ellipsoid (if float, in meters; default: 0).
ellipsoid : str, optional
Name of the reference ellipsoid to use (default: 'WGS84').
Available ellipsoids are: 'WGS84', 'GRS80', 'WGS72'.
Raises
------
astropy.units.UnitsError
If the units on ``lon`` and ``lat`` are inconsistent with angular
ones, or that on ``height`` with a length.
ValueError
If ``lon``, ``lat``, and ``height`` do not have the same shape, or
if ``ellipsoid`` is not recognized as among the ones implemented.
Notes
-----
For the conversion to geocentric coordinates, the ERFA routine
``gd2gc`` is used. See https://github.com/liberfa/erfa
"""
ellipsoid = _check_ellipsoid(ellipsoid, default=cls._ellipsoid)
# As wrapping fails on readonly input, we do so manually
lon = Angle(lon, u.degree, copy=False).wrap_at(180 * u.degree)
lat = Latitude(lat, u.degree, copy=False)
# don't convert to m by default, so we can use the height unit below.
if not isinstance(height, u.Quantity):
height = u.Quantity(height, u.m, copy=False)
# get geocentric coordinates.
geodetic = ELLIPSOIDS[ellipsoid](lon, lat, height, copy=False)
xyz = geodetic.to_cartesian().get_xyz(xyz_axis=-1) << height.unit
self = xyz.view(cls._location_dtype, cls).reshape(geodetic.shape)
self._ellipsoid = ellipsoid
return self
@classmethod
def of_site(cls, site_name, *, refresh_cache=False):
"""
Return an object of this class for a known observatory/site by name.
This is intended as a quick convenience function to get basic site
information, not a fully-featured exhaustive registry of observatories
and all their properties.
Additional information about the site is stored in the ``.info.meta``
dictionary of sites obtained using this method (see the examples below).
.. note::
This function is meant to access the site registry from the astropy
data server, which is saved in the user's local cache. If you would
like a site to be added there, issue a pull request to the
`astropy-data repository <https://github.com/astropy/astropy-data>`_ .
If the cache already exists the function will use it even if the
version in the astropy-data repository has been updated unless the
``refresh_cache=True`` option is used. If there is no cache and the
online version cannot be reached, this function falls back on a
built-in list, which currently only contains the Greenwich Royal
Observatory as an example case.
Parameters
----------
site_name : str
Name of the observatory (case-insensitive).
refresh_cache : bool, optional
If `True`, force replacement of the cached registry with a
newly downloaded version. (Default: `False`)
.. versionadded:: 5.3
Returns
-------
site : `~astropy.coordinates.EarthLocation` (or subclass) instance
The location of the observatory. The returned class will be the same
as this class.
Examples
--------
>>> from astropy.coordinates import EarthLocation
>>> keck = EarthLocation.of_site('Keck Observatory') # doctest: +REMOTE_DATA
>>> keck.geodetic # doctest: +REMOTE_DATA +FLOAT_CMP
GeodeticLocation(lon=<Longitude -155.47833333 deg>, lat=<Latitude 19.82833333 deg>, height=<Quantity 4160. m>)
>>> keck.info # doctest: +REMOTE_DATA
name = W. M. Keck Observatory
dtype = (float64, float64, float64)
unit = m
class = EarthLocation
n_bad = 0
>>> keck.info.meta # doctest: +REMOTE_DATA
{'source': 'IRAF Observatory Database', 'timezone': 'US/Hawaii'}
See Also
--------
get_site_names : the list of sites that this function can access
"""
registry = cls._get_site_registry(force_download=refresh_cache)
try:
el = registry[site_name]
except UnknownSiteException as e:
raise UnknownSiteException(
e.site, "EarthLocation.get_site_names", close_names=e.close_names
) from e
if cls is el.__class__:
return el
else:
newel = cls.from_geodetic(*el.to_geodetic())
newel.info.name = el.info.name
return newel
@classmethod
def of_address(cls, address, get_height=False, google_api_key=None):
"""
Return an object of this class for a given address by querying either
the OpenStreetMap Nominatim tool [1]_ (default) or the Google geocoding
API [2]_, which requires a specified API key.
This is intended as a quick convenience function to get easy access to
locations. If you need to specify a precise location, you should use the
initializer directly and pass in a longitude, latitude, and elevation.
In the background, this just issues a web query to either of
the APIs noted above. This is not meant to be abused! Both
OpenStreetMap and Google use IP-based query limiting and will ban your
IP if you send more than a few thousand queries per hour [2]_.
.. warning::
If the query returns more than one location (e.g., searching on
``address='springfield'``), this function will use the **first**
returned location.
Parameters
----------
address : str
The address to get the location for. As per the Google maps API,
this can be a fully specified street address (e.g., 123 Main St.,
New York, NY) or a city name (e.g., Danbury, CT), or etc.
get_height : bool, optional
This only works when using the Google API! See the ``google_api_key``
block below. Use the retrieved location to perform a second query to
the Google maps elevation API to retrieve the height of the input
address [3]_.
google_api_key : str, optional
A Google API key with the Geocoding API and (optionally) the
elevation API enabled. See [4]_ for more information.
Returns
-------
location : `~astropy.coordinates.EarthLocation` (or subclass) instance
The location of the input address.
The returned class will be the same as this class.
References
----------
.. [1] https://nominatim.openstreetmap.org/
.. [2] https://developers.google.com/maps/documentation/geocoding/start
.. [3] https://developers.google.com/maps/documentation/elevation/start
.. [4] https://developers.google.com/maps/documentation/geocoding/get-api-key
"""
use_google = google_api_key is not None
# Fail fast if invalid options are passed:
if not use_google and get_height:
raise ValueError(
"Currently, `get_height` only works when using the Google geocoding"
" API, which requires passing a Google API key with `google_api_key`."
" See:"
" https://developers.google.com/maps/documentation/geocoding/get-api-key"
" for information on obtaining an API key."
)
if use_google: # Google
pars = urllib.parse.urlencode({"address": address, "key": google_api_key})
geo_url = f"https://maps.googleapis.com/maps/api/geocode/json?{pars}"
else: # OpenStreetMap
pars = urllib.parse.urlencode({"q": address, "format": "json"})
geo_url = f"https://nominatim.openstreetmap.org/search?{pars}"
# get longitude and latitude location
err_str = f"Unable to retrieve coordinates for address '{address}'; {{msg}}"
geo_result = _get_json_result(geo_url, err_str=err_str, use_google=use_google)
if use_google:
loc = geo_result[0]["geometry"]["location"]
lat = loc["lat"]
lon = loc["lng"]
else:
loc = geo_result[0]
lat = float(loc["lat"]) # strings are returned by OpenStreetMap
lon = float(loc["lon"])
if get_height:
pars = {"locations": f"{lat:.8f},{lon:.8f}", "key": google_api_key}
pars = urllib.parse.urlencode(pars)
ele_url = f"https://maps.googleapis.com/maps/api/elevation/json?{pars}"
err_str = f"Unable to retrieve elevation for address '{address}'; {{msg}}"
ele_result = _get_json_result(
ele_url, err_str=err_str, use_google=use_google
)
height = ele_result[0]["elevation"] * u.meter
else:
height = 0.0
return cls.from_geodetic(lon=lon * u.deg, lat=lat * u.deg, height=height)
@classmethod
def get_site_names(cls, *, refresh_cache=False):
"""
Get list of names of observatories for use with
`~astropy.coordinates.EarthLocation.of_site`.
.. note::
This function is meant to access the site registry from the astropy
data server, which is saved in the user's local cache. If you would
like a site to be added there, issue a pull request to the
`astropy-data repository <https://github.com/astropy/astropy-data>`_ .
If the cache already exists the function will use it even if the
version in the astropy-data repository has been updated unless the
``refresh_cache=True`` option is used. If there is no cache and the
online version cannot be reached, this function falls back on a
built-in list, which currently only contains the Greenwich Royal
Observatory as an example case.
Parameters
----------
refresh_cache : bool, optional
If `True`, force replacement of the cached registry with a
newly downloaded version. (Default: `False`)
.. versionadded:: 5.3
Returns
-------
names : list of str
List of valid observatory names
See Also
--------
of_site : Gets the actual location object for one of the sites names
this returns.
"""
return cls._get_site_registry(force_download=refresh_cache).names
@classmethod
def _get_site_registry(cls, force_download=False, force_builtin=False):
"""
Gets the site registry. The first time this either downloads or loads
from the data file packaged with astropy. Subsequent calls will use the
cached version unless explicitly overridden.
Parameters
----------
force_download : bool or str
If not False, force replacement of the cached registry with a
downloaded version. If a str, that will be used as the URL to
download from (if just True, the default URL will be used).
force_builtin : bool
If True, load from the data file bundled with astropy and set the
cache to that.
Returns
-------
reg : astropy.coordinates.sites.SiteRegistry
"""
# need to do this here at the bottom to avoid circular dependencies
from .sites import get_builtin_sites, get_downloaded_sites
if force_builtin and force_download:
raise ValueError("Cannot have both force_builtin and force_download True")
if force_builtin:
cls._site_registry = get_builtin_sites()
else:
if force_download or not cls._site_registry:
try:
if isinstance(force_download, str):
cls._site_registry = get_downloaded_sites(force_download)
else:
cls._site_registry = get_downloaded_sites()
except OSError:
if force_download:
raise
msg = (
"Could not access the main site list. Falling back on the "
"built-in version, which is rather limited. If you want to "
"retry the download, use the option 'refresh_cache=True'."
)
warn(msg, AstropyUserWarning)
cls._site_registry = get_builtin_sites()
return cls._site_registry
@property
def ellipsoid(self):
"""The default ellipsoid used to convert to geodetic coordinates."""
return self._ellipsoid
@ellipsoid.setter
def ellipsoid(self, ellipsoid):
self._ellipsoid = _check_ellipsoid(ellipsoid)
@property
def geodetic(self):
"""Convert to geodetic coordinates for the default ellipsoid."""
return self.to_geodetic()
def to_geodetic(self, ellipsoid=None):
"""Convert to geodetic coordinates.
Parameters
----------
ellipsoid : str, optional
Reference ellipsoid to use. Default is the one the coordinates
were initialized with. Available are: 'WGS84', 'GRS80', 'WGS72'
Returns
-------
lon, lat, height : `~astropy.units.Quantity`
The tuple is a ``GeodeticLocation`` namedtuple and is comprised of
instances of `~astropy.coordinates.Longitude`,
`~astropy.coordinates.Latitude`, and `~astropy.units.Quantity`.
Raises
------
ValueError
if ``ellipsoid`` is not recognized as among the ones implemented.
Notes
-----
For the conversion to geodetic coordinates, the ERFA routine
``gc2gd`` is used. See https://github.com/liberfa/erfa
"""
ellipsoid = _check_ellipsoid(ellipsoid, default=self.ellipsoid)
xyz = self.view(self._array_dtype, u.Quantity)
llh = CartesianRepresentation(xyz, xyz_axis=-1, copy=False).represent_as(
ELLIPSOIDS[ellipsoid]
)
return GeodeticLocation(
Longitude(llh.lon, u.deg, wrap_angle=180 * u.deg, copy=False),
llh.lat << u.deg,
llh.height << self.unit,
)
@property
def lon(self):
"""Longitude of the location, for the default ellipsoid."""
return self.geodetic[0]
@property
def lat(self):
"""Latitude of the location, for the default ellipsoid."""
return self.geodetic[1]
@property
def height(self):
"""Height of the location, for the default ellipsoid."""
return self.geodetic[2]
# mostly for symmetry with geodetic and to_geodetic.
@property
def geocentric(self):
"""Convert to a tuple with X, Y, and Z as quantities."""
return self.to_geocentric()
def to_geocentric(self):
"""Convert to a tuple with X, Y, and Z as quantities."""
return (self.x, self.y, self.z)
def get_itrs(self, obstime=None, location=None):
"""
Generates an `~astropy.coordinates.ITRS` object with the location of
this object at the requested ``obstime``, either geocentric, or
topocentric relative to a given ``location``.
Parameters
----------
obstime : `~astropy.time.Time` or None
The ``obstime`` to apply to the new `~astropy.coordinates.ITRS`, or
if None, the default ``obstime`` will be used.
location : `~astropy.coordinates.EarthLocation` or None
A possible observer's location, for a topocentric ITRS position.
If not given (default), a geocentric ITRS object will be created.
Returns
-------
itrs : `~astropy.coordinates.ITRS`
The new object in the ITRS frame, either geocentric or topocentric
relative to the given ``location``.
"""
# Broadcast for a single position at multiple times, but don't attempt
# to be more general here.
if obstime and self.size == 1 and obstime.shape:
self = np.broadcast_to(self, obstime.shape, subok=True)
# do this here to prevent a series of complicated circular imports
from .builtin_frames import ITRS
if location is None:
# No location provided, return geocentric ITRS coordinates
return ITRS(x=self.x, y=self.y, z=self.z, obstime=obstime)
else:
return ITRS(
self.x - location.x,
self.y - location.y,
self.z - location.z,
copy=False,
obstime=obstime,
location=location,
)
itrs = property(
get_itrs,
doc="""An `~astropy.coordinates.ITRS` object
for the location of this object at the
default ``obstime``.""",
)
def get_gcrs(self, obstime):
"""GCRS position with velocity at ``obstime`` as a GCRS coordinate.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the GCRS position/velocity at.
Returns
-------
gcrs : `~astropy.coordinates.GCRS` instance
With velocity included.
"""
# do this here to prevent a series of complicated circular imports
from .builtin_frames import GCRS
loc, vel = self.get_gcrs_posvel(obstime)
loc.differentials["s"] = CartesianDifferential.from_cartesian(vel)
return GCRS(loc, obstime=obstime)
def _get_gcrs_posvel(self, obstime, ref_to_itrs, gcrs_to_ref):
"""Calculate GCRS position and velocity given transformation matrices.
The reference frame z axis must point to the Celestial Intermediate Pole
(as is the case for CIRS and TETE).
This private method is used in intermediate_rotation_transforms,
where some of the matrices are already available for the coordinate
transformation.
The method is faster by an order of magnitude than just adding a zero
velocity to ITRS and transforming to GCRS, because it avoids calculating
the velocity via finite differencing of the results of the transformation
at three separate times.
"""
# The simplest route is to transform to the reference frame where the
# z axis is properly aligned with the Earth's rotation axis (CIRS or
# TETE), then calculate the velocity, and then transform this
# reference position and velocity to GCRS. For speed, though, we
# transform the coordinates to GCRS in one step, and calculate the
# velocities by rotating around the earth's axis transformed to GCRS.
ref_to_gcrs = matrix_transpose(gcrs_to_ref)
itrs_to_gcrs = ref_to_gcrs @ matrix_transpose(ref_to_itrs)
# Earth's rotation vector in the ref frame is rot_vec_ref = (0,0,OMEGA_EARTH),
# so in GCRS it is ref_to_gcrs[..., 2] * OMEGA_EARTH.
rot_vec_gcrs = CartesianRepresentation(
ref_to_gcrs[..., 2] * OMEGA_EARTH, xyz_axis=-1, copy=False
)
# Get the position in the GCRS frame.
# Since we just need the cartesian representation of ITRS, avoid get_itrs().
itrs_cart = CartesianRepresentation(self.x, self.y, self.z, copy=False)
pos = itrs_cart.transform(itrs_to_gcrs)
vel = rot_vec_gcrs.cross(pos)
return pos, vel
def get_gcrs_posvel(self, obstime):
"""
Calculate the GCRS position and velocity of this object at the
requested ``obstime``.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the GCRS position/velocity at.
Returns
-------
obsgeoloc : `~astropy.coordinates.CartesianRepresentation`
The GCRS position of the object
obsgeovel : `~astropy.coordinates.CartesianRepresentation`
The GCRS velocity of the object
"""
# Local import to prevent circular imports.
from .builtin_frames.intermediate_rotation_transforms import (
cirs_to_itrs_mat,
gcrs_to_cirs_mat,
)
# Get gcrs_posvel by transforming via CIRS (slightly faster than TETE).
return self._get_gcrs_posvel(
obstime, cirs_to_itrs_mat(obstime), gcrs_to_cirs_mat(obstime)
)
def gravitational_redshift(
self, obstime, bodies=["sun", "jupiter", "moon"], masses={}
):
"""Return the gravitational redshift at this EarthLocation.
Calculates the gravitational redshift, of order 3 m/s, due to the
requested solar system bodies.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the redshift at.
bodies : iterable, optional
The bodies (other than the Earth) to include in the redshift
calculation. List elements should be any body name
`get_body_barycentric` accepts. Defaults to Jupiter, the Sun, and
the Moon. Earth is always included (because the class represents
an *Earth* location).
masses : dict[str, `~astropy.units.Quantity`], optional
The mass or gravitational parameters (G * mass) to assume for the
bodies requested in ``bodies``. Can be used to override the
defaults for the Sun, Jupiter, the Moon, and the Earth, or to
pass in masses for other bodies.
Returns
-------
redshift : `~astropy.units.Quantity`
Gravitational redshift in velocity units at given obstime.
"""
# needs to be here to avoid circular imports
from .solar_system import get_body_barycentric
bodies = list(bodies)
# Ensure earth is included and last in the list.
if "earth" in bodies:
bodies.remove("earth")
bodies.append("earth")
_masses = {
"sun": consts.GM_sun,
"jupiter": consts.GM_jup,
"moon": consts.G * 7.34767309e22 * u.kg,
"earth": consts.GM_earth,
}
_masses.update(masses)
GMs = []
M_GM_equivalency = (u.kg, u.Unit(consts.G * u.kg))
for body in bodies:
try:
GMs.append(_masses[body].to(u.m**3 / u.s**2, [M_GM_equivalency]))
except KeyError as err:
raise KeyError(f'body "{body}" does not have a mass.') from err
except u.UnitsError as exc:
exc.args += (
(
'"masses" argument values must be masses or '
"gravitational parameters."
),
)
raise
positions = [get_body_barycentric(name, obstime) for name in bodies]
# Calculate distances to objects other than earth.
distances = [(pos - positions[-1]).norm() for pos in positions[:-1]]
# Append distance from Earth's center for Earth's contribution.
distances.append(CartesianRepresentation(self.geocentric).norm())
# Get redshifts due to all objects.
redshifts = [
-GM / consts.c / distance for (GM, distance) in zip(GMs, distances)
]
# Reverse order of summing, to go from small to big, and to get
# "earth" first, which gives m/s as unit.
return sum(redshifts[::-1])
@property
def x(self):
"""The X component of the geocentric coordinates."""
return self["x"]
@property
def y(self):
"""The Y component of the geocentric coordinates."""
return self["y"]
@property
def z(self):
"""The Z component of the geocentric coordinates."""
return self["z"]
def __getitem__(self, item):
result = super().__getitem__(item)
if result.dtype is self.dtype:
return result.view(self.__class__)
else:
return result.view(u.Quantity)
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if hasattr(obj, "_ellipsoid"):
self._ellipsoid = obj._ellipsoid
def __len__(self):
if self.shape == ():
raise IndexError("0-d EarthLocation arrays cannot be indexed")
else:
return super().__len__()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
# Conversion to another unit in both ``to`` and ``to_value`` goes
# via this routine. To make the regular quantity routines work, we
# temporarily turn the structured array into a regular one.
array_view = self.view(self._array_dtype, np.ndarray)
if equivalencies == []:
equivalencies = self._equivalencies
new_array = self.unit.to(unit, array_view, equivalencies=equivalencies)
return new_array.view(self.dtype).reshape(self.shape)
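# Minimal usage sketch (not part of astropy): geodetic input is converted to and
# stored as geocentric (x, y, z); both views stay available afterwards.  The
# site coordinates below are arbitrary example values.
def _example_earthlocation_usage():
    site = EarthLocation.from_geodetic(
        lon=-17.88 * u.deg, lat=28.76 * u.deg, height=2350 * u.m
    )
    x, y, z = site.to_geocentric()  # Quantities in metres
    lon, lat, height = site.to_geodetic()  # GeodeticLocation namedtuple
    return x, y, z, lon, lat, height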
geodetic_base_doc = """{__doc__}
Parameters
----------
lon, lat : angle-like
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle` and either
`~astropy.coordinates.Longitude` or `~astropy.coordinates.Latitude`,
depending on the parameter.
height : `~astropy.units.Quantity` ['length']
The height of the point(s).
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
@format_doc(geodetic_base_doc)
class BaseGeodeticRepresentation(BaseRepresentation):
"""Base geodetic representation."""
attr_classes = {"lon": Longitude, "lat": Latitude, "height": u.Quantity}
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
if "_ellipsoid" in cls.__dict__:
ELLIPSOIDS[cls._ellipsoid] = cls
def __init__(self, lon, lat=None, height=None, copy=True):
if height is None and not isinstance(lon, self.__class__):
height = 0 << u.m
super().__init__(lon, lat, height, copy=copy)
if not self.height.unit.is_equivalent(u.m):
raise u.UnitTypeError(
f"{self.__class__.__name__} requires height with units of length."
)
def to_cartesian(self):
"""
Converts WGS84 geodetic coordinates to 3D rectangular (geocentric)
cartesian coordinates.
"""
xyz = erfa.gd2gc(
getattr(erfa, self._ellipsoid), self.lon, self.lat, self.height
)
return CartesianRepresentation(xyz, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates (assumed geocentric) to
WGS84 geodetic coordinates.
"""
lon, lat, height = erfa.gc2gd(
getattr(erfa, cls._ellipsoid), cart.get_xyz(xyz_axis=-1)
)
return cls(lon, lat, height, copy=False)
@format_doc(geodetic_base_doc)
class WGS84GeodeticRepresentation(BaseGeodeticRepresentation):
"""Representation of points in WGS84 3D geodetic coordinates."""
_ellipsoid = "WGS84"
@format_doc(geodetic_base_doc)
class WGS72GeodeticRepresentation(BaseGeodeticRepresentation):
"""Representation of points in WGS72 3D geodetic coordinates."""
_ellipsoid = "WGS72"
@format_doc(geodetic_base_doc)
class GRS80GeodeticRepresentation(BaseGeodeticRepresentation):
"""Representation of points in GRS80 3D geodetic coordinates."""
_ellipsoid = "GRS80"
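# Illustrative sketch (not part of astropy): defining the classes above fills
# the ELLIPSOIDS registry ('WGS84', 'WGS72', 'GRS80') via __init_subclass__, and
# each representation round-trips with CartesianRepresentation through ERFA's
# gd2gc/gc2gd.  The coordinates are arbitrary example values.
def _example_geodetic_roundtrip():
    geodetic = WGS84GeodeticRepresentation(
        lon=30 * u.deg, lat=60 * u.deg, height=100 * u.m
    )
    return WGS84GeodeticRepresentation.from_cartesian(geodetic.to_cartesian())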
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the fundamental classes used for representing
coordinates in astropy.
"""
import functools
from collections import namedtuple
import numpy as np
from astropy import units as u
from astropy.utils import isiterable
from . import angle_formats as form
__all__ = ["Angle", "Latitude", "Longitude"]
# these are used by the `hms` and `dms` attributes
hms_tuple = namedtuple("hms_tuple", ("h", "m", "s"))
dms_tuple = namedtuple("dms_tuple", ("d", "m", "s"))
signed_dms_tuple = namedtuple("signed_dms_tuple", ("sign", "d", "m", "s"))
class Angle(u.SpecificTypeQuantity):
"""
One or more angular value(s) with units equivalent to radians or degrees.
An angle can be specified either as an array, scalar, tuple (see
below), string, `~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports a variety of formats.
The examples below illustrate common ways of initializing an
`~astropy.coordinates.Angle` object. First some imports::
>>> from astropy.coordinates import Angle
>>> from astropy import units as u
The angle values can now be provided::
>>> Angle('10.2345d')
<Angle 10.2345 deg>
>>> Angle(['10.2345d', '-20d'])
<Angle [ 10.2345, -20. ] deg>
>>> Angle('1:2:30.43 degrees')
<Angle 1.04178611 deg>
>>> Angle('1 2 0 hours')
<Angle 1.03333333 hourangle>
>>> Angle(np.arange(1, 8), unit=u.deg)
<Angle [1., 2., 3., 4., 5., 6., 7.] deg>
>>> Angle('1°2′3″')
<Angle 1.03416667 deg>
>>> Angle('1°2′3″N')
<Angle 1.03416667 deg>
>>> Angle('1d2m3.4s')
<Angle 1.03427778 deg>
>>> Angle('1d2m3.4sS')
<Angle -1.03427778 deg>
>>> Angle('-1h2m3s')
<Angle -1.03416667 hourangle>
>>> Angle('-1h2m3sE')
<Angle -1.03416667 hourangle>
>>> Angle('-1h2.5m')
<Angle -1.04166667 hourangle>
>>> Angle('-1h2.5mW')
<Angle 1.04166667 hourangle>
>>> Angle('-1:2.5', unit=u.deg)
<Angle -1.04166667 deg>
>>> Angle(10.2345 * u.deg)
<Angle 10.2345 deg>
>>> Angle(Angle(10.2345 * u.deg))
<Angle 10.2345 deg>
Parameters
----------
angle : `~numpy.array`, scalar, `~astropy.units.Quantity`, `~astropy.coordinates.Angle`
The angle value. If a tuple, will be interpreted as ``(h, m,
s)`` or ``(d, m, s)`` depending on ``unit``. If a string, it
will be interpreted following the rules described above.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
"""
_equivalent_unit = u.radian
_include_easy_conversion_members = True
def __new__(cls, angle, unit=None, dtype=np.inexact, copy=True, **kwargs):
if not isinstance(angle, u.Quantity):
if unit is not None:
unit = cls._convert_unit_to_angle_unit(u.Unit(unit))
if isinstance(angle, tuple):
angle = cls._tuple_to_float(angle, unit)
elif isinstance(angle, str):
angle, angle_unit = form.parse_angle(angle, unit)
if angle_unit is None:
angle_unit = unit
if isinstance(angle, tuple):
if angle_unit == u.hourangle:
form._check_hour_range(angle[0])
form._check_minute_range(angle[1])
a = np.abs(angle[0]) + angle[1] / 60.0
if len(angle) == 3:
form._check_second_range(angle[2])
a += angle[2] / 3600.0
angle = np.copysign(a, angle[0])
if angle_unit is not unit:
# Possible conversion to `unit` will be done below.
angle = u.Quantity(angle, angle_unit, copy=False)
elif isiterable(angle) and not (
isinstance(angle, np.ndarray) and angle.dtype.kind not in "SUVO"
):
angle = [Angle(x, unit, copy=False) for x in angle]
return super().__new__(cls, angle, unit, dtype=dtype, copy=copy, **kwargs)
@staticmethod
def _tuple_to_float(angle, unit):
"""
Converts an angle represented as a 3-tuple or 2-tuple into a floating
point number in the given unit.
"""
# TODO: Numpy array of tuples?
if unit == u.hourangle:
return form.hms_to_hours(*angle)
elif unit == u.degree:
return form.dms_to_degrees(*angle)
else:
raise u.UnitsError(f"Can not parse '{angle}' as unit '{unit}'")
@staticmethod
def _convert_unit_to_angle_unit(unit):
return u.hourangle if unit == u.hour else unit
def _set_unit(self, unit):
super()._set_unit(self._convert_unit_to_angle_unit(unit))
@property
def hour(self):
"""
The angle's value in hours (read-only property).
"""
return self.hourangle
@property
def hms(self):
"""The angle's value in hours, as a named tuple with ``(h, m, s)`` members."""
return hms_tuple(*form.hours_to_hms(self.hourangle))
@property
def dms(self):
"""The angle's value in degrees, as a ``(d, m, s)`` named tuple."""
return dms_tuple(*form.degrees_to_dms(self.degree))
@property
def signed_dms(self):
"""The angle's value in degrees, as a ``(sign, d, m, s)`` named tuple.
The ``d``, ``m``, ``s`` are thus always positive, and the sign of
the angle is given by ``sign``.
This is primarily intended for use with `dms` to generate string
representations of coordinates that are correct for negative angles.
"""
return signed_dms_tuple(
np.sign(self.degree), *form.degrees_to_dms(np.abs(self.degree))
)
def to_string(
self,
unit=None,
decimal=False,
sep="fromunit",
precision=None,
alwayssign=False,
pad=False,
fields=3,
format=None,
):
"""A string representation of the angle.
Parameters
----------
unit : `~astropy.units.UnitBase`, optional
Specifies the unit. Must be an angular unit. If not
provided, the unit used to initialize the angle will be
used.
decimal : bool, optional
If `False`, the returned string will be in sexagesimal form
if possible (for units of degrees or hourangle). If `True`,
a decimal representation will be used. In that case, no unit
will be appended if ``format`` is not explicitly given.
sep : str, optional
The separator between numbers in a sexagesimal
representation. E.g., if it is ':', the result is
``'12:41:11.1241'``. Also accepts 2 or 3 separators. E.g.,
``sep='hms'`` would give the result ``'12h41m11.1241s'``, or
``sep='-:'`` would yield ``'11-21:17.124'``. Alternatively, the
special string 'fromunit' means 'dms' if the unit is
degrees, or 'hms' if the unit is hours.
precision : int, optional
The level of decimal precision. If ``decimal`` is `True`,
this is the raw precision, otherwise it gives the
precision of the last place of the sexagesimal
representation (seconds). If `None`, or not provided, the
number of decimal places is determined by the value, and
will be between 0-8 decimal places as required.
alwayssign : bool, optional
If `True`, include the sign no matter what. If `False`,
only include the sign if it is negative.
pad : bool, optional
If `True`, include leading zeros when needed to ensure a
fixed number of characters for sexagesimal representation.
fields : int, optional
Specifies the number of fields to display when outputting
sexagesimal notation. For example:
- fields == 1: ``'5d'``
- fields == 2: ``'5d45m'``
- fields == 3: ``'5d45m32.5s'``
By default, all fields are displayed.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'latex_inline': Return a LaTeX-formatted string which is the
same as with ``format='latex'`` for |Angle| instances
- 'unicode': Return a string containing non-ASCII unicode
characters, such as the degree symbol
Returns
-------
strrepr : str or array
A string representation of the angle. If the angle is an array, this
will be an array with a unicode dtype.
"""
if unit is None:
unit = self.unit
else:
unit = self._convert_unit_to_angle_unit(u.Unit(unit))
separators = {
"generic": {u.degree: "dms", u.hourangle: "hms"},
"latex": {
u.degree: [r"^\circ", r"{}^\prime", r"{}^{\prime\prime}"],
u.hourangle: [r"^{\mathrm{h}}", r"^{\mathrm{m}}", r"^{\mathrm{s}}"],
},
"unicode": {u.degree: "°′″", u.hourangle: "ʰᵐˢ"},
}
# 'latex_inline' provides no functionality beyond what 'latex' offers,
# but it should be implemented to avoid ValueErrors in user code.
separators["latex_inline"] = separators["latex"]
# Default separators are as for generic.
separators[None] = separators["generic"]
# Create an iterator so we can format each element of what
# might be an array.
if not decimal and unit in (u.degree, u.hourangle):
# Sexagesimal.
if sep == "fromunit":
if format not in separators:
raise ValueError(f"Unknown format '{format}'")
sep = separators[format][unit]
func = functools.partial(
form.degrees_to_string if unit_is_deg else form.hours_to_string,
precision=precision,
sep=sep,
pad=pad,
fields=fields,
)
else:
if sep != "fromunit":
raise ValueError(
f"'{unit}' can not be represented in sexagesimal notation"
)
func = ("{:g}" if precision is None else f"{{0:0.{precision}f}}").format
# Don't add unit by default for decimal.
if not (decimal and format is None):
unit_string = unit.to_string(format=format)
if format == "latex" or format == "latex_inline":
unit_string = unit_string[1:-1]
format_func = func
func = lambda x: format_func(x) + unit_string
def do_format(val):
# Check if value is not nan to avoid ValueErrors when turning it into
            # a sexagesimal string.
if not np.isnan(val):
s = func(float(val))
if alwayssign and not s.startswith("-"):
s = "+" + s
if format == "latex" or format == "latex_inline":
s = f"${s}$"
return s
s = f"{val}"
return s
values = self.to_value(unit)
format_ufunc = np.vectorize(do_format, otypes=["U"])
result = format_ufunc(values)
if result.ndim == 0:
result = result[()]
return result
def _wrap_at(self, wrap_angle):
"""
Implementation that assumes ``angle`` is already validated
and that wrapping is inplace.
"""
# Convert the wrap angle and 360 degrees to the native unit of
# this Angle, then do all the math on raw Numpy arrays rather
# than Quantity objects for speed.
a360 = u.degree.to(self.unit, 360.0)
wrap_angle = wrap_angle.to_value(self.unit)
wrap_angle_floor = wrap_angle - a360
self_angle = self.view(np.ndarray)
# Do the wrapping, but only if any angles need to be wrapped
#
# Catch any invalid warnings from the floor division.
with np.errstate(invalid="ignore"):
wraps = (self_angle - wrap_angle_floor) // a360
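            # E.g. with wrap_angle = 360 deg, an angle of -20 deg gives
            # wraps = (-20 - 0) // 360 = -1, so 360 deg is added back below.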
valid = np.isfinite(wraps) & (wraps != 0)
if np.any(valid):
self_angle -= wraps * a360
# Rounding errors can cause problems.
self_angle[self_angle >= wrap_angle] -= a360
self_angle[self_angle < wrap_angle_floor] += a360
def wrap_at(self, wrap_angle, inplace=False):
"""
Wrap the `~astropy.coordinates.Angle` object at the given ``wrap_angle``.
This method forces all the angle values to be within a contiguous
360 degree range so that ``wrap_angle - 360d <= angle <
wrap_angle``. By default a new Angle object is returned, but if the
``inplace`` argument is `True` then the `~astropy.coordinates.Angle`
object is wrapped in place and nothing is returned.
For instance::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20.0, 150.0, 350.0] * u.deg)
>>> a.wrap_at(360 * u.deg).degree # Wrap into range 0 to 360 degrees # doctest: +FLOAT_CMP
array([340., 150., 350.])
>>> a.wrap_at('180d', inplace=True) # Wrap into range -180 to 180 degrees # doctest: +FLOAT_CMP
>>> a.degree # doctest: +FLOAT_CMP
array([-20., 150., -10.])
Parameters
----------
wrap_angle : angle-like
Specifies a single value for the wrap angle. This can be any
object that can initialize an `~astropy.coordinates.Angle` object,
e.g. ``'180d'``, ``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
inplace : bool
If `True` then wrap the object in place instead of returning
a new `~astropy.coordinates.Angle`
Returns
-------
out : Angle or None
If ``inplace is False`` (default), return new
`~astropy.coordinates.Angle` object with angles wrapped accordingly.
Otherwise wrap in place and return `None`.
"""
wrap_angle = Angle(wrap_angle, copy=False) # Convert to an Angle
if not inplace:
self = self.copy()
self._wrap_at(wrap_angle)
return None if inplace else self
def is_within_bounds(self, lower=None, upper=None):
"""
Check if all angle(s) satisfy ``lower <= angle < upper``.
If ``lower`` is not specified (or `None`) then no lower bounds check is
performed. Likewise ``upper`` can be left unspecified. For example::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20, 150, 350] * u.deg)
>>> a.is_within_bounds('0d', '360d')
False
>>> a.is_within_bounds(None, '360d')
True
>>> a.is_within_bounds(-30 * u.deg, None)
True
Parameters
----------
lower : angle-like or None
Specifies lower bound for checking. This can be any object
that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
upper : angle-like or None
Specifies upper bound for checking. This can be any object
that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
Returns
-------
is_within_bounds : bool
`True` if all angles satisfy ``lower <= angle < upper``
"""
ok = True
if lower is not None:
ok &= np.all(Angle(lower) <= self)
if ok and upper is not None:
ok &= np.all(self < Angle(upper))
return bool(ok)
def _str_helper(self, format=None):
if self.isscalar:
return self.to_string(format=format)
def formatter(x):
return x.to_string(format=format)
return np.array2string(self, formatter={"all": formatter})
def __str__(self):
return self._str_helper()
def _repr_latex_(self):
return self._str_helper(format="latex")
def _no_angle_subclass(obj):
"""Return any Angle subclass objects as an Angle objects.
This is used to ensure that Latitude and Longitude change to Angle
objects when they are used in calculations (such as lon/2.)
"""
if isinstance(obj, tuple):
return tuple(_no_angle_subclass(_obj) for _obj in obj)
return obj.view(Angle) if isinstance(obj, (Latitude, Longitude)) else obj
class Latitude(Angle):
"""
Latitude-like angle(s) which must be in the range -90 to +90 deg.
A Latitude object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of being constrained
so that::
-90.0 * u.deg <= angle(s) <= +90.0 * u.deg
Any attempt to set a value outside that range will result in a
`ValueError`.
The input angle(s) can be specified either as an array, list,
scalar, tuple (see below), string,
:class:`~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
angle : array, list, scalar, `~astropy.units.Quantity`, `~astropy.coordinates.Angle`
The angle value(s). If a tuple, will be interpreted as ``(h, m, s)``
or ``(d, m, s)`` depending on ``unit``. If a string, it will be
interpreted following the rules described for
:class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Longitude`.
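    Examples
    --------
    A short usage sketch (an out-of-range value such as ``91 * u.deg`` would
    raise a `ValueError`)::
        >>> from astropy.coordinates import Latitude
        >>> import astropy.units as u
        >>> Latitude(45 * u.deg)
        <Latitude 45. deg>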
"""
def __new__(cls, angle, unit=None, **kwargs):
# Forbid creating a Lat from a Long.
if isinstance(angle, Longitude):
raise TypeError("A Latitude angle cannot be created from a Longitude angle")
self = super().__new__(cls, angle, unit=unit, **kwargs)
self._validate_angles()
return self
def _validate_angles(self, angles=None):
"""Check that angles are between -90 and 90 degrees.
If not given, the check is done on the object itself.
"""
# Convert the lower and upper bounds to the "native" unit of
# this angle. This limits multiplication to two values,
# rather than the N values in `self.value`. Also, the
# comparison is performed on raw arrays, rather than Quantity
# objects, for speed.
if angles is None:
angles = self
# For speed, compare using "is", which is not strictly guaranteed to hold,
# but if it doesn't we'll just convert correctly in the 'else' clause.
if angles.unit is u.deg:
limit = 90
elif angles.unit is u.rad:
limit = 0.5 * np.pi
else:
limit = u.degree.to(angles.unit, 90.0)
# Ensure ndim>=1 so that comparison is done using the angle dtype.
# Otherwise, e.g., np.array(np.pi/2, 'f4') > np.pi/2 will yield True.
# (This feels like a bug -- see https://github.com/numpy/numpy/issues/23247)
# Note that we should avoid using `angles.dtype` directly since for
# structured arrays like Distribution this will be `void`.
angles_view = angles.view(np.ndarray)[np.newaxis]
invalid_angles = np.any(angles_view < -limit) or np.any(angles_view > limit)
if invalid_angles:
raise ValueError(
"Latitude angle(s) must be within -90 deg <= angle <= 90 deg, "
f"got {angles.to(u.degree)}"
)
def __setitem__(self, item, value):
# Forbid assigning a Long to a Lat.
if isinstance(value, Longitude):
raise TypeError("A Longitude angle cannot be assigned to a Latitude angle")
# first check bounds
if value is not np.ma.masked:
self._validate_angles(value)
super().__setitem__(item, value)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
class LongitudeInfo(u.QuantityInfo):
_represent_as_dict_attrs = u.QuantityInfo._represent_as_dict_attrs + ("wrap_angle",)
class Longitude(Angle):
"""
Longitude-like angle(s) which are wrapped within a contiguous 360 degree range.
A ``Longitude`` object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of a ``wrap_angle``
property. The ``wrap_angle`` specifies that all angle values
represented by the object will be in the range::
wrap_angle - 360 * u.deg <= angle(s) < wrap_angle
The default ``wrap_angle`` is 360 deg. Setting ``wrap_angle=180 *
u.deg`` would instead result in values between -180 and +180 deg.
Setting the ``wrap_angle`` attribute of an existing ``Longitude``
object will result in re-wrapping the angle values in-place.
The input angle(s) can be specified either as an array, list,
scalar, tuple, string, :class:`~astropy.units.Quantity`
or another :class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
angle : tuple or angle-like
        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)`` or
``(d, m, s)`` depending on ``unit``. If a string, it will be interpreted
following the rules described for :class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like ['angle'], optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
wrap_angle : angle-like or None, optional
Angle at which to wrap back to ``wrap_angle - 360 deg``.
If ``None`` (default), it will be taken to be 360 deg unless ``angle``
has a ``wrap_angle`` attribute already (i.e., is a ``Longitude``),
in which case it will be taken from there.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Latitude`.
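    Examples
    --------
    A minimal sketch of the wrapping behaviour (values shown are indicative)::
        >>> from astropy.coordinates import Longitude
        >>> import astropy.units as u
        >>> lon = Longitude([-20, 370] * u.deg)
        >>> lon.degree  # doctest: +FLOAT_CMP
        array([340.,  10.])
        >>> lon.wrap_angle = 180 * u.deg
        >>> lon.degree  # doctest: +FLOAT_CMP
        array([-20.,  10.])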
"""
_wrap_angle = None
_default_wrap_angle = Angle(360 * u.deg)
info = LongitudeInfo()
def __new__(cls, angle, unit=None, wrap_angle=None, **kwargs):
# Forbid creating a Long from a Lat.
if isinstance(angle, Latitude):
raise TypeError(
"A Longitude angle cannot be created from a Latitude angle."
)
self = super().__new__(cls, angle, unit=unit, **kwargs)
if wrap_angle is None:
wrap_angle = getattr(angle, "wrap_angle", self._default_wrap_angle)
self.wrap_angle = wrap_angle # angle-like b/c property setter
return self
def __setitem__(self, item, value):
# Forbid assigning a Lat to a Long.
if isinstance(value, Latitude):
raise TypeError("A Latitude angle cannot be assigned to a Longitude angle")
super().__setitem__(item, value)
self._wrap_at(self.wrap_angle)
@property
def wrap_angle(self):
return self._wrap_angle
@wrap_angle.setter
def wrap_angle(self, value):
self._wrap_angle = Angle(value, copy=False)
self._wrap_at(self.wrap_angle)
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._wrap_angle = getattr(obj, "_wrap_angle", self._default_wrap_angle)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
|
3e3285e5a6fd88ed33f74255eacdee6e2a77cd33cfee40a7bf8a55d8c61808f4 | """Implements the wrapper for the Astropy test runner.
This is for backward-compatibility for other downstream packages and can be removed
once astropy-helpers has reached end-of-life.
"""
import os
import shutil
import stat
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from setuptools import Command
from astropy.logger import log
@contextmanager
def _suppress_stdout():
"""
A context manager to temporarily disable stdout.
Used later when installing a temporary copy of astropy to avoid a
very verbose output.
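    For example, a minimal sketch::
        with _suppress_stdout():
            print("this is swallowed")
        print("this is shown")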
"""
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
class FixRemoteDataOption(type):
"""
This metaclass is used to catch cases where the user is running the tests
with --remote-data. We've now changed the --remote-data option so that it
takes arguments, but we still want --remote-data to work as before and to
enable all remote tests. With this metaclass, we can modify sys.argv
    before setuptools tries to parse the command-line options.
"""
def __init__(cls, name, bases, dct):
try:
idx = sys.argv.index("--remote-data")
except ValueError:
pass
else:
sys.argv[idx] = "--remote-data=any"
try:
idx = sys.argv.index("-R")
except ValueError:
pass
else:
sys.argv[idx] = "-R=any"
return super().__init__(name, bases, dct)
class AstropyTest(Command, metaclass=FixRemoteDataOption):
description = "Run the tests for this package"
user_options = [
(
"package=",
"P",
"The name of a specific package to test, e.g. 'io.fits' or 'utils'. "
"Accepts comma separated string to specify multiple packages. "
"If nothing is specified, all default tests are run.",
),
(
"test-path=",
"t",
"Specify a test location by path. If a relative path to a .py file, "
'it is relative to the built package, so e.g., a leading "astropy/" '
"is necessary. If a relative path to a .rst file, it is relative to "
"the directory *below* the --docs-path directory, so a leading "
'"docs/" is usually necessary. May also be an absolute path.',
),
("verbose-results", "V", "Turn on verbose output from pytest."),
("plugins=", "p", "Plugins to enable when running pytest."),
("pastebin=", "b", "Enable pytest pastebin output. Either 'all' or 'failed'."),
("args=", "a", "Additional arguments to be passed to pytest."),
(
"remote-data=",
"R",
"Run tests that download remote data. Should be "
"one of none/astropy/any (defaults to none).",
),
(
"pep8",
"8",
"Enable PEP8 checking and disable regular tests. "
"Requires the pytest-pep8 plugin.",
),
("pdb", "d", "Start the interactive Python debugger on errors."),
("coverage", "c", "Create a coverage report. Requires the coverage package."),
(
"parallel=",
"j",
"Run the tests in parallel on the specified number of "
'CPUs. If "auto", all the cores on the machine will be '
"used. Requires the pytest-xdist plugin.",
),
(
"docs-path=",
None,
"The path to the documentation .rst files. If not provided, and "
'the current directory contains a directory called "docs", that '
"will be used.",
),
("skip-docs", None, "Don't test the documentation .rst files."),
(
"repeat=",
None,
"How many times to repeat each test (can be used to check for "
"sporadic failures).",
),
(
"temp-root=",
None,
"The root directory in which to create the temporary testing files. "
"If unspecified the system default is used (e.g. /tmp) as explained "
"in the documentation for tempfile.mkstemp.",
),
(
"verbose-install",
None,
"Turn on terminal output from the installation of astropy in a "
"temporary folder.",
),
("readonly", None, "Make the temporary installation being tested read-only."),
]
package_name = ""
def initialize_options(self):
self.package = None
self.test_path = None
self.verbose_results = False
self.plugins = None
self.pastebin = None
self.args = None
self.remote_data = "none"
self.pep8 = False
self.pdb = False
self.coverage = False
self.parallel = 0
self.docs_path = None
self.skip_docs = False
self.repeat = None
self.temp_root = None
self.verbose_install = False
self.readonly = False
def finalize_options(self):
# Normally we would validate the options here, but that's handled in
# run_tests
pass
def generate_testing_command(self):
"""
Build a Python script to run the tests.
"""
cmd_pre = "" # Commands to run before the test function
cmd_post = "" # Commands to run after the test function
if self.coverage:
pre, post = self._generate_coverage_commands()
cmd_pre += pre
cmd_post += post
set_flag = "import builtins; builtins._ASTROPY_TEST_ = True"
cmd = ( # see _build_temp_install below
"{cmd_pre}{0}; import {1.package_name}, sys; result = ("
"{1.package_name}.test("
"package={1.package!r}, "
"test_path={1.test_path!r}, "
"args={1.args!r}, "
"plugins={1.plugins!r}, "
"verbose={1.verbose_results!r}, "
"pastebin={1.pastebin!r}, "
"remote_data={1.remote_data!r}, "
"pep8={1.pep8!r}, "
"pdb={1.pdb!r}, "
"parallel={1.parallel!r}, "
"docs_path={1.docs_path!r}, "
"skip_docs={1.skip_docs!r}, "
"add_local_eggs_to_path=True, "
"repeat={1.repeat!r})); "
"{cmd_post}"
"sys.exit(result)"
)
return cmd.format(set_flag, self, cmd_pre=cmd_pre, cmd_post=cmd_post)
def run(self):
"""Run the tests!"""
# Install the runtime dependencies.
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
# Ensure there is a doc path
if self.docs_path is None:
cfg_docs_dir = self.distribution.get_option_dict("build_docs").get(
"source_dir", None
)
# Some affiliated packages use this.
# See astropy/package-template#157
if cfg_docs_dir is not None and os.path.exists(cfg_docs_dir[1]):
self.docs_path = os.path.abspath(cfg_docs_dir[1])
# fall back on a default path of "docs"
elif os.path.exists("docs"): # pragma: no cover
self.docs_path = os.path.abspath("docs")
# Build a testing install of the package
self._build_temp_install()
# Install the test dependencies
# NOTE: we do this here after _build_temp_install because there is
        # a weird bug which occurs if psutil is installed in this way before
        # astropy is built: Cython can have a segmentation fault. Strange, eh?
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
# Copy any additional dependencies that may have been installed via
        # tests_require or install_requires. We then pass the
# add_local_eggs_to_path=True option to package.test() to make sure the
# eggs get included in the path.
if os.path.exists(".eggs"):
shutil.copytree(".eggs", os.path.join(self.testing_path, ".eggs"))
# This option exists so that we can make sure that the tests don't
# write to an installed location.
if self.readonly:
log.info("changing permissions of temporary installation to read-only")
self._change_permissions_testing_path(writable=False)
# Run everything in a try: finally: so that the tmp dir gets deleted.
try:
            # Construct this module's testing command
cmd = self.generate_testing_command()
# Run the tests in a subprocess--this is necessary since
# new extension modules may have appeared, and this is the
# easiest way to set up a new environment
testproc = subprocess.Popen(
[sys.executable, "-c", cmd], cwd=self.testing_path, close_fds=False
)
retcode = testproc.wait()
except KeyboardInterrupt:
import signal
# If a keyboard interrupt is handled, pass it to the test
# subprocess to prompt pytest to initiate its teardown
testproc.send_signal(signal.SIGINT)
retcode = testproc.wait()
finally:
# Remove temporary directory
if self.readonly:
self._change_permissions_testing_path(writable=True)
shutil.rmtree(self.tmp_dir)
raise SystemExit(retcode)
def _build_temp_install(self):
"""
        Install the package into a temporary directory for the purposes of
        testing. This allows us to test the install command, including the
        entry points, and also avoids creating pyc and __pycache__ directories
inside the build directory.
"""
# On OSX the default path for temp files is under /var, but in most
# cases on OSX /var is actually a symlink to /private/var; ensure we
# dereference that link, because pytest is very sensitive to relative
# paths...
tmp_dir = tempfile.mkdtemp(
prefix=self.package_name + "-test-", dir=self.temp_root
)
self.tmp_dir = os.path.realpath(tmp_dir)
log.info(f"installing to temporary directory: {self.tmp_dir}")
# We now install the package to the temporary directory. We do this
# rather than build and copy because this will ensure that e.g. entry
# points work.
self.reinitialize_command("install")
install_cmd = self.distribution.get_command_obj("install")
install_cmd.prefix = self.tmp_dir
if self.verbose_install:
self.run_command("install")
else:
with _suppress_stdout():
self.run_command("install")
# We now get the path to the site-packages directory that was created
# inside self.tmp_dir
install_cmd = self.get_finalized_command("install")
self.testing_path = install_cmd.install_lib
# Ideally, docs_path is set properly in run(), but if it is still
# not set here, do not pretend it is, otherwise bad things happen.
# See astropy/package-template#157
if self.docs_path is not None:
new_docs_path = os.path.join(
self.testing_path, os.path.basename(self.docs_path)
)
shutil.copytree(self.docs_path, new_docs_path)
self.docs_path = new_docs_path
shutil.copy("setup.cfg", self.testing_path)
def _change_permissions_testing_path(self, writable=False):
if writable:
basic_flags = stat.S_IRUSR | stat.S_IWUSR
else:
basic_flags = stat.S_IRUSR
for root, dirs, files in os.walk(self.testing_path):
for dirname in dirs:
os.chmod(os.path.join(root, dirname), basic_flags | stat.S_IXUSR)
for filename in files:
os.chmod(os.path.join(root, filename), basic_flags)
def _generate_coverage_commands(self):
"""
This method creates the post and pre commands if coverage is to be
generated.
"""
if self.parallel != 0:
raise ValueError("--coverage can not be used with --parallel")
try:
import coverage # noqa: F401
except ImportError:
raise ImportError(
"--coverage requires that the coverage package is installed."
)
# Don't use get_pkg_data_filename here, because it
# requires importing astropy.config and thus screwing
# up coverage results for those packages.
coveragerc = os.path.join(
self.testing_path,
self.package_name.replace(".", "/"),
"tests",
"coveragerc",
)
with open(coveragerc) as fd:
coveragerc_content = fd.read()
coveragerc_content = coveragerc_content.replace(
"{packagename}", self.package_name.replace(".", "/")
)
tmp_coveragerc = os.path.join(self.tmp_dir, "coveragerc")
with open(tmp_coveragerc, "wb") as tmp:
tmp.write(coveragerc_content.encode("utf-8"))
cmd_pre = (
"import coverage; cov ="
f' coverage.coverage(data_file=r"{os.path.abspath(".coverage")}",'
f' config_file=r"{os.path.abspath(tmp_coveragerc)}"); cov.start();'
)
cmd_post = (
"cov.stop(); from astropy.tests.helper import _save_coverage;"
f' _save_coverage(cov, result, r"{os.path.abspath(".")}",'
f' r"{os.path.abspath(self.testing_path)}");'
)
return cmd_pre, cmd_post
|
1c8e12abb993f7e75ff203f093ae81357585e9a45ac8f79afb34ef1a2495389a | """Implements the Astropy TestRunner which is a thin wrapper around pytest."""
import copy
import glob
import inspect
import os
import shlex
import sys
import tempfile
import warnings
from collections import OrderedDict
from functools import wraps
from importlib.util import find_spec
from astropy.config.paths import set_temp_cache, set_temp_config
from astropy.utils import find_current_module
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
__all__ = ["TestRunner", "TestRunnerBase", "keyword"]
class keyword:
"""
A decorator to mark a method as keyword argument for the ``TestRunner``.
Parameters
----------
default_value : `object`
The default value for the keyword argument. (Default: `None`)
priority : `int`
keyword argument methods are executed in order of descending priority.
"""
def __init__(self, default_value=None, priority=0):
self.default_value = default_value
self.priority = priority
def __call__(self, f):
def keyword(*args, **kwargs):
return f(*args, **kwargs)
keyword._default_value = self.default_value
keyword._priority = self.priority
# Set __doc__ explicitly here rather than using wraps because we want
# to keep the function name as keyword so we can inspect it later.
keyword.__doc__ = f.__doc__
return keyword
class TestRunnerBase:
"""
The base class for the TestRunner.
A test runner can be constructed by creating a subclass of this class and
defining 'keyword' methods. These are methods that have the
:class:`~astropy.tests.runner.keyword` decorator, these methods are used to
construct allowed keyword arguments to the
`~astropy.tests.runner.TestRunnerBase.run_tests` method as a way to allow
customization of individual keyword arguments (and associated logic)
without having to re-implement the whole
`~astropy.tests.runner.TestRunnerBase.run_tests` method.
Examples
--------
A simple keyword method::
class MyRunner(TestRunnerBase):
            @keyword('default_value')
def spam(self, spam, kwargs):
\"\"\"
spam : `str`
The parameter description for the run_tests docstring.
\"\"\"
# Return value must be a list with a CLI parameter for pytest.
return ['--spam={}'.format(spam)]
"""
def __init__(self, base_path):
self.base_path = os.path.abspath(base_path)
def __new__(cls, *args, **kwargs):
# Before constructing the class parse all the methods that have been
# decorated with ``keyword``.
# The objective of this method is to construct a default set of keyword
# arguments to the ``run_tests`` method. It does this by inspecting the
# methods of the class for functions with the name ``keyword`` which is
# the name of the decorator wrapping function. Once it has created this
# dictionary, it also formats the docstring of ``run_tests`` to be
# comprised of the docstrings for the ``keyword`` methods.
# To add a keyword argument to the ``run_tests`` method, define a new
# method decorated with ``@keyword`` and with the ``self, name, kwargs``
# signature.
# Get all 'function' members as the wrapped methods are functions
functions = inspect.getmembers(cls, predicate=inspect.isfunction)
# Filter out anything that's not got the name 'keyword'
keywords = filter(lambda func: func[1].__name__ == "keyword", functions)
# Sort all keywords based on the priority flag.
sorted_keywords = sorted(keywords, key=lambda x: x[1]._priority, reverse=True)
cls.keywords = OrderedDict()
doc_keywords = ""
for name, func in sorted_keywords:
# Here we test if the function has been overloaded to return
# NotImplemented which is the way to disable arguments on
# subclasses. If it has been disabled we need to remove it from the
# default keywords dict. We do it in the try except block because
# we do not have access to an instance of the class, so this is
# going to error unless the method is just doing `return
# NotImplemented`.
try:
# Second argument is False, as it is normally a bool.
# The other two are placeholders for objects.
if func(None, False, None) is NotImplemented:
continue
except Exception:
pass
# Construct the default kwargs dict and docstring
cls.keywords[name] = func._default_value
if func.__doc__:
doc_keywords += " " * 8
doc_keywords += func.__doc__.strip()
doc_keywords += "\n\n"
cls.run_tests.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords)
return super().__new__(cls)
def _generate_args(self, **kwargs):
# Update default values with passed kwargs
# but don't modify the defaults
keywords = copy.deepcopy(self.keywords)
keywords.update(kwargs)
# Iterate through the keywords (in order of priority)
args = []
for keyword in keywords.keys():
func = getattr(self, keyword)
result = func(keywords[keyword], keywords)
# Allow disabling of options in a subclass
if result is NotImplemented:
raise TypeError(
f"run_tests() got an unexpected keyword argument {keyword}"
)
# keyword methods must return a list
if not isinstance(result, list):
raise TypeError(f"{keyword} keyword method must return a list")
args += result
return args
RUN_TESTS_DOCSTRING = """
Run the tests for the package.
This method builds arguments for and then calls ``pytest.main``.
Parameters
----------
{keywords}
"""
_required_dependencies = [
"pytest",
"pytest_remotedata",
"pytest_doctestplus",
"pytest_astropy_header",
]
_missing_dependancy_error = (
"Test dependencies are missing: {}. You should install the "
"'pytest-astropy' package (you may need to update the package if you "
"have a previous version installed, e.g., "
"'pip install pytest-astropy --upgrade' or the equivalent with conda)."
)
@classmethod
def _has_test_dependencies(cls): # pragma: no cover
# Using the test runner will not work without these dependencies.
for module in cls._required_dependencies:
spec = find_spec(module)
# Checking loader accounts for packages that were uninstalled.
            # pytest plugins are special: it's enough if they are picked up by
            # pytest independently of how they are installed.
if spec is None or spec.loader is None:
# Don't import pytest until it's actually needed
import pytest
pluginmanager = pytest.PytestPluginManager()
try:
pluginmanager.import_plugin(module)
except ImportError:
raise RuntimeError(cls._missing_dependancy_error.format(module))
def run_tests(self, **kwargs):
# The following option will include eggs inside a .eggs folder in
        # sys.path when running the tests. This makes it possible for test
        # dependencies installed via e.g. tests_require to be available when
        # running pytest. This is not an advertised option since it is only
        # for internal use.
if kwargs.pop("add_local_eggs_to_path", False):
# Add each egg to sys.path individually
for egg in glob.glob(os.path.join(".eggs", "*.egg")):
sys.path.insert(0, egg)
self._has_test_dependencies() # pragma: no cover
# The docstring for this method is defined as a class variable.
# This allows it to be built for each subclass in __new__.
# Don't import pytest until it's actually needed to run the tests
import pytest
# Raise error for undefined kwargs
allowed_kwargs = set(self.keywords.keys())
passed_kwargs = set(kwargs.keys())
if not passed_kwargs.issubset(allowed_kwargs):
wrong_kwargs = list(passed_kwargs.difference(allowed_kwargs))
raise TypeError(
f"run_tests() got an unexpected keyword argument {wrong_kwargs[0]}"
)
args = self._generate_args(**kwargs)
if kwargs.get("plugins", None) is not None:
plugins = kwargs.pop("plugins")
elif self.keywords.get("plugins", None) is not None:
plugins = self.keywords["plugins"]
else:
plugins = []
# Override the config locations to not make a new directory nor use
# existing cache or config. Note that we need to do this here in
# addition to in conftest.py - for users running tests interactively
# in e.g. IPython, conftest.py would get read in too late, so we need
# to do it here - but at the same time the code here doesn't work when
# running tests in parallel mode because this uses subprocesses which
# don't know about the temporary config/cache.
astropy_config = tempfile.mkdtemp("astropy_config")
astropy_cache = tempfile.mkdtemp("astropy_cache")
# Have to use nested with statements for cross-Python support
# Note, using these context managers here is superfluous if the
# config_dir or cache_dir options to pytest are in use, but it's
# also harmless to nest the contexts
with set_temp_config(astropy_config, delete=True):
with set_temp_cache(astropy_cache, delete=True):
return pytest.main(args=args, plugins=plugins)
@classmethod
def make_test_runner_in(cls, path):
"""
Constructs a `TestRunner` to run in the given path, and returns a
``test()`` function which takes the same arguments as
`~astropy.tests.runner.TestRunner.run_tests`.
The returned ``test()`` function will be defined in the module this
was called from. This is used to implement the ``astropy.test()``
function (or the equivalent for affiliated packages).
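        For example, a package could expose its ``test()`` function from
        ``__init__.py`` roughly as follows (a sketch only; real packages may
        handle the path differently)::
            import os
            from astropy.tests.runner import TestRunner
            test = TestRunner.make_test_runner_in(os.path.dirname(__file__))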
"""
runner = cls(path)
@wraps(runner.run_tests, ("__doc__",))
def test(**kwargs):
return runner.run_tests(**kwargs)
module = find_current_module(2)
if module is not None:
test.__module__ = module.__name__
        # A somewhat unusual hack, but delete the attached __wrapped__
        # attribute. Although this is normally used to tell if the function
        # was wrapped with wraps, on some versions of Python it is also
        # used to determine the signature to display in help(), which is
        # not useful in this case. We don't really care here whether the
        # function was wrapped either way.
if hasattr(test, "__wrapped__"):
del test.__wrapped__
test.__test__ = False
return test
class TestRunner(TestRunnerBase):
"""
A test runner for astropy tests.
"""
def packages_path(self, packages, base_path, error=None, warning=None):
"""
Generates the path for multiple packages.
Parameters
----------
packages : str
Comma separated string of packages.
base_path : str
Base path to the source code or documentation.
error : str
Error message to be raised as ``ValueError``. Individual package
name and path can be accessed by ``{name}`` and ``{path}``
respectively. No error is raised if `None`. (Default: `None`)
warning : str
Warning message to be issued. Individual package
name and path can be accessed by ``{name}`` and ``{path}``
            respectively. No warning is issued if `None`. (Default: `None`)
Returns
-------
paths : list of str
List of strings of existing package paths.
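        Examples
        --------
        A hypothetical call (runner instance and paths are illustrative only)::
            >>> runner.packages_path("io.fits,utils", "/src/astropy")  # doctest: +SKIP
            ['/src/astropy/io/fits', '/src/astropy/utils']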
"""
packages = packages.split(",")
paths = []
for package in packages:
path = os.path.join(base_path, package.replace(".", os.path.sep))
if not os.path.isdir(path):
info = {"name": package, "path": path}
if error is not None:
raise ValueError(error.format(**info))
if warning is not None:
warnings.warn(warning.format(**info))
else:
paths.append(path)
return paths
# Increase priority so this warning is displayed first.
@keyword(priority=1000)
def coverage(self, coverage, kwargs):
if coverage:
warnings.warn(
"The coverage option is ignored on run_tests, since it "
"can not be made to work in that context. Use "
"'python setup.py test --coverage' instead.",
AstropyWarning,
)
return []
# test_path depends on self.package_path so make sure this runs before
# test_path.
@keyword(priority=1)
def package(self, package, kwargs):
"""
package : str, optional
The name of a specific package to test, e.g. 'io.fits' or
'utils'. Accepts comma separated string to specify multiple
packages. If nothing is specified all default tests are run.
"""
if package is None:
self.package_path = [self.base_path]
else:
error_message = "package to test is not found: {name} (at path {path})."
self.package_path = self.packages_path(
package, self.base_path, error=error_message
)
if not kwargs["test_path"]:
return self.package_path
return []
@keyword()
def test_path(self, test_path, kwargs):
"""
test_path : str, optional
Specify location to test by path. May be a single file or
directory. Must be specified absolutely or relative to the
calling directory.
"""
all_args = []
# Ensure that the package kwarg has been run.
self.package(kwargs["package"], kwargs)
if test_path:
base, ext = os.path.splitext(test_path)
if ext in (".rst", ""):
if kwargs["docs_path"] is None:
# This shouldn't happen from "python setup.py test"
raise ValueError(
"Can not test .rst files without a docs_path specified."
)
abs_docs_path = os.path.abspath(kwargs["docs_path"])
abs_test_path = os.path.abspath(
os.path.join(abs_docs_path, os.pardir, test_path)
)
common = os.path.commonprefix((abs_docs_path, abs_test_path))
if os.path.exists(abs_test_path) and common == abs_docs_path:
# Turn on the doctest_rst plugin
all_args.append("--doctest-rst")
test_path = abs_test_path
# Check that the extensions are in the path and not at the end to
# support specifying the name of the test, i.e.
# test_quantity.py::test_unit
if not (
os.path.isdir(test_path) or (".py" in test_path or ".rst" in test_path)
):
raise ValueError(
"Test path must be a directory or a path to a .py or .rst file"
)
return all_args + [test_path]
return []
@keyword()
def args(self, args, kwargs):
"""
args : str, optional
Additional arguments to be passed to ``pytest.main`` in the ``args``
keyword argument.
"""
if args:
return shlex.split(args, posix=not sys.platform.startswith("win"))
return []
@keyword(default_value=[])
def plugins(self, plugins, kwargs):
"""
plugins : list, optional
Plugins to be passed to ``pytest.main`` in the ``plugins`` keyword
argument.
"""
# Plugins are handled independently by `run_tests` so we define this
# keyword just for the docstring
return []
@keyword()
def verbose(self, verbose, kwargs):
"""
verbose : bool, optional
Convenience option to turn on verbose output from pytest. Passing
True is the same as specifying ``-v`` in ``args``.
"""
if verbose:
return ["-v"]
return []
@keyword()
def pastebin(self, pastebin, kwargs):
"""
pastebin : ('failed', 'all', None), optional
Convenience option for turning on pytest pastebin output. Set to
'failed' to upload info for failed tests, or 'all' to upload info
for all tests.
"""
if pastebin is not None:
if pastebin in ["failed", "all"]:
return [f"--pastebin={pastebin}"]
else:
raise ValueError("pastebin should be 'failed' or 'all'")
return []
@keyword(default_value="none")
def remote_data(self, remote_data, kwargs):
"""
remote_data : {'none', 'astropy', 'any'}, optional
Controls whether to run tests marked with @pytest.mark.remote_data. This can be
set to run no tests with remote data (``none``), only ones that use
data from http://data.astropy.org (``astropy``), or all tests that
use remote data (``any``). The default is ``none``.
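            For instance, ``astropy.test(remote_data='astropy')`` would run
            only the remote-data tests that fetch from data.astropy.org
            (illustrative; requires network access).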
"""
if remote_data is True:
remote_data = "any"
elif remote_data is False:
remote_data = "none"
elif remote_data not in ("none", "astropy", "any"):
warnings.warn(
"The remote_data option should be one of "
f"none/astropy/any (found {remote_data}). For backward-compatibility, "
"assuming 'any', but you should change the option to be "
"one of the supported ones to avoid issues in "
"future.",
AstropyDeprecationWarning,
)
remote_data = "any"
return [f"--remote-data={remote_data}"]
@keyword()
def pep8(self, pep8, kwargs):
"""
pep8 : bool, optional
Turn on PEP8 checking via the pytest-pep8 plugin and disable normal
tests. Same as specifying ``--pep8 -k pep8`` in ``args``.
"""
if pep8:
try:
import pytest_pep8 # noqa: F401
except ImportError:
raise ImportError(
"PEP8 checking requires pytest-pep8 plugin: "
"https://pypi.org/project/pytest-pep8"
)
else:
return ["--pep8", "-k", "pep8"]
return []
@keyword()
def pdb(self, pdb, kwargs):
"""
pdb : bool, optional
Turn on PDB post-mortem analysis for failing tests. Same as
specifying ``--pdb`` in ``args``.
"""
if pdb:
return ["--pdb"]
return []
@keyword(0)
def parallel(self, parallel, kwargs):
"""
parallel : int or 'auto', optional
When provided, run the tests in parallel on the specified
            number of CPUs. If parallel is ``'auto'``, it will use all the
            cores on the machine. Requires the ``pytest-xdist`` plugin.
"""
if parallel != 0:
try:
from xdist import plugin # noqa: F401
except ImportError:
raise SystemError(
"running tests in parallel requires the pytest-xdist package"
)
return ["-n", str(parallel)]
return []
@keyword()
def docs_path(self, docs_path, kwargs):
"""
docs_path : str, optional
The path to the documentation .rst files.
"""
paths = []
if docs_path is not None and not kwargs["skip_docs"]:
if kwargs["package"] is not None:
warning_message = (
"Can not test .rst docs for {name}, since "
"docs path ({path}) does not exist."
)
paths = self.packages_path(
kwargs["package"], docs_path, warning=warning_message
)
elif not kwargs["test_path"]:
paths = [docs_path]
if len(paths) and not kwargs["test_path"]:
paths.append("--doctest-rst")
return paths
@keyword()
def skip_docs(self, skip_docs, kwargs):
"""
skip_docs : `bool`, optional
When `True`, skips running the doctests in the .rst files.
"""
# Skip docs is a bool used by docs_path only.
return []
@keyword()
def repeat(self, repeat, kwargs):
"""
repeat : `int`, optional
If set, specifies how many times each test should be run. This is
useful for diagnosing sporadic failures.
"""
if repeat:
return [f"--repeat={repeat}"]
return []
# Override run_tests for astropy-specific fixes
def run_tests(self, **kwargs):
# This prevents cyclical import problems that make it
# impossible to test packages that define Table types on their
# own.
from astropy.table import Table # noqa: F401
return super().run_tests(**kwargs)
|
1f464e42210d84e4fc553a7c4950e8bfe344ac706376181fd05cd73e5af10ff8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import gc
import os
import pathlib
import pickle
import sys
from collections import OrderedDict
from io import StringIO
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy import table
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.table import (
Column,
MaskedColumn,
QTable,
Table,
TableAttribute,
TableReplaceWarning,
)
from astropy.tests.helper import assert_follows_unicode_guidelines
from astropy.time import Time, TimeDelta
from astropy.utils.compat import NUMPY_LT_1_25
from astropy.utils.compat.optional_deps import HAS_PANDAS
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.tests.test_metadata import MetaBaseTest
from .conftest import MIXIN_COLS, MaskedTable
@pytest.fixture
def home_is_tmpdir(monkeypatch, tmp_path):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv("HOME", str(tmp_path))
# For Windows
monkeypatch.setenv("USERPROFILE", str(tmp_path))
class SetupData:
def _setup(self, table_types):
self._table_type = table_types.Table
self._column_type = table_types.Column
@property
def a(self):
if self._column_type is not None:
if not hasattr(self, "_a"):
self._a = self._column_type(
[1, 2, 3], name="a", format="%d", meta={"aa": [0, 1, 2, 3, 4]}
)
return self._a
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, "_b"):
self._b = self._column_type(
[4, 5, 6], name="b", format="%d", meta={"aa": 1}
)
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, "_c"):
self._c = self._column_type([7, 8, 9], "c")
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, "_d"):
self._d = self._column_type([7, 8, 7], "d")
return self._d
@property
def obj(self):
if self._column_type is not None:
if not hasattr(self, "_obj"):
self._obj = self._column_type([1, "string", 3], "obj", dtype="O")
return self._obj
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, "_t"):
self._t = self._table_type([self.a, self.b])
return self._t
@pytest.mark.usefixtures("table_types")
class TestSetTableColumn(SetupData):
def test_set_row(self, table_types):
"""Set a row from a tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[1] = (20, 21)
assert t["a"][0] == 1
assert t["a"][1] == 20
assert t["a"][2] == 3
assert t["b"][0] == 4
assert t["b"][1] == 21
assert t["b"][2] == 6
def test_set_row_existing(self, table_types):
"""Set a row from another existing row"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[0] = t[1]
assert t[0][0] == 2
assert t[0][1] == 5
def test_set_row_fail_1(self, table_types):
"""Set a row from an incorrectly-sized or typed set of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = (20, 21, 22)
with pytest.raises(ValueError):
t[1] = 0
def test_set_row_fail_2(self, table_types):
"""Set a row from an incorrectly-typed tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = ("abc", "def")
def test_set_new_col_new_table(self, table_types):
"""Create a new column in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t["aa"] = self.a
# Test that the new column name is 'aa' and that the values match
assert np.all(t["aa"] == self.a)
assert t.colnames == ["aa"]
def test_set_new_col_new_table_quantity(self, table_types):
"""Create a new column (from a quantity) in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t["aa"] = np.array([1, 2, 3]) * u.m
assert np.all(t["aa"] == np.array([1, 2, 3]))
assert t["aa"].unit == u.m
t["bb"] = 3 * u.m
assert np.all(t["bb"] == 3)
assert t["bb"].unit == u.m
def test_set_new_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Add a column
t["bb"] = self.b
assert np.all(t["bb"] == self.b)
assert t.colnames == ["a", "bb"]
assert t["bb"].meta == self.b.meta
assert t["bb"].format == self.b.format
# Add another column
t["c"] = t["a"]
assert np.all(t["c"] == t["a"])
assert t.colnames == ["a", "bb", "c"]
assert t["c"].meta == t["a"].meta
assert t["c"].format == t["a"].format
# Add a multi-dimensional column
t["d"] = table_types.Column(np.arange(12).reshape(3, 2, 2))
assert t["d"].shape == (3, 2, 2)
assert t["d"][0, 0, 1] == 1
# Add column from a list
t["e"] = ["hello", "the", "world"]
assert np.all(t["e"] == np.array(["hello", "the", "world"]))
# Make sure setting existing column still works
t["e"] = ["world", "hello", "the"]
assert np.all(t["e"] == np.array(["world", "hello", "the"]))
# Add a column via broadcasting
t["f"] = 10
assert np.all(t["f"] == 10)
# Add a column from a Quantity
t["g"] = np.array([1, 2, 3]) * u.m
assert np.all(t["g"].data == np.array([1, 2, 3]))
assert t["g"].unit == u.m
# Add a column from a (scalar) Quantity
t["g"] = 3 * u.m
assert np.all(t["g"].data == 3)
assert t["g"].unit == u.m
def test_set_new_unmasked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.Column(name="b", data=[1, 2, 3]) # unmasked
t["b"] = b
assert np.all(t["b"] == b)
def test_set_new_masked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.MaskedColumn(name="b", data=[1, 2, 3]) # masked
t["b"] = b
assert np.all(t["b"] == b)
def test_set_new_col_existing_table_fail(self, table_types):
"""Generate failure when creating a new column using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Wrong size
with pytest.raises(ValueError):
t["b"] = [1, 2]
@pytest.mark.usefixtures("table_types")
class TestEmptyData:
def test_1(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", dtype=int, length=100))
assert len(t["a"]) == 100
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", dtype=int, shape=(3,), length=100))
assert len(t["a"]) == 100
def test_3(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name="a", dtype=int))
assert len(t["a"]) == 0
def test_4(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name="a", dtype=int, shape=(3, 4)))
assert len(t["a"]) == 0
def test_5(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a")) # dtype is not specified
assert len(t["a"]) == 0
def test_scalar(self, table_types):
"""Test related to #3811 where setting empty tables to scalar values
should raise an error instead of having an error raised when accessing
the table."""
t = table_types.Table()
with pytest.raises(
TypeError, match="Empty table cannot have column set to scalar value"
):
t.add_column(0)
def test_add_via_setitem_and_slice(self, table_types):
"""Test related to #3023 where a MaskedColumn is created with name=None
and then gets changed to name='a'. After PR #2790 this test fails
without the #3023 fix."""
t = table_types.Table()
t["a"] = table_types.Column([1, 2, 3])
t2 = t[:]
assert t2.colnames == t.colnames
@pytest.mark.usefixtures("table_types")
class TestNewFromColumns:
def test_simple(self, table_types):
cols = [
table_types.Column(name="a", data=[1, 2, 3]),
table_types.Column(name="b", data=[4, 5, 6], dtype=np.float32),
]
t = table_types.Table(cols)
assert np.all(t["a"].data == np.array([1, 2, 3]))
assert np.all(t["b"].data == np.array([4, 5, 6], dtype=np.float32))
assert type(t["b"][1]) is np.float32
def test_from_np_array(self, table_types):
cols = [
table_types.Column(
name="a", data=np.array([1, 2, 3], dtype=np.int64), dtype=np.float64
),
table_types.Column(name="b", data=np.array([4, 5, 6], dtype=np.float32)),
]
t = table_types.Table(cols)
assert np.all(t["a"] == np.array([1, 2, 3], dtype=np.float64))
assert np.all(t["b"] == np.array([4, 5, 6], dtype=np.float32))
assert type(t["a"][1]) is np.float64
assert type(t["b"][1]) is np.float32
def test_size_mismatch(self, table_types):
cols = [
table_types.Column(name="a", data=[1, 2, 3]),
table_types.Column(name="b", data=[4, 5, 6, 7]),
]
with pytest.raises(ValueError):
table_types.Table(cols)
def test_name_none(self, table_types):
"""Column with name=None can init a table whether or not names are supplied"""
c = table_types.Column(data=[1, 2], name="c")
d = table_types.Column(data=[3, 4])
t = table_types.Table([c, d], names=(None, "d"))
assert t.colnames == ["c", "d"]
t = table_types.Table([c, d])
assert t.colnames == ["c", "col1"]
@pytest.mark.usefixtures("table_types")
class TestReverse:
def test_reverse(self, table_types):
t = table_types.Table(
[
[1, 2, 3],
["a", "b", "cc"],
]
)
t.reverse()
assert np.all(t["col0"] == np.array([3, 2, 1]))
assert np.all(t["col1"] == np.array(["cc", "b", "a"]))
t2 = table_types.Table(t, copy=False)
assert np.all(t2["col0"] == np.array([3, 2, 1]))
assert np.all(t2["col1"] == np.array(["cc", "b", "a"]))
t2 = table_types.Table(t, copy=True)
assert np.all(t2["col0"] == np.array([3, 2, 1]))
assert np.all(t2["col1"] == np.array(["cc", "b", "a"]))
t2.sort("col0")
assert np.all(t2["col0"] == np.array([1, 2, 3]))
assert np.all(t2["col1"] == np.array(["a", "b", "cc"]))
def test_reverse_big(self, table_types):
x = np.arange(10000)
y = x + 1
t = table_types.Table([x, y], names=("x", "y"))
t.reverse()
assert np.all(t["x"] == x[::-1])
assert np.all(t["y"] == y[::-1])
def test_reverse_mixin(self):
"""Test reverse for a mixin with no item assignment, fix for #9836"""
sc = SkyCoord([1, 2], [3, 4], unit="deg")
t = Table([[2, 1], sc], names=["a", "sc"])
t.reverse()
assert np.all(t["a"] == [1, 2])
assert np.allclose(t["sc"].ra.to_value("deg"), [2, 1])
@pytest.mark.usefixtures("table_types")
class TestRound:
def test_round_int(self, table_types):
t = table_types.Table(
[
["a", "b", "c"],
[1.11, 2.3, 3.0],
[1.123456, 2.9876, 3.901],
]
)
t.round()
assert np.all(t["col0"] == ["a", "b", "c"])
assert np.all(t["col1"] == [1.0, 2.0, 3.0])
assert np.all(t["col2"] == [1.0, 3.0, 4.0])
def test_round_dict(self, table_types):
t = table_types.Table(
[
["a", "b", "c"],
[1.5, 2.5, 3.0111],
[1.123456, 2.9876, 3.901],
]
)
t.round({"col1": 0, "col2": 3})
assert np.all(t["col0"] == ["a", "b", "c"])
assert np.all(t["col1"] == [2.0, 2.0, 3.0])
assert np.all(t["col2"] == [1.123, 2.988, 3.901])
def test_round_invalid(self, table_types):
t = table_types.Table([[1, 2, 3]])
with pytest.raises(
ValueError, match="'decimals' argument must be an int or a dict"
):
t.round(0.5)
def test_round_kind(self, table_types):
for typecode in "bBhHiIlLqQpPefdgFDG": # AllInteger, AllFloat
arr = np.array([4, 16], dtype=typecode)
t = Table([arr])
col0 = t["col0"]
t.round(decimals=-1) # Round to nearest 10
assert np.all(t["col0"] == [0, 20])
assert t["col0"] is col0
@pytest.mark.usefixtures("table_types")
class TestColumnAccess:
def test_1(self, table_types):
t = table_types.Table()
with pytest.raises(KeyError):
t["a"]
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[1, 2, 3]))
assert np.all(t["a"] == np.array([1, 2, 3]))
with pytest.raises(KeyError):
t["b"] # column does not exist
def test_itercols(self, table_types):
names = ["a", "b", "c"]
t = table_types.Table([[1], [2], [3]], names=names)
for name, col in zip(names, t.itercols()):
assert name == col.name
assert isinstance(col, table_types.Column)
@pytest.mark.usefixtures("table_types")
class TestAddLength(SetupData):
def test_right_length(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b)
def test_too_long(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(
table_types.Column(name="b", data=[4, 5, 6, 7])
) # data too long
def test_too_short(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name="b", data=[4, 5])) # data too short
@pytest.mark.usefixtures("table_types")
class TestAddPosition(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 0)
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 1)
def test_3(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, -1)
def test_5(self, table_types):
self._setup(table_types)
t = table_types.Table()
with pytest.raises(ValueError):
t.index_column("b")
def test_6(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b)
assert t.colnames == ["a", "b"]
def test_7(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column("a"))
assert t.colnames == ["b", "a"]
def test_8(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column("a") + 1)
assert t.colnames == ["a", "b"]
def test_9(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b, t.index_column("a") + 1)
t.add_column(self.c, t.index_column("b"))
assert t.colnames == ["a", "c", "b"]
def test_10(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
ia = t.index_column("a")
t.add_column(self.b, ia + 1)
t.add_column(self.c, ia)
assert t.colnames == ["c", "a", "b"]
@pytest.mark.usefixtures("table_types")
class TestAddName(SetupData):
def test_override_name(self, table_types):
self._setup(table_types)
t = table_types.Table()
# Check that we can override the name of the input column in the Table
t.add_column(self.a, name="b")
t.add_column(self.b, name="a")
assert t.colnames == ["b", "a"]
# Check that we did not change the name of the input column
assert self.a.info.name == "a"
assert self.b.info.name == "b"
# Now test with an input column from another table
t2 = table_types.Table()
t2.add_column(t["a"], name="c")
assert t2.colnames == ["c"]
# Check that we did not change the name of the input column
assert t.colnames == ["b", "a"]
# Check that we can give a name if none was present
col = table_types.Column([1, 2, 3])
t.add_column(col, name="c")
assert t.colnames == ["b", "a", "c"]
def test_default_name(self, table_types):
t = table_types.Table()
col = table_types.Column([1, 2, 3])
t.add_column(col)
assert t.colnames == ["col0"]
@pytest.mark.usefixtures("table_types")
class TestInitFromTable(SetupData):
def test_from_table_cols(self, table_types):
"""Ensure that using cols from an existing table gives
a clean copy.
"""
self._setup(table_types)
t = self.t
cols = t.columns
# Construct Table with cols via Table._new_from_cols
t2a = table_types.Table([cols["a"], cols["b"], self.c])
# Construct with add_column
t2b = table_types.Table()
t2b.add_column(cols["a"])
t2b.add_column(cols["b"])
t2b.add_column(self.c)
t["a"][1] = 20
t["b"][1] = 21
for t2 in [t2a, t2b]:
t2["a"][2] = 10
t2["b"][2] = 11
t2["c"][2] = 12
t2.columns["a"].meta["aa"][3] = 10
assert np.all(t["a"] == np.array([1, 20, 3]))
assert np.all(t["b"] == np.array([4, 21, 6]))
assert np.all(t2["a"] == np.array([1, 2, 10]))
assert np.all(t2["b"] == np.array([4, 5, 11]))
assert np.all(t2["c"] == np.array([7, 8, 12]))
assert t2["a"].name == "a"
assert t2.columns["a"].meta["aa"][3] == 10
assert t.columns["a"].meta["aa"][3] == 3
@pytest.mark.usefixtures("table_types")
class TestAddColumns(SetupData):
def test_add_columns1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c])
assert t.colnames == ["a", "b", "c"]
def test_add_columns2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d])
assert t.colnames == ["a", "b", "c", "d"]
assert np.all(t["c"] == np.array([7, 8, 9]))
def test_add_columns3(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[1, 0])
assert t.colnames == ["d", "a", "c", "b"]
def test_add_columns4(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[0, 0])
assert t.colnames == ["c", "d", "a", "b"]
def test_add_columns5(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[2, 2])
assert t.colnames == ["a", "b", "c", "d"]
def test_add_columns6(self, table_types):
"""Check that we can override column names."""
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c], names=["b", "c", "a"])
assert t.colnames == ["b", "c", "a"]
def test_add_columns7(self, table_types):
"""Check that default names are used when appropriate."""
t = table_types.Table()
col0 = table_types.Column([1, 2, 3])
col1 = table_types.Column([4, 5, 3])
t.add_columns([col0, col1])
assert t.colnames == ["col0", "col1"]
def test_add_duplicate_column(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(table_types.Column(name="a", data=[0, 1, 2]))
t.add_column(
table_types.Column(name="a", data=[0, 1, 2]), rename_duplicate=True
)
t.add_column(self.b)
t.add_column(self.c)
assert t.colnames == ["a", "a_1", "b", "c"]
t.add_column(
table_types.Column(name="a", data=[0, 1, 2]), rename_duplicate=True
)
assert t.colnames == ["a", "a_1", "b", "c", "a_2"]
# test adding column from a separate Table
t1 = table_types.Table()
t1.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(t1["a"])
t.add_column(t1["a"], rename_duplicate=True)
t1["a"][0] = 100 # Change original column
assert t.colnames == ["a", "a_1", "b", "c", "a_2", "a_3"]
assert t1.colnames == ["a"]
# Check new column didn't change (since name conflict forced a copy)
assert t["a_3"][0] == self.a[0]
# Check that rename_duplicate=True is ok if there are no duplicates
t.add_column(
table_types.Column(name="q", data=[0, 1, 2]), rename_duplicate=True
)
assert t.colnames == ["a", "a_1", "b", "c", "a_2", "a_3", "q"]
def test_add_duplicate_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
with pytest.raises(ValueError):
t.add_columns(
[
table_types.Column(name="a", data=[0, 1, 2]),
table_types.Column(name="b", data=[0, 1, 2]),
]
)
t.add_columns(
[
table_types.Column(name="a", data=[0, 1, 2]),
table_types.Column(name="b", data=[0, 1, 2]),
],
rename_duplicate=True,
)
t.add_column(self.d)
assert t.colnames == ["a", "b", "c", "a_1", "b_1", "d"]
@pytest.mark.usefixtures("table_types")
class TestAddRow(SetupData):
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, "_b"):
self._b = self._column_type(name="b", data=[4.0, 5.1, 6.2])
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, "_c"):
self._c = self._column_type(name="c", data=["7", "8", "9"])
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, "_d"):
self._d = self._column_type(name="d", data=[[1, 2], [3, 4], [5, 6]])
return self._d
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, "_t"):
self._t = self._table_type([self.a, self.b, self.c])
return self._t
def test_add_none_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=("a", "b", "c"), dtype=("(2,)i", "S4", "O"))
t.add_row()
assert np.all(t["a"][0] == [0, 0])
assert t["b"][0] == ""
assert t["c"][0] == 0
t.add_row()
assert np.all(t["a"][1] == [0, 0])
assert t["b"][1] == ""
assert t["c"][1] == 0
def test_add_stuff_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=("a", "b", "obj"), dtype=("(2,)i", "S8", "O"))
t.add_row([[1, 2], "hello", "world"])
assert np.all(t["a"][0] == [1, 2])
assert t["b"][0] == "hello"
assert t["obj"][0] == "world"
# Make sure it is not repeating the last row but instead
# adding zeros (as documented)
t.add_row()
assert np.all(t["a"][1] == [0, 0])
assert t["b"][1] == ""
assert t["obj"][1] == 0
def test_add_table_row(self, table_types):
self._setup(table_types)
t = self.t
t["d"] = self.d
t2 = table_types.Table([self.a, self.b, self.c, self.d])
t.add_row(t2[0])
assert len(t) == 4
assert np.all(t["a"] == np.array([1, 2, 3, 1]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t["c"] == np.array(["7", "8", "9", "7"]))
assert np.all(t["d"] == np.array([[1, 2], [3, 4], [5, 6], [1, 2]]))
def test_add_table_row_obj(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.obj])
t.add_row([1, 4.0, [10]])
assert len(t) == 4
assert np.all(t["a"] == np.array([1, 2, 3, 1]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t["obj"] == np.array([1, "string", 3, [10]], dtype="O"))
def test_add_qtable_row_multidimensional(self):
q = [[1, 2], [3, 4]] * u.m
qt = table.QTable([q])
qt.add_row(([5, 6] * u.km,))
assert np.all(qt["col0"] == [[1, 2], [3, 4], [5000, 6000]] * u.m)
def test_add_with_tuple(self, table_types):
self._setup(table_types)
t = self.t
t.add_row((4, 7.2, "1"))
assert len(t) == 4
assert np.all(t["a"] == np.array([1, 2, 3, 4]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t["c"] == np.array(["7", "8", "9", "1"]))
def test_add_with_list(self, table_types):
self._setup(table_types)
t = self.t
t.add_row([4, 7.2, "10"])
assert len(t) == 4
assert np.all(t["a"] == np.array([1, 2, 3, 4]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t["c"] == np.array(["7", "8", "9", "10"]))
def test_add_with_dict(self, table_types):
self._setup(table_types)
t = self.t
t.add_row({"a": 4, "b": 7.2})
assert len(t) == 4
assert np.all(t["a"] == np.array([1, 2, 3, 4]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 7.2]))
if t.masked:
assert np.all(t["c"] == np.array(["7", "8", "9", "7"]))
else:
assert np.all(t["c"] == np.array(["7", "8", "9", ""]))
def test_add_with_none(self, table_types):
self._setup(table_types)
t = self.t
t.add_row()
assert len(t) == 4
assert np.all(t["a"].data == np.array([1, 2, 3, 0]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 0.0]))
assert np.all(t["c"].data == np.array(["7", "8", "9", ""]))
def test_add_missing_column(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row({"bad_column": 1})
def test_wrong_size_tuple(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row((1, 2))
def test_wrong_vals_type(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(TypeError):
t.add_row(1)
def test_add_row_failures(self, table_types):
self._setup(table_types)
t = self.t
t_copy = table_types.Table(t, copy=True)
# Wrong number of columns
try:
t.add_row([1, 2, 3, 4])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
# Wrong data type
try:
t.add_row(["one", 2, 3])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
def test_insert_table_row(self, table_types):
"""
Light testing of Table.insert_row() method. The deep testing is done via
the add_row() tests, which call insert_row(index=len(self), ...), so
here we just test that the index parameter is handled correctly.
"""
self._setup(table_types)
row = (10, 40.0, "x", [10, 20])
for index in range(-3, 4):
indices = np.insert(np.arange(3), index, 3)
t = table_types.Table([self.a, self.b, self.c, self.d])
t2 = t.copy()
t.add_row(row) # By now we know this works
t2.insert_row(index, row)
for name in t.colnames:
if t[name].dtype.kind == "f":
assert np.allclose(t[name][indices], t2[name])
else:
assert np.all(t[name][indices] == t2[name])
for index in (-4, 4):
t = table_types.Table([self.a, self.b, self.c, self.d])
with pytest.raises(IndexError):
t.insert_row(index, row)
@pytest.mark.usefixtures("table_types")
class TestTableColumn(SetupData):
def test_column_view(self, table_types):
self._setup(table_types)
t = self.t
a = t.columns["a"]
a[2] = 10
assert t["a"][2] == 10
@pytest.mark.usefixtures("table_types")
class TestArrayColumns(SetupData):
def test_1d(self, table_types):
self._setup(table_types)
b = table_types.Column(name="b", dtype=int, shape=(2,), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t["b"].shape == (3, 2)
assert t["b"][0].shape == (2,)
def test_2d(self, table_types):
self._setup(table_types)
b = table_types.Column(name="b", dtype=int, shape=(2, 4), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t["b"].shape == (3, 2, 4)
assert t["b"][0].shape == (2, 4)
def test_3d(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
b = table_types.Column(name="b", dtype=int, shape=(2, 4, 6), length=3)
t.add_column(b)
assert t["b"].shape == (3, 2, 4, 6)
assert t["b"][0].shape == (2, 4, 6)
@pytest.mark.usefixtures("table_types")
class TestRemove(SetupData):
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, "_t"):
self._t = self._table_type([self.a])
return self._t
@property
def t2(self):
if self._table_type is not None:
if not hasattr(self, "_t2"):
self._t2 = self._table_type([self.a, self.b, self.c])
return self._t2
def test_1(self, table_types):
self._setup(table_types)
self.t.remove_columns("a")
assert self.t.colnames == []
assert self.t.as_array().size == 0
# Regression test for gh-8640
assert not self.t
assert isinstance(self.t == None, np.ndarray)
assert (self.t == None).size == 0
def test_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_columns("a")
assert self.t.colnames == ["b"]
assert self.t.dtype.names == ("b",)
assert np.all(self.t["b"] == np.array([4, 5, 6]))
def test_3(self, table_types):
"""Check remove_columns works for a single column with a name of
more than one character. Regression test against #2699"""
self._setup(table_types)
self.t["new_column"] = self.t["a"]
assert "new_column" in self.t.columns.keys()
self.t.remove_columns("new_column")
assert "new_column" not in self.t.columns.keys()
def test_remove_nonexistent_row(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
self.t.remove_row(4)
def test_remove_row_0(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(0)
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["b"] == np.array([5, 6]))
def test_remove_row_1(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(1)
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["a"] == np.array([1, 3]))
def test_remove_row_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(2)
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["c"] == np.array([7, 8]))
def test_remove_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows(slice(0, 2, 1))
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["c"] == np.array([9]))
def test_remove_row_list(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows([0, 2])
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["c"] == np.array([8]))
def test_remove_row_preserves_meta(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_rows([0, 2])
assert self.t["a"].meta == {"aa": [0, 1, 2, 3, 4]}
assert self.t.dtype == np.dtype([("a", "int"), ("b", "int")])
def test_delitem_row(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[1]
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["a"] == np.array([1, 3]))
@pytest.mark.parametrize("idx", [[0, 2], np.array([0, 2])])
def test_delitem_row_list(self, table_types, idx):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[idx]
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["c"] == np.array([8]))
def test_delitem_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[0:2]
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["c"] == np.array([9]))
def test_delitem_row_fail(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[4]
def test_delitem_row_float(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[1.0]
def test_delitem1(self, table_types):
self._setup(table_types)
del self.t["a"]
assert self.t.colnames == []
assert self.t.as_array().size == 0
# Regression test for gh-8640
assert not self.t
assert isinstance(self.t == None, np.ndarray)
assert (self.t == None).size == 0
def test_delitem2(self, table_types):
self._setup(table_types)
del self.t2["b"]
assert self.t2.colnames == ["a", "c"]
def test_delitems(self, table_types):
self._setup(table_types)
del self.t2["a", "b"]
assert self.t2.colnames == ["c"]
def test_delitem_fail(self, table_types):
self._setup(table_types)
with pytest.raises(KeyError):
del self.t["d"]
@pytest.mark.usefixtures("table_types")
class TestKeep(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns([])
assert t.colnames == []
assert t.as_array().size == 0
# Regression test for gh-8640
assert not t
assert isinstance(t == None, np.ndarray)
assert (t == None).size == 0
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns("b")
assert t.colnames == ["b"]
assert t.dtype.names == ("b",)
assert np.all(t["b"] == np.array([4, 5, 6]))
@pytest.mark.usefixtures("table_types")
class TestRename(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.rename_column("a", "b")
assert t.colnames == ["b"]
assert t.dtype.names == ("b",)
assert np.all(t["b"] == np.array([1, 2, 3]))
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.rename_column("a", "c")
t.rename_column("b", "a")
assert t.colnames == ["c", "a"]
assert t.dtype.names == ("c", "a")
if t.masked:
assert t.mask.dtype.names == ("c", "a")
assert np.all(t["c"] == np.array([1, 2, 3]))
assert np.all(t["a"] == np.array([4, 5, 6]))
def test_rename_by_attr(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t["a"].name = "c"
t["b"].name = "a"
assert t.colnames == ["c", "a"]
assert t.dtype.names == ("c", "a")
assert np.all(t["c"] == np.array([1, 2, 3]))
assert np.all(t["a"] == np.array([4, 5, 6]))
def test_rename_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
t.rename_columns(("a", "b", "c"), ("aa", "bb", "cc"))
assert t.colnames == ["aa", "bb", "cc"]
t.rename_columns(["bb", "cc"], ["b", "c"])
assert t.colnames == ["aa", "b", "c"]
with pytest.raises(TypeError):
t.rename_columns("aa", ["a"])
with pytest.raises(ValueError):
t.rename_columns(["a"], ["b", "c"])
@pytest.mark.usefixtures("table_types")
class TestSort:
def test_single(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4]))
t.add_column(
table_types.Column(
name="c",
data=[
(1, 2),
(3, 4),
(4, 5),
],
)
)
assert np.all(t["a"] == np.array([2, 1, 3]))
assert np.all(t["b"] == np.array([6, 5, 4]))
t.sort("a")
assert np.all(t["a"] == np.array([1, 2, 3]))
assert np.all(t["b"] == np.array([5, 6, 4]))
assert np.all(
t["c"]
== np.array(
[
[3, 4],
[1, 2],
[4, 5],
]
)
)
t.sort("b")
assert np.all(t["a"] == np.array([3, 1, 2]))
assert np.all(t["b"] == np.array([4, 5, 6]))
assert np.all(
t["c"]
== np.array(
[
[4, 5],
[3, 4],
[1, 2],
]
)
)
@pytest.mark.parametrize("create_index", [False, True])
def test_single_reverse(self, table_types, create_index):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4]))
t.add_column(table_types.Column(name="c", data=[(1, 2), (3, 4), (4, 5)]))
assert np.all(t["a"] == np.array([2, 1, 3]))
assert np.all(t["b"] == np.array([6, 5, 4]))
t.sort("a", reverse=True)
assert np.all(t["a"] == np.array([3, 2, 1]))
assert np.all(t["b"] == np.array([4, 6, 5]))
assert np.all(t["c"] == np.array([[4, 5], [1, 2], [3, 4]]))
t.sort("b", reverse=True)
assert np.all(t["a"] == np.array([2, 1, 3]))
assert np.all(t["b"] == np.array([6, 5, 4]))
assert np.all(t["c"] == np.array([[1, 2], [3, 4], [4, 5]]))
def test_single_big(self, table_types):
"""Sort a big-ish table with a non-trivial sort order"""
x = np.arange(10000)
y = np.sin(x)
t = table_types.Table([x, y], names=("x", "y"))
t.sort("y")
idx = np.argsort(y)
assert np.all(t["x"] == x[idx])
assert np.all(t["y"] == y[idx])
@pytest.mark.parametrize("reverse", [True, False])
def test_empty_reverse(self, table_types, reverse):
t = table_types.Table([[], []], dtype=["f4", "U1"])
t.sort("col1", reverse=reverse)
def test_multiple(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4, 3, 5, 4]))
assert np.all(t["a"] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t["b"] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(["a", "b"])
assert np.all(t["a"] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t["b"] == np.array([4, 5, 3, 6, 4, 5]))
t.sort(["b", "a"])
assert np.all(t["a"] == np.array([2, 1, 3, 1, 3, 2]))
assert np.all(t["b"] == np.array([3, 4, 4, 5, 5, 6]))
t.sort(("a", "b"))
assert np.all(t["a"] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t["b"] == np.array([4, 5, 3, 6, 4, 5]))
def test_multiple_reverse(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4, 3, 5, 4]))
assert np.all(t["a"] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t["b"] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(["a", "b"], reverse=True)
assert np.all(t["a"] == np.array([3, 3, 2, 2, 1, 1]))
assert np.all(t["b"] == np.array([5, 4, 6, 3, 5, 4]))
t.sort(["b", "a"], reverse=True)
assert np.all(t["a"] == np.array([2, 3, 1, 3, 1, 2]))
assert np.all(t["b"] == np.array([6, 5, 5, 4, 4, 3]))
t.sort(("a", "b"), reverse=True)
assert np.all(t["a"] == np.array([3, 3, 2, 2, 1, 1]))
assert np.all(t["b"] == np.array([5, 4, 6, 3, 5, 4]))
def test_multiple_with_bytes(self, table_types):
t = table_types.Table()
t.add_column(
table_types.Column(name="firstname", data=[b"Max", b"Jo", b"John"])
)
t.add_column(
table_types.Column(name="name", data=[b"Miller", b"Miller", b"Jackson"])
)
t.add_column(table_types.Column(name="tel", data=[12, 15, 19]))
t.sort(["name", "firstname"])
assert np.all([t["firstname"] == np.array([b"John", b"Jo", b"Max"])])
assert np.all([t["name"] == np.array([b"Jackson", b"Miller", b"Miller"])])
assert np.all([t["tel"] == np.array([19, 15, 12])])
def test_multiple_with_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(
table_types.Column(
name="firstname", data=[str(x) for x in ["Max", "Jo", "John"]]
)
)
t.add_column(
table_types.Column(
name="name", data=[str(x) for x in ["Miller", "Miller", "Jackson"]]
)
)
t.add_column(table_types.Column(name="tel", data=[12, 15, 19]))
t.sort(["name", "firstname"])
assert np.all(
[t["firstname"] == np.array([str(x) for x in ["John", "Jo", "Max"]])]
)
assert np.all(
[t["name"] == np.array([str(x) for x in ["Jackson", "Miller", "Miller"]])]
)
assert np.all([t["tel"] == np.array([19, 15, 12])])
def test_argsort(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4, 3, 5, 4]))
assert np.all(t.argsort() == t.as_array().argsort())
i0 = t.argsort("a")
i1 = t.as_array().argsort(order=["a"])
assert np.all(t["a"][i0] == t["a"][i1])
i0 = t.argsort(["a", "b"])
i1 = t.as_array().argsort(order=["a", "b"])
assert np.all(t["a"][i0] == t["a"][i1])
assert np.all(t["b"][i0] == t["b"][i1])
@pytest.mark.parametrize("add_index", [False, True])
def test_argsort_reverse(self, table_types, add_index):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4, 3, 5, 4]))
if add_index:
t.add_index("a")
assert np.all(t.argsort(reverse=True) == np.array([4, 2, 0, 3, 1, 5]))
i0 = t.argsort("a", reverse=True)
i1 = np.array([4, 2, 3, 0, 5, 1])
assert np.all(t["a"][i0] == t["a"][i1])
i0 = t.argsort(["a", "b"], reverse=True)
i1 = np.array([4, 2, 0, 3, 1, 5])
assert np.all(t["a"][i0] == t["a"][i1])
assert np.all(t["b"][i0] == t["b"][i1])
def test_argsort_bytes(self, table_types):
t = table_types.Table()
t.add_column(
table_types.Column(name="firstname", data=[b"Max", b"Jo", b"John"])
)
t.add_column(
table_types.Column(name="name", data=[b"Miller", b"Miller", b"Jackson"])
)
t.add_column(table_types.Column(name="tel", data=[12, 15, 19]))
assert np.all(t.argsort(["name", "firstname"]) == np.array([2, 1, 0]))
def test_argsort_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(
table_types.Column(
name="firstname", data=[str(x) for x in ["Max", "Jo", "John"]]
)
)
t.add_column(
table_types.Column(
name="name", data=[str(x) for x in ["Miller", "Miller", "Jackson"]]
)
)
t.add_column(table_types.Column(name="tel", data=[12, 15, 19]))
assert np.all(t.argsort(["name", "firstname"]) == np.array([2, 1, 0]))
def test_rebuild_column_view_then_rename(self, table_types):
"""
Issue #2039 where renaming fails after any method that calls
_rebuild_table_column_view (this includes sort and add_row).
"""
t = table_types.Table([[1]], names=("a",))
assert t.colnames == ["a"]
assert t.dtype.names == ("a",)
t.add_row((2,))
assert t.colnames == ["a"]
assert t.dtype.names == ("a",)
t.rename_column("a", "b")
assert t.colnames == ["b"]
assert t.dtype.names == ("b",)
t.sort("b")
assert t.colnames == ["b"]
assert t.dtype.names == ("b",)
t.rename_column("b", "c")
assert t.colnames == ["c"]
assert t.dtype.names == ("c",)
@pytest.mark.parametrize("kwargs", [{}, {"kind": "stable"}, {"kind": "quicksort"}])
def test_sort_kind(kwargs):
t = Table()
t["a"] = [2, 1, 3, 2, 3, 1]
t["b"] = [6, 5, 4, 3, 5, 4]
t_struct = t.as_array()
# Since sort calls Table.argsort this covers `kind` for both methods
t.sort(["a", "b"], **kwargs)
assert np.all(t.as_array() == np.sort(t_struct, **kwargs))
@pytest.mark.usefixtures("table_types")
class TestIterator:
def test_iterator(self, table_types):
d = np.array(
[
(2, 1),
(3, 6),
(4, 5),
],
dtype=[("a", "i4"), ("b", "i4")],
)
t = table_types.Table(d)
if t.masked:
with pytest.raises(ValueError):
t[0] == d[0]
else:
for row, np_row in zip(t, d):
assert np.all(row == np_row)
@pytest.mark.usefixtures("table_types")
class TestSetMeta:
def test_set_meta(self, table_types):
d = table_types.Table(names=("a", "b"))
d.meta["a"] = 1
d.meta["b"] = 1
d.meta["c"] = 1
d.meta["d"] = 1
assert list(d.meta.keys()) == ["a", "b", "c", "d"]
@pytest.mark.usefixtures("table_types")
class TestConvertNumpyArray:
def test_convert_numpy_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=("a", "b"))
np_data = np.array(d)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert np_data is not d.as_array()
assert d.colnames == list(np_data.dtype.names)
np_data = np.array(d, copy=False)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert d.colnames == list(np_data.dtype.names)
with pytest.raises(ValueError):
np_data = np.array(d, dtype=[("c", "i8"), ("d", "i8")])
def test_as_array_byteswap(self, table_types):
"""Test for https://github.com/astropy/astropy/pull/4080"""
byte_orders = (">", "<")
native_order = byte_orders[sys.byteorder == "little"]
for order in byte_orders:
col = table_types.Column([1.0, 2.0], name="a", dtype=order + "f8")
t = table_types.Table([col])
arr = t.as_array()
assert arr["a"].dtype.byteorder in (native_order, "=")
arr = t.as_array(keep_byteorder=True)
if order == native_order:
assert arr["a"].dtype.byteorder in (order, "=")
else:
assert arr["a"].dtype.byteorder == order
def test_byteswap_fits_array(self, table_types):
"""
Test for https://github.com/astropy/astropy/pull/4080, demonstrating
that FITS tables are converted to native byte order.
"""
non_native_order = (">", "<")[sys.byteorder != "little"]
filename = get_pkg_data_filename("data/tb.fits", "astropy.io.fits.tests")
t = table_types.Table.read(filename)
arr = t.as_array()
for idx in range(len(arr.dtype)):
assert arr.dtype[idx].byteorder != non_native_order
with fits.open(filename, character_as_bytes=True) as hdul:
data = hdul[1].data
for colname in data.columns.names:
assert np.all(data[colname] == arr[colname])
arr2 = t.as_array(keep_byteorder=True)
for colname in data.columns.names:
assert data[colname].dtype.byteorder == arr2[colname].dtype.byteorder
def test_convert_numpy_object_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=("a", "b"))
# Single table
np_d = np.array(d, dtype=object)
assert isinstance(np_d, np.ndarray)
assert np_d[()] is d
def test_convert_list_numpy_object_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=("a", "b"))
ds = [d, d, d]
np_ds = np.array(ds, dtype=object)
assert all(isinstance(t, table_types.Table) for t in np_ds)
assert all(np.array_equal(t, d) for t in np_ds)
def _assert_copies(t, t2, deep=True):
assert t.colnames == t2.colnames
np.testing.assert_array_equal(t.as_array(), t2.as_array())
assert t.meta == t2.meta
for col, col2 in zip(t.columns.values(), t2.columns.values()):
if deep:
assert not np.may_share_memory(col, col2)
else:
assert np.may_share_memory(col, col2)
def test_copy():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=["x", "y"])
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_masked():
t = table.Table(
[[1, 2, 3], [2, 3, 4]], names=["x", "y"], masked=True, meta={"name": "test"}
)
t["x"].mask == [True, False, True]
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_protocol():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=["x", "y"])
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
_assert_copies(t, t2, deep=False)
_assert_copies(t, t3)
def test_disallow_inequality_comparisons():
"""
Regression test for #828 - disallow comparison operators on whole Table
"""
t = table.Table()
with pytest.raises(TypeError):
t > 2
with pytest.raises(TypeError):
t < 1.1
with pytest.raises(TypeError):
t >= 5.5
with pytest.raises(TypeError):
t <= -1.1
def test_values_equal_part1():
col1 = [1, 2]
col2 = [1.0, 2.0]
col3 = ["a", "b"]
t1 = table.Table([col1, col2, col3], names=["a", "b", "c"])
t2 = table.Table([col1, col2], names=["a", "b"])
t3 = table.table_helpers.simple_table()
tm = t1.copy()
tm["time"] = Time([1, 2], format="cxcsec")
tm1 = tm.copy()
tm1["time"][0] = np.ma.masked
tq = table.table_helpers.simple_table()
tq["quantity"] = [1.0, 2.0, 3.0] * u.m
tsk = table.table_helpers.simple_table()
tsk["sk"] = SkyCoord(1, 2, unit="deg")
eqsk = tsk.values_equal(tsk)
for col in eqsk.itercols():
assert np.all(col)
with pytest.raises(
ValueError, match="cannot compare tables with different column names"
):
t2.values_equal(t1)
with pytest.raises(ValueError, match="unable to compare column a"):
# Shape mismatch
t3.values_equal(t1)
if NUMPY_LT_1_25:
with pytest.raises(ValueError, match="unable to compare column c"):
# Type mismatch in column c causes FutureWarning
t1.values_equal(2)
with pytest.raises(ValueError, match="unable to compare column c"):
t1.values_equal([1, 2])
else:
eq = t2.values_equal(2)
for col in eq.colnames:
assert np.all(eq[col] == [False, True])
eq = t2.values_equal([1, 2])
for col in eq.colnames:
assert np.all(eq[col] == [True, True])
eq = t2.values_equal(t2)
for col in eq.colnames:
assert np.all(eq[col] == [True, True])
eq1 = tm1.values_equal(tm)
for col in eq1.colnames:
assert np.all(eq1[col] == [True, True])
eq2 = tq.values_equal(tq)
for col in eq2.colnames:
assert np.all(eq2[col] == [True, True, True])
eq3 = t2.values_equal(2)
for col in eq3.colnames:
assert np.all(eq3[col] == [False, True])
eq4 = t2.values_equal([1, 2])
for col in eq4.colnames:
assert np.all(eq4[col] == [True, True])
# Compare table to its first row
t = table.Table(rows=[(1, "a"), (1, "b")])
eq = t.values_equal(t[0])
assert np.all(eq["col0"] == [True, True])
assert np.all(eq["col1"] == [True, False])
def test_rows_equal():
t = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 3 b 6.0 2",
" 2 a 4.0 3",
" 0 a 1.0 4",
" 1 b 3.0 5",
" 1 c 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
# In the above cases, Row.__eq__ gets called, but now we need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all(
(t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
)
assert np.all(
(t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
)
def test_equality_masked():
t = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
# Make into masked table
t = table.Table(t, masked=True)
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 3 b 6.0 2",
" 2 a 4.0 3",
" 0 a 1.0 4",
" 1 b 3.0 5",
" 1 c 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
# In the above cases, Row.__eq__ gets called, but now we need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that masking a value causes the row to differ
t.mask["a"][0] = True
assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all(
(t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
)
@pytest.mark.xfail
def test_equality_masked_bug():
"""
This highlights a Numpy bug. Once it works, it can be moved into the
test_equality_masked test. Related Numpy bug report:
https://github.com/numpy/numpy/issues/3840
"""
t = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
t = table.Table(t, masked=True)
t2 = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 3 b 6.0 2",
" 2 a 4.0 3",
" 0 a 1.0 4",
" 1 b 3.0 5",
" 1 c 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
assert np.all(
(t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
class TestMetaTable(MetaBaseTest):
test_class = table.Table
args = ()
def test_unicode_content():
# If we don't have unicode literals then return
if isinstance("", bytes):
return
# Define unicode literals
string_a = "астрономическая питона"
string_b = "миллиарды световых лет"
a = table.Table([[string_a, 2], [string_b, 3]], names=("a", "b"))
assert string_a in str(a)
# This only works because the coding of this file is utf-8, which
# matches the default encoding of Table.__str__
assert string_a.encode("utf-8") in bytes(a)
def test_unicode_policy():
t = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
assert_follows_unicode_guidelines(t)
@pytest.mark.parametrize("uni", ["питона", "ascii"])
def test_unicode_bytestring_conversion(table_types, uni):
"""
Test converting columns to all unicode or all bytestring. This
makes two columns, one which is unicode (str in Py3) and one which
is bytes (UTF-8 encoded). There are two code paths in the conversions,
a faster one where the data are actually ASCII and a slower one where
UTF-8 conversion is required. This tests both via the ``uni`` param.
"""
byt = uni.encode("utf-8")
t = table_types.Table([[byt], [uni], [1]], dtype=("S", "U", "i"))
assert t["col0"].dtype.kind == "S"
assert t["col1"].dtype.kind == "U"
assert t["col2"].dtype.kind == "i"
t["col0"].description = "col0"
t["col1"].description = "col1"
t["col0"].meta["val"] = "val0"
t["col1"].meta["val"] = "val1"
# Unicode to bytestring
t1 = t.copy()
t1.convert_unicode_to_bytestring()
assert t1["col0"].dtype.kind == "S"
assert t1["col1"].dtype.kind == "S"
assert t1["col2"].dtype.kind == "i"
# Meta made it through
assert t1["col0"].description == "col0"
assert t1["col1"].description == "col1"
assert t1["col0"].meta["val"] == "val0"
assert t1["col1"].meta["val"] == "val1"
# Need to de-fang the automatic unicode sandwiching of Table
assert np.array(t1["col0"])[0] == byt
assert np.array(t1["col1"])[0] == byt
assert np.array(t1["col2"])[0] == 1
# Bytestring to unicode
t1 = t.copy()
t1.convert_bytestring_to_unicode()
assert t1["col0"].dtype.kind == "U"
assert t1["col1"].dtype.kind == "U"
assert t1["col2"].dtype.kind == "i"
# Meta made it through
assert t1["col0"].description == "col0"
assert t1["col1"].description == "col1"
assert t1["col0"].meta["val"] == "val0"
assert t1["col1"].meta["val"] == "val1"
# No need to de-fang the automatic unicode sandwiching of Table here, but
# do so anyway for consistency, to prove things are working.
assert np.array(t1["col0"])[0] == uni
assert np.array(t1["col1"])[0] == uni
assert np.array(t1["col2"])[0] == 1
def test_table_deletion():
"""
Regression test for the reference cycle discussed in
https://github.com/astropy/astropy/issues/2877
"""
deleted = set()
# A special table subclass which leaves a record when it is finalized
class TestTable(table.Table):
def __del__(self):
deleted.add(id(self))
t = TestTable({"a": [1, 2, 3]})
the_id = id(t)
assert t["a"].parent_table is t
del t
# Cleanup
gc.collect()
assert the_id in deleted
def test_nested_iteration():
"""
Regression test for issue 3358 where nested iteration over a single table fails.
"""
t = table.Table([[0, 1]], names=["a"])
out = []
for r1 in t:
for r2 in t:
out.append((r1["a"], r2["a"]))
assert out == [(0, 0), (0, 1), (1, 0), (1, 1)]
def test_table_init_from_degenerate_arrays(table_types):
t = table_types.Table(np.array([]))
assert len(t.columns) == 0
with pytest.raises(ValueError):
t = table_types.Table(np.array(0))
t = table_types.Table(np.array([1, 2, 3]))
assert len(t.columns) == 3
@pytest.mark.skipif(not HAS_PANDAS, reason="requires pandas")
class TestPandas:
def test_simple(self):
t = table.Table()
for endian in ["<", ">", "="]:
for kind in ["f", "i"]:
for byte in ["2", "4", "8"]:
dtype = np.dtype(endian + kind + byte)
x = np.array([1, 2, 3], dtype=dtype)
t[endian + kind + byte] = x.newbyteorder(endian)
t["u"] = ["a", "b", "c"]
t["s"] = ["a", "b", "c"]
d = t.to_pandas()
for column in t.columns:
if column == "u":
assert np.all(t["u"] == np.array(["a", "b", "c"]))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
elif column == "s":
assert np.all(t["s"] == np.array(["a", "b", "c"]))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
else:
# We should be able to compare exact values here
assert np.all(t[column] == d[column])
if t[column].dtype.isnative:
assert d[column].dtype == t[column].dtype
else:
assert d[column].dtype == t[column].byteswap().newbyteorder().dtype
# Regression test for astropy/astropy#1156 - the following code gave a
# ValueError: Big-endian buffer not supported on little-endian
# compiler. We now automatically swap the endian-ness to native order
# upon adding the arrays to the data frame.
# Explicitly testing little/big/native endian separately -
# regression for a case in astropy/astropy#11286 not caught by #3729.
d[["<i4", ">i4"]]
d[["<f4", ">f4"]]
t2 = table.Table.from_pandas(d)
for column in t.columns:
if column in ("u", "s"):
assert np.all(t[column] == t2[column])
else:
assert_allclose(t[column], t2[column])
if t[column].dtype.isnative:
assert t[column].dtype == t2[column].dtype
else:
assert t[column].byteswap().newbyteorder().dtype == t2[column].dtype
@pytest.mark.parametrize("unsigned", ["u", ""])
@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_nullable_int(self, unsigned, bits):
np_dtype = f"{unsigned}int{bits}"
c = MaskedColumn([1, 2], mask=[False, True], dtype=np_dtype)
t = Table([c])
df = t.to_pandas()
pd_dtype = np_dtype.replace("i", "I").replace("u", "U")
assert str(df["col0"].dtype) == pd_dtype
t2 = Table.from_pandas(df)
assert str(t2["col0"].dtype) == np_dtype
assert np.all(t2["col0"].mask == [False, True])
assert np.all(t2["col0"] == c)
def test_2d(self):
t = table.Table()
t["a"] = [1, 2, 3]
t["b"] = np.ones((3, 2))
with pytest.raises(
ValueError, match="Cannot convert a table with multidimensional columns"
):
t.to_pandas()
def test_mixin_pandas(self):
t = table.QTable()
for name in sorted(MIXIN_COLS):
if not name.startswith("ndarray"):
t[name] = MIXIN_COLS[name]
t["dt"] = TimeDelta([0, 2, 4, 6], format="sec")
tp = t.to_pandas()
t2 = table.Table.from_pandas(tp)
assert np.allclose(t2["quantity"], [0, 1, 2, 3])
assert np.allclose(t2["longitude"], [0.0, 1.0, 5.0, 6.0])
assert np.allclose(t2["latitude"], [5.0, 6.0, 10.0, 11.0])
assert np.allclose(t2["skycoord.ra"], [0, 1, 2, 3])
assert np.allclose(t2["skycoord.dec"], [0, 1, 2, 3])
assert np.allclose(t2["arraywrap"], [0, 1, 2, 3])
assert np.allclose(t2["arrayswap"], [0, 1, 2, 3])
assert np.allclose(
t2["earthlocation.y"], [0, 110708, 547501, 654527], rtol=0, atol=1
)
# For pandas, Time and TimeDelta are the mixins that round-trip the class
assert isinstance(t2["time"], Time)
assert np.allclose(t2["time"].jyear, [2000, 2001, 2002, 2003])
assert np.all(
t2["time"].isot
== [
"2000-01-01T12:00:00.000",
"2000-12-31T18:00:00.000",
"2002-01-01T00:00:00.000",
"2003-01-01T06:00:00.000",
]
)
assert t2["time"].format == "isot"
# TimeDelta
assert isinstance(t2["dt"], TimeDelta)
assert np.allclose(t2["dt"].value, [0, 2, 4, 6])
assert t2["dt"].format == "sec"
@pytest.mark.parametrize("use_IndexedTable", [False, True])
def test_to_pandas_index(self, use_IndexedTable):
"""Test to_pandas() with different indexing options.
This also tests the fix for #12014. The exception seen there is
reproduced here without the fix.
"""
import pandas as pd
class IndexedTable(table.QTable):
"""Always index the first column"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_index(self.colnames[0])
row_index = pd.RangeIndex(0, 2, 1)
tm_index = pd.DatetimeIndex(
["1998-01-01", "2002-01-01"], dtype="datetime64[ns]", name="tm", freq=None
)
tm = Time([1998, 2002], format="jyear")
x = [1, 2]
table_cls = IndexedTable if use_IndexedTable else table.QTable
t = table_cls([tm, x], names=["tm", "x"])
tp = t.to_pandas()
if not use_IndexedTable:
assert np.all(tp.index == row_index)
tp = t.to_pandas(index="tm")
assert np.all(tp.index == tm_index)
t.add_index("tm")
tp = t.to_pandas()
assert np.all(tp.index == tm_index)
# Make sure writing to pandas didn't hack the original table
assert t["tm"].info.indices
tp = t.to_pandas(index=True)
assert np.all(tp.index == tm_index)
tp = t.to_pandas(index=False)
assert np.all(tp.index == row_index)
with pytest.raises(ValueError) as err:
t.to_pandas(index="not a column")
assert "index must be None, False" in str(err.value)
def test_mixin_pandas_masked(self):
tm = Time([1, 2, 3], format="cxcsec")
dt = TimeDelta([1, 2, 3], format="sec")
tm[1] = np.ma.masked
dt[1] = np.ma.masked
t = table.QTable([tm, dt], names=["tm", "dt"])
tp = t.to_pandas()
assert np.all(tp["tm"].isnull() == [False, True, False])
assert np.all(tp["dt"].isnull() == [False, True, False])
t2 = table.Table.from_pandas(tp)
assert np.all(t2["tm"].mask == tm.mask)
assert np.ma.allclose(t2["tm"].jd, tm.jd, rtol=1e-14, atol=1e-14)
assert np.all(t2["dt"].mask == dt.mask)
assert np.ma.allclose(t2["dt"].jd, dt.jd, rtol=1e-14, atol=1e-14)
def test_from_pandas_index(self):
tm = Time([1998, 2002], format="jyear")
x = [1, 2]
t = table.Table([tm, x], names=["tm", "x"])
tp = t.to_pandas(index="tm")
t2 = table.Table.from_pandas(tp)
assert t2.colnames == ["x"]
t2 = table.Table.from_pandas(tp, index=True)
assert t2.colnames == ["tm", "x"]
assert np.allclose(t2["tm"].jyear, tm.jyear)
@pytest.mark.parametrize("use_nullable_int", [True, False])
def test_masking(self, use_nullable_int):
t = table.Table(masked=True)
t["a"] = [1, 2, 3]
t["a"].mask = [True, False, True]
t["b"] = [1.0, 2.0, 3.0]
t["b"].mask = [False, False, True]
t["u"] = ["a", "b", "c"]
t["u"].mask = [False, True, False]
t["s"] = ["a", "b", "c"]
t["s"].mask = [False, True, False]
# https://github.com/astropy/astropy/issues/7741
t["Source"] = [2584290278794471936, 2584290038276303744, 2584288728310999296]
t["Source"].mask = [False, False, False]
if use_nullable_int: # Default
# No warning with the default use_nullable_int=True
d = t.to_pandas(use_nullable_int=use_nullable_int)
else:
import pandas
from packaging.version import Version
PANDAS_LT_2_0 = Version(pandas.__version__) < Version("2.0dev")
if PANDAS_LT_2_0:
with pytest.warns(
TableReplaceWarning,
match=r"converted column 'a' from int(32|64) to float64",
):
d = t.to_pandas(use_nullable_int=use_nullable_int)
else:
from pandas.core.dtypes.cast import IntCastingNaNError
with pytest.raises(
IntCastingNaNError,
match=r"Cannot convert non-finite values \(NA or inf\) to integer",
):
d = t.to_pandas(use_nullable_int=use_nullable_int)
return # Do not continue
t2 = table.Table.from_pandas(d)
for name, column in t.columns.items():
assert np.all(column.data == t2[name].data)
if hasattr(t2[name], "mask"):
assert np.all(column.mask == t2[name].mask)
if column.dtype.kind == "i":
if np.any(column.mask) and not use_nullable_int:
assert t2[name].dtype.kind == "f"
else:
assert t2[name].dtype.kind == "i"
# This warning pops up when use_nullable_int is False
# for pandas 1.5.2.
with np.errstate(invalid="ignore"):
assert_array_equal(column.data, t2[name].data.astype(column.dtype))
else:
if column.dtype.byteorder in ("=", "|"):
assert column.dtype == t2[name].dtype
else:
assert column.byteswap().newbyteorder().dtype == t2[name].dtype
def test_units(self):
import pandas as pd
import astropy.units as u
df = pd.DataFrame({"x": [1, 2, 3], "t": [1.3, 1.2, 1.8]})
t = table.Table.from_pandas(df, units={"x": u.m, "t": u.s})
assert t["x"].unit == u.m
assert t["t"].unit == u.s
# test error if not a mapping
with pytest.raises(TypeError):
table.Table.from_pandas(df, units=[u.m, u.s])
# test warning is raised if additional columns in units dict
with pytest.warns(UserWarning) as record:
table.Table.from_pandas(df, units={"x": u.m, "t": u.s, "y": u.m})
assert len(record) == 1
assert "{'y'}" in record[0].message.args[0]
def test_to_pandas_masked_int_data_with__index(self):
data = {"data": [0, 1, 2], "index": [10, 11, 12]}
t = table.Table(data=data, masked=True)
t.add_index("index")
t["data"].mask = [1, 1, 0]
df = t.to_pandas()
assert df["data"].iloc[-1] == 2
@pytest.mark.usefixtures("table_types")
class TestReplaceColumn(SetupData):
def test_fail_replace_column(self, table_types):
"""Raise exception when trying to replace column via table.columns object"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(
ValueError,
match=r"Cannot replace column 'a'. Use " "Table.replace_column.. instead.",
):
t.columns["a"] = [1, 2, 3]
with pytest.raises(
ValueError, match=r"column name not there is not in the table"
):
t.replace_column("not there", [1, 2, 3])
with pytest.raises(
ValueError, match=r"length of new column must match table length"
):
t.replace_column("a", [1, 2])
def test_replace_column(self, table_types):
"""Replace existing column with a new column"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
ta = t["a"]
tb = t["b"]
vals = [1.2, 3.4, 5.6]
for col in (
vals,
table_types.Column(vals),
table_types.Column(vals, name="a"),
table_types.Column(vals, name="b"),
):
t.replace_column("a", col)
assert np.all(t["a"] == vals)
assert t["a"] is not ta # New a column
assert t["b"] is tb # Original b column unchanged
assert t.colnames == ["a", "b"]
assert t["a"].meta == {}
assert t["a"].format is None
# Special case: replacing the only column can resize table
del t["b"]
assert len(t) == 3
t["a"] = [1, 2]
assert len(t) == 2
def test_replace_index_column(self, table_types):
"""Replace index column and generate expected exception"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_index("a")
with pytest.raises(ValueError) as err:
t.replace_column("a", [1, 2, 3])
assert err.value.args[0] == "cannot replace a table index column"
def test_replace_column_no_copy(self):
t = Table([[1, 2], [3, 4]], names=["a", "b"])
a = np.array([1.5, 2.5])
t.replace_column("a", a, copy=False)
assert t["a"][0] == a[0]
t["a"][0] = 10
assert t["a"][0] == a[0]
class TestQTableColumnConversionCornerCases:
def test_replace_with_masked_col_with_units_in_qtable(self):
"""This is a small regression from #8902"""
t = QTable([[1, 2], [3, 4]], names=["a", "b"])
t["a"] = MaskedColumn([5, 6], unit="m")
assert isinstance(t["a"], u.Quantity)
def test_do_not_replace_string_column_with_units_in_qtable(self):
t = QTable([[1 * u.m]])
with pytest.warns(AstropyUserWarning, match="convert it to Quantity failed"):
t["a"] = Column(["a"], unit=u.m)
assert isinstance(t["a"], Column)
class Test__Astropy_Table__:
"""
Test initializing a Table subclass from a table-like object that
implements the __astropy_table__ interface method.
"""
class SimpleTable:
def __init__(self):
self.columns = [[1, 2, 3], [4, 5, 6], [7, 8, 9] * u.m]
self.names = ["a", "b", "c"]
self.meta = OrderedDict([("a", 1), ("b", 2)])
def __astropy_table__(self, cls, copy, **kwargs):
a, b, c = self.columns
c.info.name = "c"
cols = [table.Column(a, name="a"), table.MaskedColumn(b, name="b"), c]
names = [col.info.name for col in cols]
return cls(cols, names=names, copy=copy, meta=kwargs or self.meta)
def test_simple_1(self):
"""Make a SimpleTable and convert to Table, QTable with copy=False, True"""
for table_cls in (table.Table, table.QTable):
col_c_class = u.Quantity if table_cls is table.QTable else table.Column
for cpy in (False, True):
st = self.SimpleTable()
# Test putting in a non-native kwarg `extra_meta` to Table initializer
t = table_cls(st, copy=cpy, extra_meta="extra!")
assert t.colnames == ["a", "b", "c"]
assert t.meta == {"extra_meta": "extra!"}
assert np.all(t["a"] == st.columns[0])
assert np.all(t["b"] == st.columns[1])
vals = t["c"].value if table_cls is table.QTable else t["c"]
assert np.all(st.columns[2].value == vals)
assert isinstance(t["a"], table.Column)
assert isinstance(t["b"], table.MaskedColumn)
assert isinstance(t["c"], col_c_class)
assert t["c"].unit is u.m
assert type(t) is table_cls
# Copy being respected?
t["a"][0] = 10
assert st.columns[0][0] == (1 if cpy else 10)
def test_simple_2(self):
"""Test converting a SimpleTable and changing column names and types"""
st = self.SimpleTable()
dtypes = [np.int32, np.float32, np.float16]
names = ["a", "b", "c"]
meta = OrderedDict([("c", 3)])
t = table.Table(st, dtype=dtypes, names=names, meta=meta)
assert t.colnames == names
assert all(
col.dtype.type is dtype for col, dtype in zip(t.columns.values(), dtypes)
)
# The supplied meta overrides the existing meta. Changed in astropy 3.2.
assert t.meta != st.meta
assert t.meta == meta
def test_kwargs_exception(self):
"""If extra kwargs provided but without initializing with a table-like
object, exception is raised"""
with pytest.raises(TypeError) as err:
table.Table([[1]], extra_meta="extra!")
assert "__init__() got unexpected keyword argument" in str(err.value)
class TestUpdate:
def _setup(self):
self.a = Column((1, 2, 3), name="a")
self.b = Column((4, 5, 6), name="b")
self.c = Column((7, 8, 9), name="c")
self.d = Column((10, 11, 12), name="d")
def test_different_lengths(self):
self._setup()
t1 = Table([self.a])
t2 = Table([self.b[:-1]])
msg = "Inconsistent data column lengths"
with pytest.raises(ValueError, match=msg):
t1.update(t2)
# If update didn't succeed then t1 and t2 should not have changed.
assert t1.colnames == ["a"]
assert np.all(t1["a"] == self.a)
assert t2.colnames == ["b"]
assert np.all(t2["b"] == self.b[:-1])
def test_invalid_inputs(self):
# If input is invalid then nothing should be modified.
self._setup()
t = Table([self.a])
d = {"b": self.b, "c": [0]}
msg = "Inconsistent data column lengths: {1, 3}"
with pytest.raises(ValueError, match=msg):
t.update(d)
assert t.colnames == ["a"]
assert np.all(t["a"] == self.a)
assert d == {"b": self.b, "c": [0]}
def test_metadata_conflict(self):
self._setup()
t1 = Table([self.a], meta={"a": 0, "b": [0], "c": True})
t2 = Table([self.b], meta={"a": 1, "b": [1]})
t2meta = copy.deepcopy(t2.meta)
t1.update(t2)
assert t1.meta == {"a": 1, "b": [0, 1], "c": True}
# t2 metadata should not have changed.
assert t2.meta == t2meta
def test_update(self):
self._setup()
t1 = Table([self.a, self.b])
t2 = Table([self.b, self.c])
t2["b"] += 1
t1.update(t2)
assert t1.colnames == ["a", "b", "c"]
assert np.all(t1["a"] == self.a)
assert np.all(t1["b"] == self.b + 1)
assert np.all(t1["c"] == self.c)
# t2 should not have changed.
assert t2.colnames == ["b", "c"]
assert np.all(t2["b"] == self.b + 1)
assert np.all(t2["c"] == self.c)
d = {"b": list(self.b), "d": list(self.d)}
dc = copy.deepcopy(d)
t2.update(d)
assert t2.colnames == ["b", "c", "d"]
assert np.all(t2["b"] == self.b)
assert np.all(t2["c"] == self.c)
assert np.all(t2["d"] == self.d)
# d should not have changed.
assert d == dc
# Columns were copied, so changing t2 shouldn't have affected t1.
assert t1.colnames == ["a", "b", "c"]
assert np.all(t1["a"] == self.a)
assert np.all(t1["b"] == self.b + 1)
assert np.all(t1["c"] == self.c)
def test_update_without_copy(self):
self._setup()
t1 = Table([self.a, self.b])
t2 = Table([self.b, self.c])
t1.update(t2, copy=False)
t2["b"] -= 1
assert t1.colnames == ["a", "b", "c"]
assert np.all(t1["a"] == self.a)
assert np.all(t1["b"] == self.b - 1)
assert np.all(t1["c"] == self.c)
d = {"b": np.array(self.b), "d": np.array(self.d)}
t2.update(d, copy=False)
d["b"] *= 2
assert t2.colnames == ["b", "c", "d"]
assert np.all(t2["b"] == 2 * self.b)
assert np.all(t2["c"] == self.c)
assert np.all(t2["d"] == self.d)
def test_merge_operator(self):
self._setup()
t1 = Table([self.a, self.b])
t2 = Table([self.b, self.c])
with pytest.raises(TypeError):
_ = 1 | t1
with pytest.raises(TypeError):
_ = t1 | 1
t1_copy = t1.copy(True)
t3 = t1 | t2
assert t1.colnames == ["a", "b"] # t1 should remain unchanged
assert np.all(t1["a"] == self.a)
assert np.all(t1["b"] == self.b)
t1_copy.update(t2)
assert t3.colnames == ["a", "b", "c"]
assert np.all(t3["a"] == t1_copy["a"])
assert np.all(t3["b"] == t1_copy["b"])
assert np.all(t3["c"] == t1_copy["c"])
def test_update_operator(self):
self._setup()
t1 = Table([self.a, self.b])
t2 = Table([self.b, self.c])
with pytest.raises(ValueError):
t1 |= 1
t1_copy = t1.copy(True)
t1 |= t2
t1_copy.update(t2)
assert t1.colnames == ["a", "b", "c"]
assert np.all(t1["a"] == t1_copy["a"])
assert np.all(t1["b"] == t1_copy["b"])
assert np.all(t1["c"] == t1_copy["c"])
def test_table_meta_copy():
"""
Test no copy vs light (key) copy vs deep copy of table meta for different
situations. #8404.
"""
t = table.Table([[1]])
meta = {1: [1, 2]}
# Assigning meta directly implies using direct object reference
t.meta = meta
assert t.meta is meta
# Table slice implies key copy, so values are unchanged
t2 = t[:]
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is t.meta[1] # Value IS the list same object
# Table init with copy=False implies key copy
t2 = table.Table(t, copy=False)
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is t.meta[1] # Value IS the same list object
# Table init with copy=True implies deep copy
t2 = table.Table(t, copy=True)
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is not t.meta[1] # Value is NOT the same list object
def test_table_meta_copy_with_meta_arg():
"""
Test no copy vs light (key) copy vs deep copy of table meta when meta is
supplied as a table init argument. #8404.
"""
meta = {1: [1, 2]}
meta2 = {2: [3, 4]}
t = table.Table([[1]], meta=meta, copy=False)
assert t.meta is meta
t = table.Table([[1]], meta=meta) # default copy=True
assert t.meta is not meta
assert t.meta == meta
# Test initializing from existing table with meta with copy=False
t2 = table.Table(t, meta=meta2, copy=False)
assert t2.meta is meta2
assert t2.meta != t.meta  # Behavior changed in #8404
# Test initializing from existing table with meta with default copy=True
t2 = table.Table(t, meta=meta2)
assert t2.meta is not meta2
assert t2.meta != t.meta  # Behavior changed in #8404
# Table init with copy=True and empty dict meta gets that empty dict
t2 = table.Table(t, copy=True, meta={})
assert t2.meta == {}
# Table init with copy=True and kwarg meta=None gets the original table dict.
# This is a somewhat ambiguous case because it could be interpreted as the
# user wanting NO meta set on the output. This could be implemented by inspecting
# call args.
t2 = table.Table(t, copy=True, meta=None)
assert t2.meta == t.meta
# Test initializing empty table with meta with copy=False
t = table.Table(meta=meta, copy=False)
assert t.meta is meta
assert t.meta[1] is meta[1]
# Test initializing empty table with meta with default copy=True (deepcopy meta)
t = table.Table(meta=meta)
assert t.meta is not meta
assert t.meta == meta
assert t.meta[1] is not meta[1]
def test_replace_column_qtable():
"""Replace existing Quantity column with a new column in a QTable"""
a = [1, 2, 3] * u.m
b = [4, 5, 6]
t = table.QTable([a, b], names=["a", "b"])
ta = t["a"]
tb = t["b"]
ta.info.meta = {"aa": [0, 1, 2, 3, 4]}
ta.info.format = "%f"
t.replace_column("a", a.to("cm"))
assert np.all(t["a"] == ta)
assert t["a"] is not ta # New a column
assert t["b"] is tb # Original b column unchanged
assert t.colnames == ["a", "b"]
assert t["a"].info.meta is None
assert t["a"].info.format is None
def test_replace_update_column_via_setitem():
"""
Test table update like ``t['a'] = value``. This leverages off the
already well-tested ``replace_column`` and in-place update
``t['a'][:] = value``, so this testing is fairly light.
"""
a = [1, 2] * u.m
b = [3, 4]
t = table.QTable([a, b], names=["a", "b"])
assert isinstance(t["a"], u.Quantity)
# Inplace update
ta = t["a"]
t["a"] = 5 * u.m
assert np.all(t["a"] == [5, 5] * u.m)
assert t["a"] is ta
# Replace
t["a"] = [5, 6]
assert np.all(t["a"] == [5, 6])
assert isinstance(t["a"], table.Column)
assert t["a"] is not ta
def test_replace_update_column_via_setitem_warnings_normal():
"""
Test warnings related to table replace change in #5556:
Normal warning-free replace
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
with table.conf.set_temp("replace_warnings", ["refcount", "attributes", "slice"]):
t["a"] = 0 # in-place update
t["a"] = [10, 20, 30] # replace column
def test_replace_update_column_via_setitem_warnings_slice():
"""
Test warnings related to table replace change in #5556:
Replace a slice, one warning.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
with table.conf.set_temp("replace_warnings", ["refcount", "attributes", "slice"]):
t2 = t[:2]
t2["a"] = 0 # in-place slice update
assert np.all(t["a"] == [0, 0, 3])
with pytest.warns(
TableReplaceWarning,
match="replaced column 'a' which looks like an array slice",
) as w:
t2["a"] = [10, 20] # replace slice
assert len(w) == 1
def test_replace_update_column_via_setitem_warnings_attributes():
"""
Test warnings related to table replace change in #5556:
Lost attributes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
t["a"].unit = "m"
with pytest.warns(
TableReplaceWarning,
match=r"replaced column 'a' " r"and column attributes \['unit'\]",
) as w:
with table.conf.set_temp(
"replace_warnings", ["refcount", "attributes", "slice"]
):
t["a"] = [10, 20, 30]
assert len(w) == 1
def test_replace_update_column_via_setitem_warnings_refcount():
"""
Test warnings related to table replace change in #5556:
Reference count changes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
ta = t["a"] # Generate an extra reference to original column
with pytest.warns(
TableReplaceWarning, match="replaced column 'a' and the number of references"
) as w:
with table.conf.set_temp(
"replace_warnings", ["refcount", "attributes", "slice"]
):
t["a"] = [10, 20, 30]
assert len(w) == 1
def test_replace_update_column_via_setitem_warnings_always():
"""
Test warnings related to table replace change in #5556:
Test 'always' setting that raises warning for any replace.
"""
from inspect import currentframe, getframeinfo
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
with table.conf.set_temp("replace_warnings", ["always"]):
t["a"] = 0 # in-place slice update
with pytest.warns(TableReplaceWarning, match="replaced column 'a'") as w:
frameinfo = getframeinfo(currentframe())
t["a"] = [10, 20, 30] # replace column
assert len(w) == 1
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert "test_table" in w[0].filename
def test_replace_update_column_via_setitem_replace_inplace():
"""
Test the replace_inplace config option related to #5556. In this
case no replace is done.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
ta = t["a"]
t["a"].unit = "m"
with table.conf.set_temp("replace_inplace", True):
with table.conf.set_temp(
"replace_warnings", ["always", "refcount", "attributes", "slice"]
):
t["a"] = 0 # in-place update
assert ta is t["a"]
t["a"] = [10, 20, 30] # normally replaces column, but not now
assert ta is t["a"]
assert np.all(t["a"] == [10, 20, 30])
def test_primary_key_is_inherited():
"""Test whether a new Table inherits the primary_key attribute from
its parent Table. Issue #4672"""
t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=("a", "b"))
t.add_index("a")
original_key = t.primary_key
# can't test if tuples are equal, so just check content
assert original_key[0] == "a"
t2 = t[:]
t3 = t.copy()
t4 = table.Table(t)
# test whether the reference is the same in the following
assert original_key == t2.primary_key
assert original_key == t3.primary_key
assert original_key == t4.primary_key
# just test one element, assume rest are equal if assert passes
assert t.loc[1] == t2.loc[1]
assert t.loc[1] == t3.loc[1]
assert t.loc[1] == t4.loc[1]
def test_qtable_read_for_ipac_table_with_char_columns():
"""Test that a char column of a QTable is assigned no unit and not
a dimensionless unit, otherwise conversion of reader output to
QTable fails."""
t1 = table.QTable([["A"]], names="B")
out = StringIO()
t1.write(out, format="ascii.ipac")
t2 = table.QTable.read(out.getvalue(), format="ascii.ipac", guess=False)
assert t2["B"].unit is None
def test_create_table_from_final_row():
"""Regression test for issue #8422: passing the last row of a table into
Table should return a new table containing that row."""
t1 = table.Table([(1, 2)], names=["col"])
row = t1[-1]
t2 = table.Table(row)["col"]
assert t2[0] == 2
def test_key_values_in_as_array():
# Test for checking column slicing using key_values in Table.as_array()
data_rows = [(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")]
# Creating a table with three columns
t1 = table.Table(
rows=data_rows,
names=("a", "b", "c"),
meta={"name": "first table"},
dtype=("i4", "f8", "S1"),
)
    # Values of the sliced columns a and b are stored in a numpy array
a = np.array([(1, 2.0), (4, 5.0), (5, 8.2)], dtype=[("a", "<i4"), ("b", "<f8")])
    # Values for the sliced column c are stored in a numpy array
b = np.array([(b"x",), (b"y",), (b"z",)], dtype=[("c", "S1")])
# Comparing initialised array with sliced array using Table.as_array()
assert np.array_equal(a, t1.as_array(names=["a", "b"]))
assert np.array_equal(b, t1.as_array(names=["c"]))
def test_tolist():
t = table.Table(
[[1, 2, 3], [1.1, 2.2, 3.3], [b"foo", b"bar", b"hello"]], names=("a", "b", "c")
)
assert t["a"].tolist() == [1, 2, 3]
assert_array_equal(t["b"].tolist(), [1.1, 2.2, 3.3])
assert t["c"].tolist() == ["foo", "bar", "hello"]
assert isinstance(t["a"].tolist()[0], int)
assert isinstance(t["b"].tolist()[0], float)
assert isinstance(t["c"].tolist()[0], str)
t = table.Table(
[[[1, 2], [3, 4]], [[b"foo", b"bar"], [b"hello", b"world"]]], names=("a", "c")
)
assert t["a"].tolist() == [[1, 2], [3, 4]]
assert t["c"].tolist() == [["foo", "bar"], ["hello", "world"]]
assert isinstance(t["a"].tolist()[0][0], int)
assert isinstance(t["c"].tolist()[0][0], str)
class MyTable(Table):
foo = TableAttribute()
bar = TableAttribute(default=[])
baz = TableAttribute(default=1)
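# TableAttribute values live in t.meta['__attributes__'], which is why they
# survive the pickle and ECSV round-trips exercised below.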
def test_table_attribute():
assert repr(MyTable.baz) == "<TableAttribute name=baz default=1>"
t = MyTable([[1, 2]])
# __attributes__ created on the fly on the first access of an attribute
# that has a non-None default.
assert "__attributes__" not in t.meta
assert t.foo is None
assert "__attributes__" not in t.meta
assert t.baz == 1
assert "__attributes__" in t.meta
t.bar.append(2.0)
assert t.bar == [2.0]
assert t.baz == 1
t.baz = "baz"
assert t.baz == "baz"
# Table attributes round-trip through pickle
tp = pickle.loads(pickle.dumps(t))
assert tp.foo is None
assert tp.baz == "baz"
assert tp.bar == [2.0]
# Allow initialization of attributes in table creation, with / without data
for data in None, [[1, 2]]:
t2 = MyTable(data, foo=3, bar="bar", baz="baz")
assert t2.foo == 3
assert t2.bar == "bar"
assert t2.baz == "baz"
# Initializing from an existing MyTable works, with and without kwarg attrs
t3 = MyTable(t2)
assert t3.foo == 3
assert t3.bar == "bar"
assert t3.baz == "baz"
t3 = MyTable(t2, foo=5, bar="fubar")
assert t3.foo == 5
assert t3.bar == "fubar"
assert t3.baz == "baz"
    # Deleting an attribute removes it from __attributes__
del t.baz
assert "baz" not in t.meta["__attributes__"]
del t.bar
assert "__attributes__" not in t.meta
def test_table_attribute_ecsv():
# Table attribute round-trip through ECSV
t = MyTable([[1, 2]], bar=[2.0], baz="baz")
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = MyTable.read(out.getvalue(), format="ascii.ecsv")
assert t2.foo is None
assert t2.bar == [2.0]
assert t2.baz == "baz"
def test_table_attribute_fail():
# Code raises ValueError(f'{attr} not allowed as TableAttribute') but in this
# context it gets re-raised as a RuntimeError during class definition.
with pytest.raises(RuntimeError, match="Error calling __set_name__"):
class MyTable2(Table):
descriptions = TableAttribute() # Conflicts with init arg
with pytest.raises(RuntimeError, match="Error calling __set_name__"):
class MyTable3(Table):
colnames = TableAttribute() # Conflicts with built-in property
def test_set_units_fail():
dat = [[1.0, 2.0], ["aa", "bb"]]
with pytest.raises(
ValueError, match="sequence of unit values must match number of columns"
):
Table(dat, units=[u.m])
with pytest.raises(
ValueError, match="invalid column name c for setting unit attribute"
):
Table(dat, units={"c": u.m})
def test_set_units():
dat = [[1.0, 2.0], ["aa", "bb"], [3, 4]]
exp_units = (u.m, None, None)
for cls in Table, QTable:
for units in ({"a": u.m, "c": ""}, exp_units):
qt = cls(dat, units=units, names=["a", "b", "c"])
if cls is QTable:
assert isinstance(qt["a"], u.Quantity)
assert isinstance(qt["b"], table.Column)
assert isinstance(qt["c"], table.Column)
for col, unit in zip(qt.itercols(), exp_units):
assert col.info.unit is unit
def test_set_descriptions():
dat = [[1.0, 2.0], ["aa", "bb"]]
exp_descriptions = ("my description", None)
for cls in Table, QTable:
for descriptions in ({"a": "my description"}, exp_descriptions):
qt = cls(dat, descriptions=descriptions, names=["a", "b"])
for col, description in zip(qt.itercols(), exp_descriptions):
assert col.info.description == description
def test_set_units_from_row():
text = ["a,b", ",s", "1,2", "3,4"]
units = Table.read(text, format="ascii", data_start=1, data_end=2)[0]
t = Table.read(text, format="ascii", data_start=2, units=units)
assert isinstance(units, table.Row)
assert t["a"].info.unit is None
assert t["b"].info.unit is u.s
def test_set_units_descriptions_read():
"""Test setting units and descriptions via Table.read. The test here
is less comprehensive because the implementation is exactly the same
as for Table.__init__ (calling Table._set_column_attribute)"""
for cls in Table, QTable:
t = cls.read(
["a b", "1 2"],
format="ascii",
units=[u.m, u.s],
descriptions=["hi", "there"],
)
assert t["a"].info.unit is u.m
assert t["b"].info.unit is u.s
assert t["a"].info.description == "hi"
assert t["b"].info.description == "there"
def test_broadcasting_8933():
"""Explicitly check re-work of code related to broadcasting in #8933"""
t = table.Table([[1, 2]]) # Length=2 table
t["a"] = [[3, 4]] # Can broadcast if ndim > 1 and shape[0] == 1
t["b"] = 5
t["c"] = [1] # Treat as broadcastable scalar, not length=1 array (which would fail)
assert np.all(t["a"] == [[3, 4], [3, 4]])
assert np.all(t["b"] == [5, 5])
assert np.all(t["c"] == [1, 1])
# Test that broadcasted column is writeable
t["c"][1] = 10
assert np.all(t["c"] == [1, 10])
def test_custom_masked_column_in_nonmasked_table():
"""Test the refactor and change in column upgrades introduced
in 95902650f. This fixes a regression introduced by #8789
(Change behavior of Table regarding masked columns)."""
class MyMaskedColumn(table.MaskedColumn):
pass
class MySubMaskedColumn(MyMaskedColumn):
pass
class MyColumn(table.Column):
pass
class MySubColumn(MyColumn):
pass
class MyTable(table.Table):
Column = MyColumn
MaskedColumn = MyMaskedColumn
a = table.Column([1])
b = table.MaskedColumn([2], mask=[True])
c = MyMaskedColumn([3], mask=[True])
d = MySubColumn([4])
e = MySubMaskedColumn([5], mask=[True])
# Two different pathways for making table
t1 = MyTable([a, b, c, d, e], names=["a", "b", "c", "d", "e"])
t2 = MyTable()
t2["a"] = a
t2["b"] = b
t2["c"] = c
t2["d"] = d
t2["e"] = e
for t in (t1, t2):
assert type(t["a"]) is MyColumn
assert type(t["b"]) is MyMaskedColumn # upgrade
assert type(t["c"]) is MyMaskedColumn
assert type(t["d"]) is MySubColumn
assert type(t["e"]) is MySubMaskedColumn # sub-class not downgraded
def test_sort_with_mutable_skycoord():
"""Test sorting a table that has a mutable column such as SkyCoord.
In this case the sort is done in-place
"""
t = Table([[2, 1], SkyCoord([4, 3], [6, 5], unit="deg,deg")], names=["a", "sc"])
meta = {"a": [1, 2]}
ta = t["a"]
tsc = t["sc"]
t["sc"].info.meta = meta
t.sort("a")
assert np.all(t["a"] == [1, 2])
assert np.allclose(t["sc"].ra.to_value(u.deg), [3, 4])
assert np.allclose(t["sc"].dec.to_value(u.deg), [5, 6])
assert t["a"] is ta
assert t["sc"] is tsc
# Prior to astropy 4.1 this was a deep copy of SkyCoord column; after 4.1
# it is a reference.
t["sc"].info.meta["a"][0] = 100
assert meta["a"][0] == 100
def test_sort_with_non_mutable():
"""Test sorting a table that has a non-mutable column."""
t = Table([[2, 1], [3, 4]], names=["a", "b"])
ta = t["a"]
tb = t["b"]
t["b"].setflags(write=False)
meta = {"a": [1, 2]}
t["b"].info.meta = meta
t.sort("a")
assert np.all(t["a"] == [1, 2])
assert np.all(t["b"] == [4, 3])
assert ta is t["a"]
assert tb is not t["b"]
    # The non-writeable column is replaced by a copy during the sort, so the
    # original meta dict is left unchanged.
t["b"].info.meta["a"][0] = 100
assert meta["a"][0] == 1
def test_init_with_list_of_masked_arrays():
"""Test the fix for #8977"""
m0 = np.ma.array([0, 1, 2], mask=[True, False, True])
m1 = np.ma.array([3, 4, 5], mask=[False, True, False])
mc = [m0, m1]
# Test _init_from_list
t = table.Table([mc], names=["a"])
# Test add_column
t["b"] = [m1, m0]
assert t["a"].shape == (2, 3)
assert np.all(t["a"][0] == m0)
assert np.all(t["a"][1] == m1)
assert np.all(t["a"][0].mask == m0.mask)
assert np.all(t["a"][1].mask == m1.mask)
assert t["b"].shape == (2, 3)
assert np.all(t["b"][0] == m1)
assert np.all(t["b"][1] == m0)
assert np.all(t["b"][0].mask == m1.mask)
assert np.all(t["b"][1].mask == m0.mask)
def test_data_to_col_convert_strategy():
"""Test the update to how data_to_col works (#8972), using the regression
example from #8971.
"""
t = table.Table([[0, 1]])
t["a"] = 1
t["b"] = np.int64(2) # Failed previously
assert np.all(t["a"] == [1, 1])
assert np.all(t["b"] == [2, 2])
def test_structured_masked_column():
"""Test that adding a masked ndarray with a structured dtype works"""
dtype = np.dtype([("z", "f8"), ("x", "f8"), ("y", "i4")])
t = Table()
t["a"] = np.ma.array(
[
(1, 2, 3),
(4, 5, 6),
],
mask=[
(False, False, True),
(False, True, False),
],
dtype=dtype,
)
assert np.all(t["a"]["z"].mask == [False, False])
assert np.all(t["a"]["x"].mask == [False, True])
assert np.all(t["a"]["y"].mask == [True, False])
assert isinstance(t["a"], MaskedColumn)
def test_rows_with_mixins():
"""Test for #9165 to allow adding a list of mixin objects.
Also test for fix to #9357 where group_by() failed due to
mixin object not having info.indices set to [].
"""
tm = Time([1, 2], format="cxcsec")
q = [1, 2] * u.m
mixed1 = [1 * u.m, 2] # Mixed input, fails to convert to Quantity
mixed2 = [2, 1 * u.m] # Mixed input, not detected as potential mixin
rows = [
(1, q[0], tm[0]),
(2, q[1], tm[1]),
]
t = table.QTable(rows=rows)
t["a"] = [q[0], q[1]]
t["b"] = [tm[0], tm[1]]
t["m1"] = mixed1
t["m2"] = mixed2
assert np.all(t["col1"] == q)
assert np.all(t["col2"] == tm)
assert np.all(t["a"] == q)
assert np.all(t["b"] == tm)
assert np.all(t["m1"][ii] == mixed1[ii] for ii in range(2))
assert np.all(t["m2"][ii] == mixed2[ii] for ii in range(2))
assert type(t["m1"]) is table.Column
assert t["m1"].dtype is np.dtype(object)
assert type(t["m2"]) is table.Column
assert t["m2"].dtype is np.dtype(object)
# Ensure group_by() runs without failing for sortable columns.
# The columns 'm1', and 'm2' are object dtype and not sortable.
for name in ["col0", "col1", "col2", "a", "b"]:
t.group_by(name)
# For good measure include exactly the failure in #9357 in which the
# list of Time() objects is in the Table initializer.
mjds = [Time(58000, format="mjd")]
t = Table([mjds, ["gbt"]], names=("mjd", "obs"))
t.group_by("obs")
def test_iterrows():
dat = [
(1, 2, 3),
(4, 5, 6),
(7, 8, 6),
]
t = table.Table(rows=dat, names=("a", "b", "c"))
c_s = []
a_s = []
for c, a in t.iterrows("c", "a"):
a_s.append(a)
c_s.append(c)
assert np.all(t["a"] == a_s)
assert np.all(t["c"] == c_s)
rows = [row for row in t.iterrows()]
assert rows == dat
with pytest.raises(ValueError, match="d is not a valid column name"):
t.iterrows("d")
def test_values_and_types():
dat = [
(1, 2, 3),
(4, 5, 6),
(7, 8, 6),
]
t = table.Table(rows=dat, names=("a", "b", "c"))
assert isinstance(t.values(), type(OrderedDict().values()))
assert isinstance(t.columns.values(), type(OrderedDict().values()))
assert isinstance(t.columns.keys(), type(OrderedDict().keys()))
for i in t.values():
assert isinstance(i, table.column.Column)
def test_items():
dat = [
(1, 2, 3),
(4, 5, 6),
(7, 8, 9),
]
t = table.Table(rows=dat, names=("a", "b", "c"))
assert isinstance(t.items(), type(OrderedDict({}).items()))
for i in list(t.items()):
assert isinstance(i, tuple)
def test_read_write_not_replaceable():
t = table.Table()
with pytest.raises(AttributeError):
t.read = "fake_read"
with pytest.raises(AttributeError):
t.write = "fake_write"
def test_keep_columns_with_generator():
# Regression test for #12529
t = table.table_helpers.simple_table(1)
t.keep_columns(col for col in t.colnames if col == "a")
assert t.colnames == ["a"]
def test_remove_columns_with_generator():
# Regression test for #12529
t = table.table_helpers.simple_table(1)
t.remove_columns(col for col in t.colnames if col == "a")
assert t.colnames == ["b", "c"]
def test_keep_columns_invalid_names_messages():
t = table.table_helpers.simple_table(1)
with pytest.raises(KeyError, match='column "d" does not exist'):
t.keep_columns(["c", "d"])
with pytest.raises(KeyError, match="columns {'[de]', '[de]'} do not exist"):
t.keep_columns(["c", "d", "e"])
def test_remove_columns_invalid_names_messages():
t = table.table_helpers.simple_table(1)
with pytest.raises(KeyError, match='column "d" does not exist'):
t.remove_columns(["c", "d"])
with pytest.raises(KeyError, match="columns {'[de]', '[de]'} do not exist"):
t.remove_columns(["c", "d", "e"])
@pytest.mark.parametrize("path_type", ["str", "Path"])
def test_read_write_tilde_path(path_type, home_is_tmpdir):
if path_type == "str":
test_file = os.path.join("~", "test.csv")
else:
test_file = pathlib.Path("~", "test.csv")
t1 = Table()
t1["a"] = [1, 2, 3]
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2["a"] == [1, 2, 3])
# Ensure the data wasn't written to the literal tilde-prefixed path
assert not os.path.exists(test_file)
def test_add_list_order():
t = Table()
names = list(map(str, range(20)))
array = np.empty((20, 1))
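    # Iterating over the (20, 1) array yields 20 length-1 columns, assigned
    # the given names in order.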
t.add_columns(array, names=names)
assert t.colnames == names
|
1637ce59ac7b8e1dff41c8402a02874fe6775b8554c47d7694a64d8a6243baea | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import OrderedDict, UserDict
from collections.abc import Mapping
import numpy as np
import pytest
import astropy.units as u
from astropy.table import Column, MaskedColumn, QTable, Table, TableColumns
class DictLike(Mapping):
"""A minimal mapping-like object that does not subclass dict.
This is used to test code that expects dict-like but without actually
inheriting from dict.
"""
def __init__(self, *args, **kwargs):
self._data = dict(*args, **kwargs)
def __getitem__(self, item):
return self._data[item]
def __setitem__(self, item, value):
self._data[item] = value
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
class TestTableColumnsInit:
def test_init(self):
"""Test initialisation with lists, tuples, dicts of arrays
rather than Columns [regression test for #2647]"""
x1 = np.arange(10.0)
x2 = np.arange(5.0)
x3 = np.arange(7.0)
col_list = [("x1", x1), ("x2", x2), ("x3", x3)]
tc_list = TableColumns(col_list)
for col in col_list:
assert col[0] in tc_list
assert tc_list[col[0]] is col[1]
col_tuple = (("x1", x1), ("x2", x2), ("x3", x3))
tc_tuple = TableColumns(col_tuple)
for col in col_tuple:
assert col[0] in tc_tuple
assert tc_tuple[col[0]] is col[1]
col_dict = dict([("x1", x1), ("x2", x2), ("x3", x3)])
tc_dict = TableColumns(col_dict)
for col in tc_dict.keys():
assert col in tc_dict
assert tc_dict[col] is col_dict[col]
columns = [Column(col[1], name=col[0]) for col in col_list]
tc = TableColumns(columns)
for col in columns:
assert col.name in tc
assert tc[col.name] is col
# pytest.mark.usefixtures('table_type')
class BaseInitFrom:
def _setup(self, table_type):
pass
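    # Subclasses override _setup (or setup_method) to provide self.data; the
    # shared tests below expect data equivalent to the three columns
    # [1, 3], [2, 4] and [3, 5].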
def test_basic_init(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=("a", "b", "c"))
assert t.colnames == ["a", "b", "c"]
assert np.all(t["a"] == np.array([1, 3]))
assert np.all(t["b"] == np.array([2, 4]))
assert np.all(t["c"] == np.array([3, 5]))
assert all(t[name].name == name for name in t.colnames)
def test_set_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=("a", "b", "c"), dtype=("i4", "f4", "f8"))
assert t.colnames == ["a", "b", "c"]
assert np.all(t["a"] == np.array([1, 3], dtype="i4"))
assert np.all(t["b"] == np.array([2, 4], dtype="f4"))
assert np.all(t["c"] == np.array([3, 5], dtype="f8"))
assert t["a"].dtype.type == np.int32
assert t["b"].dtype.type == np.float32
assert t["c"].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_names_dtype_mismatch(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type(self.data, names=("a",), dtype=("i4", "f4", "i4"))
def test_names_cols_mismatch(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type(self.data, names=("a",), dtype="i4")
@pytest.mark.usefixtures("table_type")
class BaseInitFromListLike(BaseInitFrom):
def test_names_cols_mismatch(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type(self.data, names=["a"], dtype=[int])
def test_names_copy_false(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type(self.data, names=["a"], dtype=[int], copy=False)
@pytest.mark.usefixtures("table_type")
class BaseInitFromDictLike(BaseInitFrom):
pass
@pytest.mark.usefixtures("table_type")
class TestInitFromNdarrayHomo(BaseInitFromListLike):
def setup_method(self, method):
self.data = np.array([(1, 2, 3), (3, 4, 5)], dtype="i4")
def test_default_names(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.colnames == ["col0", "col1", "col2"]
def test_ndarray_ref(self, table_type):
"""Init with ndarray and copy=False and show that this is a reference
to input ndarray"""
self._setup(table_type)
t = table_type(self.data, copy=False)
t["col1"][1] = 0
assert t.as_array()["col1"][1] == 0
assert t["col1"][1] == 0
assert self.data[1][1] == 0
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=["a", None, "c"], dtype=[None, None, "f8"])
assert t.colnames == ["a", "col1", "c"]
assert t["a"].dtype.type == np.int32
assert t["col1"].dtype.type == np.int32
assert t["c"].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_ref(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=["a", None, "c"])
assert t.colnames == ["a", "col1", "c"]
assert t["a"].dtype.type == np.int32
assert t["col1"].dtype.type == np.int32
assert t["c"].dtype.type == np.int32
assert all(t[name].name == name for name in t.colnames)
@pytest.mark.usefixtures("table_type")
class TestInitFromListOfLists(BaseInitFromListLike):
def setup_method(self, table_type):
self._setup(table_type)
self.data = [
(np.int32(1), np.int32(3)),
Column(name="col1", data=[2, 4], dtype=np.int32),
np.array([3, 5], dtype=np.int32),
]
def test_default_names(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.colnames == ["col0", "col1", "col2"]
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=["b", None, "c"], dtype=["f4", None, "f8"])
assert t.colnames == ["b", "col1", "c"]
assert t["b"].dtype.type == np.float32
assert t["col1"].dtype.type == np.int32
assert t["c"].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_bad_data(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type([[1, 2], [3, 4, 5]])
@pytest.mark.usefixtures("table_type")
class TestInitFromListOfDicts(BaseInitFromListLike):
def _setup(self, table_type):
self.data = [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4, "c": 5}]
self.data_ragged = [{"a": 1, "b": 2}, {"a": 2, "c": 4}]
def test_names(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert all(colname in {"a", "b", "c"} for colname in t.colnames)
def test_names_ordered(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=("c", "b", "a"))
assert t.colnames == ["c", "b", "a"]
def test_missing_data_init_from_dict(self, table_type):
self._setup(table_type)
dat = self.data_ragged
for rows in [False, True]:
t = table_type(rows=dat) if rows else table_type(dat)
assert np.all(t["a"] == [1, 2])
assert np.all(t["b"].mask == [False, True])
assert np.all(t["b"].data == [2, 2])
assert np.all(t["c"].mask == [True, False])
assert np.all(t["c"].data == [4, 4])
assert type(t["a"]) is (MaskedColumn if t.masked else Column)
assert type(t["b"]) is MaskedColumn
assert type(t["c"]) is MaskedColumn
class TestInitFromListOfMapping(TestInitFromListOfDicts):
"""Test that init from a Mapping that is not a dict subclass works"""
def _setup(self, table_type):
self.data = [DictLike(a=1, b=2, c=3), DictLike(a=3, b=4, c=5)]
self.data_ragged = [DictLike(a=1, b=2), DictLike(a=2, c=4)]
# Make sure data rows are not a dict subclass
assert not isinstance(self.data[0], dict)
@pytest.mark.usefixtures("table_type")
class TestInitFromColsList(BaseInitFromListLike):
def _setup(self, table_type):
self.data = [
Column([1, 3], name="x", dtype=np.int32),
np.array([2, 4], dtype=np.int32),
np.array([3, 5], dtype="i8"),
]
def test_default_names(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.colnames == ["x", "col1", "col2"]
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=["b", None, "c"], dtype=["f4", None, "f8"])
assert t.colnames == ["b", "col1", "c"]
assert t["b"].dtype.type == np.float32
assert t["col1"].dtype.type == np.int32
assert t["c"].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_ref(self, table_type):
"""Test that initializing from a list of columns can be done by reference"""
self._setup(table_type)
t = table_type(self.data, copy=False)
t["x"][0] = 100
assert self.data[0][0] == 100
@pytest.mark.usefixtures("table_type")
class TestInitFromNdarrayStruct(BaseInitFromDictLike):
def _setup(self, table_type):
self.data = np.array(
[(1, 2, 3), (3, 4, 5)], dtype=[("x", "i8"), ("y", "i4"), ("z", "i8")]
)
def test_ndarray_ref(self, table_type):
"""Init with ndarray and copy=False and show that table uses reference
to input ndarray"""
self._setup(table_type)
t = table_type(self.data, copy=False)
t["x"][1] = 0 # Column-wise assignment
t[0]["y"] = 0 # Row-wise assignment
assert self.data["x"][1] == 0
assert self.data["y"][0] == 0
assert np.all(np.array(t) == self.data)
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=["e", None, "d"], dtype=["f4", None, "f8"])
assert t.colnames == ["e", "y", "d"]
assert t["e"].dtype.type == np.float32
assert t["y"].dtype.type == np.int32
assert t["d"].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_ref(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=["e", None, "d"], copy=False)
assert t.colnames == ["e", "y", "d"]
assert t["e"].dtype.type == np.int64
assert t["y"].dtype.type == np.int32
assert t["d"].dtype.type == np.int64
assert all(t[name].name == name for name in t.colnames)
@pytest.mark.usefixtures("table_type")
class TestInitFromDict(BaseInitFromDictLike):
def _setup(self, table_type):
self.data = dict(
[
("a", Column([1, 3], name="x")),
("b", [2, 4]),
("c", np.array([3, 5], dtype="i8")),
]
)
@pytest.mark.usefixtures("table_type")
class TestInitFromMapping(BaseInitFromDictLike):
def _setup(self, table_type):
self.data = UserDict(
[
("a", Column([1, 3], name="x")),
("b", [2, 4]),
("c", np.array([3, 5], dtype="i8")),
]
)
assert isinstance(self.data, Mapping)
assert not isinstance(self.data, dict)
@pytest.mark.usefixtures("table_type")
class TestInitFromOrderedDict(BaseInitFromDictLike):
def _setup(self, table_type):
self.data = OrderedDict(
[
("a", Column(name="x", data=[1, 3])),
("b", [2, 4]),
("c", np.array([3, 5], dtype="i8")),
]
)
def test_col_order(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.colnames == ["a", "b", "c"]
@pytest.mark.usefixtures("table_type")
class TestInitFromRow(BaseInitFromDictLike):
def _setup(self, table_type):
arr = np.array(
[(1, 2, 3), (3, 4, 5)], dtype=[("x", "i8"), ("y", "i8"), ("z", "f8")]
)
self.data = table_type(arr, meta={"comments": ["comment1", "comment2"]})
def test_init_from_row(self, table_type):
self._setup(table_type)
t = table_type(self.data[0])
# Values and meta match original
assert t.meta["comments"][0] == "comment1"
for name in t.colnames:
assert np.all(t[name] == self.data[name][0:1])
assert all(t[name].name == name for name in t.colnames)
# Change value in new instance and check that original is the same
t["x"][0] = 8
t.meta["comments"][1] = "new comment2"
assert np.all(t["x"] == np.array([8]))
assert np.all(self.data["x"] == np.array([1, 3]))
assert self.data.meta["comments"][1] == "comment2"
@pytest.mark.usefixtures("table_type")
class TestInitFromTable(BaseInitFromDictLike):
def _setup(self, table_type):
arr = np.array(
[(1, 2, 3), (3, 4, 5)], dtype=[("x", "i8"), ("y", "i8"), ("z", "f8")]
)
self.data = table_type(arr, meta={"comments": ["comment1", "comment2"]})
def test_data_meta_copy(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.meta["comments"][0] == "comment1"
t["x"][1] = 8
t.meta["comments"][1] = "new comment2"
assert self.data.meta["comments"][1] == "comment2"
assert np.all(t["x"] == np.array([1, 8]))
assert np.all(self.data["x"] == np.array([1, 3]))
assert t["z"].name == "z"
assert all(t[name].name == name for name in t.colnames)
def test_table_ref(self, table_type):
self._setup(table_type)
t = table_type(self.data, copy=False)
t["x"][1] = 0
assert t["x"][1] == 0
assert self.data["x"][1] == 0
assert np.all(t.as_array() == self.data.as_array())
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=["e", None, "d"], dtype=["f4", None, "i8"])
assert t.colnames == ["e", "y", "d"]
assert t["e"].dtype.type == np.float32
assert t["y"].dtype.type == np.int64
assert t["d"].dtype.type == np.int64
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_ref(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=["e", None, "d"], copy=False)
assert t.colnames == ["e", "y", "d"]
assert t["e"].dtype.type == np.int64
assert t["y"].dtype.type == np.int64
assert t["d"].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_init_from_columns(self, table_type):
self._setup(table_type)
t = table_type(self.data)
t2 = table_type(t.columns["z", "x", "y"])
assert t2.colnames == ["z", "x", "y"]
assert t2.dtype.names == ("z", "x", "y")
def test_init_from_columns_slice(self, table_type):
self._setup(table_type)
t = table_type(self.data)
t2 = table_type(t.columns[0:2])
assert t2.colnames == ["x", "y"]
assert t2.dtype.names == ("x", "y")
def test_init_from_columns_mix(self, table_type):
self._setup(table_type)
t = table_type(self.data)
t2 = table_type([t.columns[0], t.columns["z"]])
assert t2.colnames == ["x", "z"]
assert t2.dtype.names == ("x", "z")
@pytest.mark.usefixtures("table_type")
class TestInitFromNone:
    # Note: test_table.TestEmptyData tests initializing a completely empty
# table and adding data.
def test_data_none_with_cols(self, table_type):
"""
Test different ways of initing an empty table
"""
np_t = np.empty(0, dtype=[("a", "f4", (2,)), ("b", "i4")])
for kwargs in (
{"names": ("a", "b")},
{"names": ("a", "b"), "dtype": (("f4", (2,)), "i4")},
{"dtype": [("a", "f4", (2,)), ("b", "i4")]},
{"dtype": np_t.dtype},
):
t = table_type(**kwargs)
assert t.colnames == ["a", "b"]
assert len(t["a"]) == 0
assert len(t["b"]) == 0
if "dtype" in kwargs:
assert t["a"].dtype.type == np.float32
assert t["b"].dtype.type == np.int32
assert t["a"].shape[1:] == (2,)
@pytest.mark.usefixtures("table_type")
class TestInitFromRows:
def test_init_with_rows(self, table_type):
for rows in ([[1, "a"], [2, "b"]], [(1, "a"), (2, "b")], ((1, "a"), (2, "b"))):
t = table_type(rows=rows, names=("a", "b"))
assert np.all(t["a"] == [1, 2])
assert np.all(t["b"] == ["a", "b"])
assert t.colnames == ["a", "b"]
assert t["a"].dtype.kind == "i"
assert t["b"].dtype.kind in ("S", "U")
# Regression test for
# https://github.com/astropy/astropy/issues/3052
assert t["b"].dtype.str.endswith("1")
rows = np.arange(6).reshape(2, 3)
t = table_type(rows=rows, names=("a", "b", "c"), dtype=["f8", "f4", "i8"])
assert np.all(t["a"] == [0, 3])
assert np.all(t["b"] == [1, 4])
assert np.all(t["c"] == [2, 5])
assert t.colnames == ["a", "b", "c"]
assert t["a"].dtype.str.endswith("f8")
assert t["b"].dtype.str.endswith("f4")
assert t["c"].dtype.str.endswith("i8")
def test_init_with_rows_and_data(self, table_type):
with pytest.raises(ValueError) as err:
table_type(data=[[1]], rows=[[1]])
assert "Cannot supply both `data` and `rows` values" in str(err.value)
@pytest.mark.parametrize("has_data", [True, False])
def test_init_table_with_names_and_structured_dtype(has_data):
"""Test fix for #10393"""
arr = np.ones(2, dtype=np.dtype([("a", "i4"), ("b", "f4")]))
data_args = [arr] if has_data else []
t = Table(*data_args, names=["x", "y"], dtype=arr.dtype)
assert t.colnames == ["x", "y"]
assert str(t["x"].dtype) == "int32"
assert str(t["y"].dtype) == "float32"
assert len(t) == (2 if has_data else 0)
@pytest.mark.usefixtures("table_type")
def test_init_and_ref_from_multidim_ndarray(table_type):
"""
Test that initializing from an ndarray structured array with
a multi-dim column works for both copy=False and True and that
the referencing is as expected.
"""
for copy in (False, True):
nd = np.array(
[(1, [10, 20]), (3, [30, 40])], dtype=[("a", "i8"), ("b", "i8", (2,))]
)
t = table_type(nd, copy=copy)
assert t.colnames == ["a", "b"]
assert t["a"].shape == (2,)
assert t["b"].shape == (2, 2)
t["a"][0] = -200
t["b"][1][1] = -100
if copy:
assert nd["a"][0] == 1
assert nd["b"][1][1] == 40
else:
assert nd["a"][0] == -200
assert nd["b"][1][1] == -100
@pytest.mark.usefixtures("table_type")
@pytest.mark.parametrize("copy", [False, True])
def test_init_and_ref_from_dict(table_type, copy):
"""
Test that initializing from a dict works for both copy=False and True and that
the referencing is as expected.
"""
x1 = np.arange(10.0)
x2 = np.zeros(10)
col_dict = dict([("x1", x1), ("x2", x2)])
t = table_type(col_dict, copy=copy)
assert set(t.colnames) == {"x1", "x2"}
assert t["x1"].shape == (10,)
assert t["x2"].shape == (10,)
t["x1"][0] = -200
t["x2"][1] = -100
if copy:
assert x1[0] == 0.0
assert x2[1] == 0.0
else:
assert x1[0] == -200
assert x2[1] == -100
def test_add_none_object_column():
"""Test fix for a problem introduced in #10636 (see
https://github.com/astropy/astropy/pull/10636#issuecomment-676847515)
"""
t = Table(data={"a": [1, 2, 3]})
t["b"] = None
assert all(val is None for val in t["b"])
assert t["b"].dtype.kind == "O"
@pytest.mark.usefixtures("table_type")
def test_init_from_row_OrderedDict(table_type):
row1 = OrderedDict([("b", 1), ("a", 0)])
row2 = {"a": 10, "b": 20}
rows12 = [row1, row2]
row3 = dict([("b", 1), ("a", 0)])
row4 = dict([("b", 11), ("a", 10)])
rows34 = [row3, row4]
t1 = table_type(rows=rows12)
t2 = table_type(rows=rows34)
t3 = t2[sorted(t2.colnames)]
assert t1.colnames == ["b", "a"]
assert t2.colnames == ["b", "a"]
assert t3.colnames == ["a", "b"]
def test_init_from_rows_as_generator():
rows = ((1 + ii, 2 + ii) for ii in range(2))
t = Table(rows=rows)
assert np.all(t["col0"] == [1, 2])
assert np.all(t["col1"] == [2, 3])
@pytest.mark.parametrize("dtype", ["fail", "i4"])
def test_init_bad_dtype_in_empty_table(dtype):
with pytest.raises(
ValueError, match="type was specified but could not be parsed for column names"
):
Table(dtype=dtype)
def test_init_data_type_not_allowed_to_init_table():
with pytest.raises(
ValueError, match="Data type <class 'str'> not allowed to init Table"
):
Table("hello")
def test_init_Table_from_list_of_quantity():
"""Test fix for #11327"""
# Variation on original example in #11327 at the Table level
data = [{"x": 5 * u.m, "y": 1 * u.m}, {"x": 10 * u.m, "y": 3}]
t = Table(data)
assert t["x"].unit is u.m
assert t["y"].unit is None
assert t["x"].dtype.kind == "f"
assert t["y"].dtype.kind == "O"
assert np.all(t["x"] == [5, 10])
assert t["y"][0] == 1 * u.m
assert t["y"][1] == 3
def test_init_QTable_and_set_units():
"""
Test fix for #14336 where providing units to QTable init fails.
This applies when the input is a Quantity.
"""
t = QTable([[1, 2] * u.km, [1, 2]], units={"col0": u.m, "col1": u.s})
assert t["col0"].unit == u.m
assert np.all(t["col0"].value == [1000, 2000])
assert t["col1"].unit == u.s
assert np.all(t["col1"].value == [1, 2])
|
2790e5c04991dd53c693f2cf959d57011abdc71a64584548d91bacda1e032c9e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import coordinates, time
from astropy import units as u
from astropy.table import Column, NdarrayMixin, QTable, Table, table_helpers, unique
from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1
from astropy.utils.exceptions import AstropyUserWarning
def sort_eq(list1, list2):
return sorted(list1) == sorted(list2)
def test_column_group_by(T1):
for masked in (False, True):
t1 = QTable(T1, masked=masked)
t1a = t1["a"].copy()
# Group by a Column (i.e. numpy array)
t1ag = t1a.group_by(t1["a"])
assert np.all(t1ag.groups.indices == np.array([0, 1, 4, 8]))
# Group by a Table
t1ag = t1a.group_by(t1["a", "b"])
assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
# Group by a numpy structured array
t1ag = t1a.group_by(t1["a", "b"].as_array())
assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
def test_table_group_by(T1):
"""
Test basic table group_by functionality for possible key types and for
masked/unmasked tables.
"""
for masked in (False, True):
t1 = QTable(T1, masked=masked)
# Group by a single column key specified by name
tg = t1.group_by("a")
assert np.all(tg.groups.indices == np.array([0, 1, 4, 8]))
assert str(tg.groups) == "<TableGroups indices=[0 1 4 8]>"
assert str(tg["a"].groups) == "<ColumnGroups indices=[0 1 4 8]>"
# Sorted by 'a' and in original order for rest
assert tg.pformat() == [
" a b c d q ",
" m ",
"--- --- --- --- ---",
" 0 a 0.0 4 4.0",
" 1 b 3.0 5 5.0",
" 1 a 2.0 6 6.0",
" 1 a 1.0 7 7.0",
" 2 c 7.0 0 0.0",
" 2 b 5.0 1 1.0",
" 2 b 6.0 2 2.0",
" 2 a 4.0 3 3.0",
]
assert tg.meta["ta"] == 1
assert tg["c"].meta["a"] == 1
assert tg["c"].description == "column c"
# Group by a table column
tg2 = t1.group_by(t1["a"])
assert tg.pformat() == tg2.pformat()
# Group by two columns spec'd by name
for keys in (["a", "b"], ("a", "b")):
tg = t1.group_by(keys)
assert np.all(tg.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
# Sorted by 'a', 'b' and in original order for rest
assert tg.pformat() == [
" a b c d q ",
" m ",
"--- --- --- --- ---",
" 0 a 0.0 4 4.0",
" 1 a 2.0 6 6.0",
" 1 a 1.0 7 7.0",
" 1 b 3.0 5 5.0",
" 2 a 4.0 3 3.0",
" 2 b 5.0 1 1.0",
" 2 b 6.0 2 2.0",
" 2 c 7.0 0 0.0",
]
# Group by a Table
tg2 = t1.group_by(t1["a", "b"])
assert tg.pformat() == tg2.pformat()
# Group by a structured array
tg2 = t1.group_by(t1["a", "b"].as_array())
assert tg.pformat() == tg2.pformat()
# Group by a simple ndarray
tg = t1.group_by(np.array([0, 1, 0, 1, 2, 1, 0, 0]))
assert np.all(tg.groups.indices == np.array([0, 4, 7, 8]))
assert tg.pformat() == [
" a b c d q ",
" m ",
"--- --- --- --- ---",
" 2 c 7.0 0 0.0",
" 2 b 6.0 2 2.0",
" 1 a 2.0 6 6.0",
" 1 a 1.0 7 7.0",
" 2 b 5.0 1 1.0",
" 2 a 4.0 3 3.0",
" 1 b 3.0 5 5.0",
" 0 a 0.0 4 4.0",
]
def test_groups_keys(T1):
tg = T1.group_by("a")
keys = tg.groups.keys
assert keys.dtype.names == ("a",)
assert np.all(keys["a"] == np.array([0, 1, 2]))
tg = T1.group_by(["a", "b"])
keys = tg.groups.keys
assert keys.dtype.names == ("a", "b")
assert np.all(keys["a"] == np.array([0, 1, 1, 2, 2, 2]))
assert np.all(keys["b"] == np.array(["a", "a", "b", "a", "b", "c"]))
# Grouping by Column ignores column name
tg = T1.group_by(T1["b"])
keys = tg.groups.keys
assert keys.dtype.names is None
def test_groups_iterator(T1):
tg = T1.group_by("a")
for ii, group in enumerate(tg.groups):
assert group.pformat() == tg.groups[ii].pformat()
assert group["a"][0] == tg["a"][tg.groups.indices[ii]]
def test_grouped_copy(T1):
"""
Test that copying a table or column copies the groups properly
"""
for masked in (False, True):
t1 = QTable(T1, masked=masked)
tg = t1.group_by("a")
tgc = tg.copy()
assert np.all(tgc.groups.indices == tg.groups.indices)
assert np.all(tgc.groups.keys == tg.groups.keys)
tac = tg["a"].copy()
assert np.all(tac.groups.indices == tg["a"].groups.indices)
c1 = t1["a"].copy()
gc1 = c1.group_by(t1["a"])
gc1c = gc1.copy()
assert np.all(gc1c.groups.indices == np.array([0, 1, 4, 8]))
def test_grouped_slicing(T1):
"""
Test that slicing a table removes previous grouping
"""
for masked in (False, True):
t1 = QTable(T1, masked=masked)
# Regular slice of a table
tg = t1.group_by("a")
tg2 = tg[3:5]
assert np.all(tg2.groups.indices == np.array([0, len(tg2)]))
assert tg2.groups.keys is None
def test_group_column_from_table(T1):
"""
Group a column that is part of a table
"""
cg = T1["c"].group_by(np.array(T1["a"]))
assert np.all(cg.groups.keys == np.array([0, 1, 2]))
assert np.all(cg.groups.indices == np.array([0, 1, 4, 8]))
def test_table_groups_mask_index(T1):
"""
Use boolean mask as item in __getitem__ for groups
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by("a")
t2 = t1.groups[np.array([True, False, True])]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys["a"] == np.array([0, 2]))
def test_table_groups_array_index(T1):
"""
Use numpy array as item in __getitem__ for groups
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by("a")
t2 = t1.groups[np.array([0, 2])]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys["a"] == np.array([0, 2]))
def test_table_groups_slicing(T1):
"""
Test that slicing table groups works
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by("a")
# slice(0, 2)
t2 = t1.groups[0:2]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[1].pformat()
assert np.all(t2.groups.keys["a"] == np.array([0, 1]))
# slice(1, 2)
t2 = t1.groups[1:2]
assert len(t2.groups) == 1
assert t2.groups[0].pformat() == t1.groups[1].pformat()
assert np.all(t2.groups.keys["a"] == np.array([1]))
# slice(0, 3, 2)
t2 = t1.groups[0:3:2]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys["a"] == np.array([0, 2]))
def test_grouped_item_access(T1):
"""
Test that column slicing preserves grouping
"""
for masked in (False, True):
t1 = Table(T1, masked=masked)
# Regular slice of a table
tg = t1.group_by("a")
tgs = tg["a", "c", "d"]
assert np.all(tgs.groups.keys == tg.groups.keys)
assert np.all(tgs.groups.indices == tg.groups.indices)
tgsa = tgs.groups.aggregate(np.sum)
assert tgsa.pformat() == [
" a c d ",
"--- ---- ---",
" 0 0.0 4",
" 1 6.0 18",
" 2 22.0 6",
]
tgs = tg["c", "d"]
assert np.all(tgs.groups.keys == tg.groups.keys)
assert np.all(tgs.groups.indices == tg.groups.indices)
tgsa = tgs.groups.aggregate(np.sum)
assert tgsa.pformat() == [
" c d ",
"---- ---",
" 0.0 4",
" 6.0 18",
"22.0 6",
]
def test_mutable_operations(T1):
"""
    Operations like adding or deleting a row should remove the grouping,
    but adding, removing, or renaming a column should retain the grouping.
"""
for masked in (False, True):
t1 = QTable(T1, masked=masked)
# add row
tg = t1.group_by("a")
tg.add_row((0, "a", 3.0, 4, 4 * u.m))
assert np.all(tg.groups.indices == np.array([0, len(tg)]))
assert tg.groups.keys is None
# remove row
tg = t1.group_by("a")
tg.remove_row(4)
assert np.all(tg.groups.indices == np.array([0, len(tg)]))
assert tg.groups.keys is None
# add column
tg = t1.group_by("a")
indices = tg.groups.indices.copy()
tg.add_column(Column(name="e", data=np.arange(len(tg))))
assert np.all(tg.groups.indices == indices)
assert np.all(tg["e"].groups.indices == indices)
assert np.all(tg["e"].groups.keys == tg.groups.keys)
# remove column (not key column)
tg = t1.group_by("a")
tg.remove_column("b")
assert np.all(tg.groups.indices == indices)
# Still has original key col names
assert tg.groups.keys.dtype.names == ("a",)
assert np.all(tg["a"].groups.indices == indices)
# remove key column
tg = t1.group_by("a")
tg.remove_column("a")
assert np.all(tg.groups.indices == indices)
assert tg.groups.keys.dtype.names == ("a",)
assert np.all(tg["b"].groups.indices == indices)
# rename key column
tg = t1.group_by("a")
tg.rename_column("a", "aa")
assert np.all(tg.groups.indices == indices)
assert tg.groups.keys.dtype.names == ("a",)
assert np.all(tg["aa"].groups.indices == indices)
def test_group_by_masked(T1):
t1m = QTable(T1, masked=True)
t1m["c"].mask[4] = True
t1m["d"].mask[5] = True
assert t1m.group_by("a").pformat() == [
" a b c d q ",
" m ",
"--- --- --- --- ---",
" 0 a -- 4 4.0",
" 1 b 3.0 -- 5.0",
" 1 a 2.0 6 6.0",
" 1 a 1.0 7 7.0",
" 2 c 7.0 0 0.0",
" 2 b 5.0 1 1.0",
" 2 b 6.0 2 2.0",
" 2 a 4.0 3 3.0",
]
def test_group_by_errors(T1):
"""
Appropriate errors get raised.
"""
# Bad column name as string
with pytest.raises(ValueError):
T1.group_by("f")
# Bad column names in list
with pytest.raises(ValueError):
T1.group_by(["f", "g"])
# Wrong length array
with pytest.raises(ValueError):
T1.group_by(np.array([1, 2]))
# Wrong type
with pytest.raises(TypeError):
T1.group_by(None)
# Masked key column
t1 = QTable(T1, masked=True)
t1["a"].mask[4] = True
with pytest.raises(ValueError):
t1.group_by("a")
def test_groups_keys_meta(T1):
"""
Make sure the keys meta['grouped_by_table_cols'] is working.
"""
# Group by column in this table
tg = T1.group_by("a")
assert tg.groups.keys.meta["grouped_by_table_cols"] is True
assert tg["c"].groups.keys.meta["grouped_by_table_cols"] is True
assert tg.groups[1].groups.keys.meta["grouped_by_table_cols"] is True
assert (
tg["d"]
.groups[np.array([False, True, True])]
.groups.keys.meta["grouped_by_table_cols"]
is True
)
# Group by external Table
tg = T1.group_by(T1["a", "b"])
assert tg.groups.keys.meta["grouped_by_table_cols"] is False
assert tg["c"].groups.keys.meta["grouped_by_table_cols"] is False
assert tg.groups[1].groups.keys.meta["grouped_by_table_cols"] is False
# Group by external numpy array
tg = T1.group_by(T1["a", "b"].as_array())
assert not hasattr(tg.groups.keys, "meta")
assert not hasattr(tg["c"].groups.keys, "meta")
# Group by Column
tg = T1.group_by(T1["a"])
assert "grouped_by_table_cols" not in tg.groups.keys.meta
assert "grouped_by_table_cols" not in tg["c"].groups.keys.meta
def test_table_aggregate(T1):
"""
Aggregate a table
"""
# Table with only summable cols
t1 = T1["a", "c", "d"]
tg = t1.group_by("a")
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [
" a c d ",
"--- ---- ---",
" 0 0.0 4",
" 1 6.0 18",
" 2 22.0 6",
]
# Reverts to default groups
assert np.all(tga.groups.indices == np.array([0, 3]))
assert tga.groups.keys is None
# metadata survives
assert tga.meta["ta"] == 1
assert tga["c"].meta["a"] == 1
assert tga["c"].description == "column c"
# Aggregate with np.sum with masked elements. This results
# in one group with no elements, hence a nan result and conversion
# to float for the 'd' column.
t1m = QTable(T1, masked=True)
t1m["c"].mask[4:6] = True
t1m["d"].mask[4:6] = True
tg = t1m.group_by("a")
with pytest.warns(UserWarning, match="converting a masked element to nan"):
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [
" a c d q ",
" m ",
"--- ---- ---- ----",
" 0 nan nan 4.0",
" 1 3.0 13.0 18.0",
" 2 22.0 6.0 6.0",
]
# Aggregate with np.sum with masked elements, but where every
# group has at least one remaining (unmasked) element. Then
# the int column stays as an int.
t1m = QTable(t1, masked=True)
t1m["c"].mask[5] = True
t1m["d"].mask[5] = True
tg = t1m.group_by("a")
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [
" a c d ",
"--- ---- ---",
" 0 0.0 4",
" 1 3.0 13",
" 2 22.0 6",
]
    # Aggregate with a column type that cannot be supplied to the aggregating
    # function. This raises a warning but still works.
tg = T1.group_by("a")
with pytest.warns(AstropyUserWarning, match="Cannot aggregate column"):
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [
" a c d q ",
" m ",
"--- ---- --- ----",
" 0 0.0 4 4.0",
" 1 6.0 18 18.0",
" 2 22.0 6 6.0",
]
def test_table_aggregate_reduceat(T1):
"""
Aggregate table with functions which have a reduceat method
"""
# Comparison functions without reduceat
def np_mean(x):
return np.mean(x)
def np_sum(x):
return np.sum(x)
def np_add(x):
return np.add(x)
# Table with only summable cols
t1 = T1["a", "c", "d"]
tg = t1.group_by("a")
# Comparison
tga_r = tg.groups.aggregate(np.sum)
tga_a = tg.groups.aggregate(np.add)
tga_n = tg.groups.aggregate(np_sum)
assert np.all(tga_r == tga_n)
assert np.all(tga_a == tga_n)
assert tga_n.pformat() == [
" a c d ",
"--- ---- ---",
" 0 0.0 4",
" 1 6.0 18",
" 2 22.0 6",
]
tga_r = tg.groups.aggregate(np.mean)
tga_n = tg.groups.aggregate(np_mean)
assert np.all(tga_r == tga_n)
assert tga_n.pformat() == [
" a c d ",
"--- --- ---",
" 0 0.0 4.0",
" 1 2.0 6.0",
" 2 5.5 1.5",
]
# Binary ufunc np_add should raise warning without reduceat
t2 = T1["a", "c"]
tg = t2.group_by("a")
with pytest.warns(AstropyUserWarning, match="Cannot aggregate column"):
tga = tg.groups.aggregate(np_add)
assert tga.pformat() == [" a ", "---", " 0", " 1", " 2"]
def test_column_aggregate(T1):
"""
Aggregate a single table column
"""
for masked in (False, True):
tg = QTable(T1, masked=masked).group_by("a")
tga = tg["c"].groups.aggregate(np.sum)
assert tga.pformat() == [" c ", "----", " 0.0", " 6.0", "22.0"]
@pytest.mark.skipif(
not NUMPY_LT_1_22 and NUMPY_LT_1_22_1,
reason="https://github.com/numpy/numpy/issues/20699",
)
def test_column_aggregate_f8():
"""https://github.com/astropy/astropy/issues/12706"""
# Just want to make sure it does not crash again.
for masked in (False, True):
tg = Table({"a": np.arange(2, dtype=">f8")}, masked=masked).group_by("a")
tga = tg["a"].groups.aggregate(np.sum)
assert tga.pformat() == [" a ", "---", "0.0", "1.0"]
def test_table_filter():
"""
Table groups filtering
"""
def all_positive(table, key_colnames):
return all(
np.all(table[colname] >= 0)
for colname in table.colnames
if colname not in key_colnames
)
# Negative value in 'a' column should not filter because it is a key col
t = Table.read(
[
" a c d",
" -2 7.0 0",
" -2 5.0 1",
" 0 0.0 4",
" 1 3.0 5",
" 1 2.0 -6",
" 1 1.0 7",
" 3 3.0 5",
" 3 -2.0 6",
" 3 1.0 7",
],
format="ascii",
)
tg = t.group_by("a")
t2 = tg.groups.filter(all_positive)
assert t2.groups[0].pformat() == [
" a c d ",
"--- --- ---",
" -2 7.0 0",
" -2 5.0 1",
]
assert t2.groups[1].pformat() == [" a c d ", "--- --- ---", " 0 0.0 4"]
def test_column_filter():
"""
    Column groups filtering
"""
def all_positive(column):
if np.any(column < 0):
return False
return True
# Negative value in 'a' column should not filter because it is a key col
t = Table.read(
[
" a c d",
" -2 7.0 0",
" -2 5.0 1",
" 0 0.0 4",
" 1 3.0 5",
" 1 2.0 -6",
" 1 1.0 7",
" 3 3.0 5",
" 3 -2.0 6",
" 3 1.0 7",
],
format="ascii",
)
tg = t.group_by("a")
c2 = tg["c"].groups.filter(all_positive)
assert len(c2.groups) == 3
assert c2.groups[0].pformat() == [" c ", "---", "7.0", "5.0"]
assert c2.groups[1].pformat() == [" c ", "---", "0.0"]
assert c2.groups[2].pformat() == [" c ", "---", "3.0", "2.0", "1.0"]
def test_group_mixins():
"""
Test grouping a table with mixin columns
"""
# Setup mixins
idx = np.arange(4)
x = np.array([3.0, 1.0, 2.0, 1.0])
q = x * u.m
lon = coordinates.Longitude(x * u.deg)
lat = coordinates.Latitude(x * u.deg)
# For Time do J2000.0 + few * 0.1 ns (this requires > 64 bit precision)
tm = time.Time(2000, format="jyear") + time.TimeDelta(x * 1e-10, format="sec")
sc = coordinates.SkyCoord(ra=lon, dec=lat)
aw = table_helpers.ArrayWrapper(x)
nd = np.array([(3, "c"), (1, "a"), (2, "b"), (1, "a")], dtype="<i4,|S1").view(
NdarrayMixin
)
qt = QTable(
[idx, x, q, lon, lat, tm, sc, aw, nd],
names=["idx", "x", "q", "lon", "lat", "tm", "sc", "aw", "nd"],
)
# Test group_by with each supported mixin type
mixin_keys = ["x", "q", "lon", "lat", "tm", "sc", "aw", "nd"]
for key in mixin_keys:
qtg = qt.group_by(key)
# Test that it got the sort order correct
assert np.all(qtg["idx"] == [1, 3, 2, 0])
# Test that the groups are right
# Note: skip testing SkyCoord column because that doesn't have equality
for name in ["x", "q", "lon", "lat", "tm", "aw", "nd"]:
assert np.all(qt[name][[1, 3]] == qtg.groups[0][name])
assert np.all(qt[name][[2]] == qtg.groups[1][name])
assert np.all(qt[name][[0]] == qtg.groups[2][name])
# Test that unique also works with mixins since most of the work is
# done with group_by(). This is using *every* mixin as key.
uqt = unique(qt, keys=mixin_keys)
assert len(uqt) == 3
assert np.all(uqt["idx"] == [1, 2, 0])
assert np.all(uqt["x"] == [1.0, 2.0, 3.0])
# Column group_by() with mixins
idxg = qt["idx"].group_by(qt[mixin_keys])
assert np.all(idxg == [1, 3, 2, 0])
@pytest.mark.parametrize(
"col",
[
time.TimeDelta([1, 2], format="sec"),
time.Time([1, 2], format="cxcsec"),
coordinates.SkyCoord([1, 2], [3, 4], unit="deg,deg"),
],
)
def test_group_mixins_unsupported(col):
"""Test that aggregating unsupported mixins produces a warning only"""
t = Table([[1, 1], [3, 4], col], names=["a", "b", "mix"])
tg = t.group_by("a")
with pytest.warns(AstropyUserWarning, match="Cannot aggregate column 'mix'"):
tg.groups.aggregate(np.sum)
|
5aed3d06d852ea5490782f91524185d1ce72fce54fdc50fded4dcde4b0cd64fc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test behavior related to masked tables"""
import numpy as np
import numpy.ma as ma
import pytest
import astropy.units as u
from astropy.table import Column, MaskedColumn, QTable, Table
from astropy.table.column import BaseColumn
from astropy.time import Time
from astropy.utils.masked import Masked
class SetupData:
def setup_method(self, method):
self.a = MaskedColumn(name="a", data=[1, 2, 3], fill_value=1)
self.b = MaskedColumn(name="b", data=[4, 5, 6], mask=True)
self.c = MaskedColumn(name="c", data=[7, 8, 9], mask=False)
self.d_mask = np.array([False, True, False])
self.d = MaskedColumn(name="d", data=[7, 8, 7], mask=self.d_mask)
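        # d_mask is kept separately so tests can verify that the column copies
        # the input mask rather than referencing it.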
self.t = Table([self.a, self.b], masked=True)
self.ca = Column(name="ca", data=[1, 2, 3])
self.sc = MaskedColumn(
name="sc",
data=[(1, 1.0), (2, 2.0), (3, 3.0)],
dtype="i8,f8",
fill_value=(0, -1.0),
)
class TestPprint(SetupData):
def test_pformat(self):
assert self.t.pformat() == [
" a b ",
"--- ---",
" 1 --",
" 2 --",
" 3 --",
]
class TestFilled:
"""Test the filled method in MaskedColumn and Table"""
def setup_method(self, method):
mask = [True, False, False]
self.meta = {"a": 1, "b": [2, 3]}
self.a = MaskedColumn(
name="a", data=[1, 2, 3], fill_value=10, mask=mask, meta={"a": 1}
)
self.b = MaskedColumn(
name="b", data=[4.0, 5.0, 6.0], fill_value=10.0, mask=mask
)
self.c = MaskedColumn(name="c", data=["7", "8", "9"], fill_value="1", mask=mask)
def test_filled_column(self):
f = self.a.filled()
assert np.all(f == [10, 2, 3])
assert isinstance(f, Column)
assert not isinstance(f, MaskedColumn)
# Confirm copy, not ref
assert f.meta["a"] == 1
f.meta["a"] = 2
f[1] = 100
assert self.a[1] == 2
assert self.a.meta["a"] == 1
# Fill with arg fill_value not column fill_value
f = self.a.filled(20)
assert np.all(f == [20, 2, 3])
f = self.b.filled()
assert np.all(f == [10.0, 5.0, 6.0])
assert isinstance(f, Column)
f = self.c.filled()
assert np.all(f == ["1", "8", "9"])
assert isinstance(f, Column)
def test_filled_masked_table(self, tableclass):
t = tableclass([self.a, self.b, self.c], meta=self.meta)
f = t.filled()
assert isinstance(f, Table)
assert f.masked is False
assert np.all(f["a"] == [10, 2, 3])
assert np.allclose(f["b"], [10.0, 5.0, 6.0])
assert np.all(f["c"] == ["1", "8", "9"])
# Confirm copy, not ref
assert f.meta["b"] == [2, 3]
f.meta["b"][0] = 20
assert t.meta["b"] == [2, 3]
f["a"][2] = 100
assert t["a"][2] == 3
def test_filled_unmasked_table(self, tableclass):
t = tableclass([(1, 2), ("3", "4")], names=("a", "b"), meta=self.meta)
f = t.filled()
assert isinstance(f, Table)
assert f.masked is False
assert np.all(f["a"] == t["a"])
assert np.all(f["b"] == t["b"])
# Confirm copy, not ref
assert f.meta["b"] == [2, 3]
f.meta["b"][0] = 20
assert t.meta["b"] == [2, 3]
f["a"][1] = 100
assert t["a"][1] == 2
class TestFillValue(SetupData):
"""Test setting and getting fill value in MaskedColumn and Table"""
def test_init_set_fill_value(self):
"""Check that setting fill_value in the MaskedColumn init works"""
assert self.a.fill_value == 1
c = MaskedColumn(name="c", data=["xxxx", "yyyy"], fill_value="none")
assert c.fill_value == "none"
def test_set_get_fill_value_for_bare_column(self):
"""Check set and get of fill value works for bare Column"""
self.d.fill_value = -999
assert self.d.fill_value == -999
assert np.all(self.d.filled() == [7, -999, 7])
def test_set_get_fill_value_for_str_column(self):
c = MaskedColumn(name="c", data=["xxxx", "yyyy"], mask=[True, False])
# assert np.all(c.filled() == ['N/A', 'yyyy'])
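        # ('N/A' is numpy's default fill_value for string dtypes; the lines below
        # override it explicitly.)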
c.fill_value = "ABCDEF"
assert c.fill_value == "ABCD" # string truncated to dtype length
assert np.all(c.filled() == ["ABCD", "yyyy"])
assert np.all(c.filled("XY") == ["XY", "yyyy"])
def test_set_get_fill_value_for_structured_column(self):
assert self.sc.fill_value == np.array((0, -1.0), self.sc.dtype)
sc = self.sc.copy()
assert sc.fill_value.item() == (0, -1.0)
sc.fill_value = (-1, np.inf)
assert sc.fill_value == np.array((-1, np.inf), self.sc.dtype)
sc2 = MaskedColumn(sc, fill_value=(-2, -np.inf))
assert sc2.fill_value == np.array((-2, -np.inf), sc2.dtype)
def test_table_column_mask_not_ref(self):
"""Table column mask is not ref of original column mask"""
self.b.fill_value = -999
assert self.t["b"].fill_value != -999
def test_set_get_fill_value_for_table_column(self):
"""Check set and get of fill value works for Column in a Table"""
self.t["b"].fill_value = 1
assert self.t["b"].fill_value == 1
assert np.all(self.t["b"].filled() == [1, 1, 1])
def test_data_attribute_fill_and_mask(self):
"""Check that .data attribute preserves fill_value and mask"""
self.t["b"].fill_value = 1
self.t["b"].mask = [True, False, True]
assert self.t["b"].data.fill_value == 1
assert np.all(self.t["b"].data.mask == [True, False, True])
class TestMaskedColumnInit(SetupData):
"""Initialization of a masked column"""
def test_set_mask_and_not_ref(self):
"""Check that mask gets set properly and that it is a copy, not ref"""
assert np.all(~self.a.mask)
assert np.all(self.b.mask)
assert np.all(~self.c.mask)
assert np.all(self.d.mask == self.d_mask)
self.d.mask[0] = True
assert not np.all(self.d.mask == self.d_mask)
def test_set_mask_from_list(self):
"""Set mask from a list"""
mask_list = [False, True, False]
a = MaskedColumn(name="a", data=[1, 2, 3], mask=mask_list)
assert np.all(a.mask == mask_list)
def test_override_existing_mask(self):
"""Override existing mask values"""
mask_list = [False, True, False]
b = MaskedColumn(name="b", data=self.b, mask=mask_list)
assert np.all(b.mask == mask_list)
def test_incomplete_mask_spec(self):
"""Incomplete mask specification raises MaskError"""
mask_list = [False, True]
with pytest.raises(ma.MaskError):
MaskedColumn(name="b", length=4, mask=mask_list)
class TestTableInit(SetupData):
"""Initializing a table"""
@pytest.mark.parametrize("type_str", ("?", "b", "i2", "f4", "c8", "S", "U", "O"))
@pytest.mark.parametrize("shape", ((8,), (4, 2), (2, 2, 2)))
def test_init_from_sequence_data_numeric_typed(self, type_str, shape):
"""Test init from list or list of lists with dtype specified, optionally
including an np.ma.masked element.
"""
# Make data of correct dtype and shape, then turn into a list,
# then use that to init Table with spec'd type_str.
data = list(range(8))
np_data = np.array(data, dtype=type_str).reshape(shape)
np_data_list = np_data.tolist()
t = Table([np_data_list], dtype=[type_str])
col = t["col0"]
assert col.dtype == np_data.dtype
assert np.all(col == np_data)
assert type(col) is Column
# Introduce np.ma.masked in the list input and confirm dtype still OK.
if len(shape) == 1:
np_data_list[-1] = np.ma.masked
elif len(shape) == 2:
np_data_list[-1][-1] = np.ma.masked
else:
np_data_list[-1][-1][-1] = np.ma.masked
last_idx = tuple(-1 for _ in shape)
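        # Index of the final element for any of the parametrized shapes,
        # e.g. (-1,), (-1, -1) or (-1, -1, -1).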
t = Table([np_data_list], dtype=[type_str])
col = t["col0"]
assert col.dtype == np_data.dtype
assert np.all(col == np_data)
assert col.mask[last_idx]
assert type(col) is MaskedColumn
@pytest.mark.parametrize("type_str", ("?", "b", "i2", "f4", "c8", "S", "U", "O"))
@pytest.mark.parametrize("shape", ((8,), (4, 2), (2, 2, 2)))
def test_init_from_sequence_data_numeric_untyped(self, type_str, shape):
"""Test init from list or list of lists with dtype NOT specified,
optionally including an np.ma.masked element.
"""
data = list(range(8))
np_data = np.array(data, dtype=type_str).reshape(shape)
np_data_list = np_data.tolist()
t = Table([np_data_list])
# Grab the dtype that numpy assigns for the Python list inputs
dtype_expected = t["col0"].dtype
# Introduce np.ma.masked in the list input and confirm dtype still OK.
if len(shape) == 1:
np_data_list[-1] = np.ma.masked
elif len(shape) == 2:
np_data_list[-1][-1] = np.ma.masked
else:
np_data_list[-1][-1][-1] = np.ma.masked
last_idx = tuple(-1 for _ in shape)
t = Table([np_data_list])
col = t["col0"]
        # Confirm dtype is same as for untyped list input w/ no mask
assert col.dtype == dtype_expected
assert np.all(col == np_data)
assert col.mask[last_idx]
assert type(col) is MaskedColumn
def test_initialization_with_all_columns(self):
t1 = Table([self.a, self.b, self.c, self.d, self.ca, self.sc])
assert t1.colnames == ["a", "b", "c", "d", "ca", "sc"]
# Check we get the same result by passing in as list of dict.
# (Regression test for error uncovered by scintillometry package.)
lofd = [{k: row[k] for k in t1.colnames} for row in t1]
t2 = Table(lofd)
for k in t1.colnames:
assert t1[k].dtype == t2[k].dtype
assert np.all(t1[k] == t2[k]) in (True, np.ma.masked)
assert np.all(
getattr(t1[k], "mask", False) == getattr(t2[k], "mask", False)
)
def test_mask_false_if_input_mask_not_true(self):
"""Masking is always False if initial masked arg is not True"""
t = Table([self.ca, self.a])
assert t.masked is False # True before astropy 4.0
t = Table([self.ca])
assert t.masked is False
t = Table([self.ca, ma.array([1, 2, 3])])
assert t.masked is False # True before astropy 4.0
def test_mask_false_if_no_input_masked(self):
"""Masking not true if not (requested or input requires mask)"""
t0 = Table([[3, 4]], masked=False)
t1 = Table(t0, masked=True)
t2 = Table(t1, masked=False)
assert not t0.masked
assert t1.masked
assert not t2.masked
def test_mask_property(self):
t = self.t
# Access table mask (boolean structured array) by column name
assert np.all(t.mask["a"] == np.array([False, False, False]))
assert np.all(t.mask["b"] == np.array([True, True, True]))
# Check that setting mask from table mask has the desired effect on column
t.mask["b"] = np.array([False, True, False])
assert np.all(t["b"].mask == np.array([False, True, False]))
# Non-masked table returns None for mask attribute
t2 = Table([self.ca], masked=False)
assert t2.mask is None
# Set mask property globally and verify local correctness
for mask in (True, False):
t.mask = mask
for name in ("a", "b"):
assert np.all(t[name].mask == mask)
class TestAddColumn:
def test_add_masked_column_to_masked_table(self):
t = Table(masked=True)
assert t.masked
t.add_column(MaskedColumn(name="a", data=[1, 2, 3], mask=[0, 1, 0]))
assert t.masked
t.add_column(MaskedColumn(name="b", data=[4, 5, 6], mask=[1, 0, 1]))
assert t.masked
assert isinstance(t["a"], MaskedColumn)
assert isinstance(t["b"], MaskedColumn)
assert np.all(t["a"] == np.array([1, 2, 3]))
assert np.all(t["a"].mask == np.array([0, 1, 0], bool))
assert np.all(t["b"] == np.array([4, 5, 6]))
assert np.all(t["b"].mask == np.array([1, 0, 1], bool))
def test_add_masked_column_to_non_masked_table(self):
t = Table(masked=False)
assert not t.masked
t.add_column(Column(name="a", data=[1, 2, 3]))
assert not t.masked
t.add_column(MaskedColumn(name="b", data=[4, 5, 6], mask=[1, 0, 1]))
assert not t.masked # Changed in 4.0, table no longer auto-upgrades
assert isinstance(t["a"], Column) # Was MaskedColumn before 4.0
assert isinstance(t["b"], MaskedColumn)
assert np.all(t["a"] == np.array([1, 2, 3]))
assert not hasattr(t["a"], "mask")
assert np.all(t["b"] == np.array([4, 5, 6]))
assert np.all(t["b"].mask == np.array([1, 0, 1], bool))
def test_add_non_masked_column_to_masked_table(self):
t = Table(masked=True)
assert t.masked
t.add_column(Column(name="a", data=[1, 2, 3]))
assert t.masked
t.add_column(MaskedColumn(name="b", data=[4, 5, 6], mask=[1, 0, 1]))
assert t.masked
assert isinstance(t["a"], MaskedColumn)
assert isinstance(t["b"], MaskedColumn)
assert np.all(t["a"] == np.array([1, 2, 3]))
assert np.all(t["a"].mask == np.array([0, 0, 0], bool))
assert np.all(t["b"] == np.array([4, 5, 6]))
assert np.all(t["b"].mask == np.array([1, 0, 1], bool))
def test_convert_to_masked_table_only_if_necessary(self):
        # Do not convert to a masked table if the new column has no masked values.
# See #1185 for details.
t = Table(masked=False)
assert not t.masked
t.add_column(Column(name="a", data=[1, 2, 3]))
assert not t.masked
t.add_column(MaskedColumn(name="b", data=[4, 5, 6], mask=[0, 0, 0]))
assert not t.masked
assert np.all(t["a"] == np.array([1, 2, 3]))
assert np.all(t["b"] == np.array([4, 5, 6]))
class TestRenameColumn:
def test_rename_masked_column(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name="a", data=[1, 2, 3], mask=[0, 1, 0]))
t["a"].fill_value = 42
t.rename_column("a", "b")
assert t.masked
assert np.all(t["b"] == np.array([1, 2, 3]))
assert np.all(t["b"].mask == np.array([0, 1, 0], bool))
assert t["b"].fill_value == 42
assert t.colnames == ["b"]
class TestRemoveColumn:
def test_remove_masked_column(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name="a", data=[1, 2, 3], mask=[0, 1, 0]))
t["a"].fill_value = 42
t.add_column(MaskedColumn(name="b", data=[4, 5, 6], mask=[1, 0, 1]))
t.remove_column("b")
assert t.masked
assert np.all(t["a"] == np.array([1, 2, 3]))
assert np.all(t["a"].mask == np.array([0, 1, 0], bool))
assert t["a"].fill_value == 42
assert t.colnames == ["a"]
class TestAddRow:
def test_add_masked_row_to_masked_table_iterable(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name="a", data=[1], mask=[0]))
t.add_column(MaskedColumn(name="b", data=[4], mask=[1]))
t.add_row([2, 5], mask=[1, 0])
t.add_row([3, 6], mask=[0, 1])
assert t.masked
assert np.all(np.array(t["a"]) == np.array([1, 2, 3]))
assert np.all(t["a"].mask == np.array([0, 1, 0], bool))
assert np.all(np.array(t["b"]) == np.array([4, 5, 6]))
assert np.all(t["b"].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping1(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name="a", data=[1], mask=[0]))
t.add_column(MaskedColumn(name="b", data=[4], mask=[1]))
t.add_row({"b": 5, "a": 2}, mask={"a": 1, "b": 0})
t.add_row({"a": 3, "b": 6}, mask={"b": 1, "a": 0})
assert t.masked
assert np.all(np.array(t["a"]) == np.array([1, 2, 3]))
assert np.all(t["a"].mask == np.array([0, 1, 0], bool))
assert np.all(np.array(t["b"]) == np.array([4, 5, 6]))
assert np.all(t["b"].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping2(self):
# When adding values to a masked table, if the mask is specified as a
# dict, then values not specified will have mask values set to True
t = Table(masked=True)
t.add_column(MaskedColumn(name="a", data=[1], mask=[0]))
t.add_column(MaskedColumn(name="b", data=[4], mask=[1]))
t.add_row({"b": 5}, mask={"b": 0})
t.add_row({"a": 3}, mask={"a": 0})
assert t.masked
assert t["a"][0] == 1 and t["a"][2] == 3
assert np.all(t["a"].mask == np.array([0, 1, 0], bool))
assert t["b"][1] == 5
assert np.all(t["b"].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping3(self):
# When adding values to a masked table, if mask is not passed to
# add_row, then the mask should be set to False if values are present
# and True if not.
t = Table(masked=True)
t.add_column(MaskedColumn(name="a", data=[1], mask=[0]))
t.add_column(MaskedColumn(name="b", data=[4], mask=[1]))
t.add_row({"b": 5})
t.add_row({"a": 3})
assert t.masked
assert t["a"][0] == 1 and t["a"][2] == 3
assert np.all(t["a"].mask == np.array([0, 1, 0], bool))
assert t["b"][1] == 5
assert np.all(t["b"].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping4(self):
# When adding values to a masked table, if the mask is specified as a
# dict, then keys in values should match keys in mask
t = Table(masked=True)
t.add_column(MaskedColumn(name="a", data=[1], mask=[0]))
t.add_column(MaskedColumn(name="b", data=[4], mask=[1]))
with pytest.raises(ValueError) as exc:
t.add_row({"b": 5}, mask={"a": True})
assert exc.value.args[0] == "keys in mask should match keys in vals"
def test_add_masked_row_to_masked_table_mismatch(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name="a", data=[1], mask=[0]))
t.add_column(MaskedColumn(name="b", data=[4], mask=[1]))
with pytest.raises(TypeError) as exc:
t.add_row([2, 5], mask={"a": 1, "b": 0})
assert exc.value.args[0] == "Mismatch between type of vals and mask"
with pytest.raises(TypeError) as exc:
t.add_row({"b": 5, "a": 2}, mask=[1, 0])
assert exc.value.args[0] == "Mismatch between type of vals and mask"
def test_add_masked_row_to_non_masked_table_iterable(self):
t = Table(masked=False)
t["a"] = [1]
t["b"] = [4]
t["c"] = Time([1], format="cxcsec")
tm = Time(2, format="cxcsec")
assert not t.masked
t.add_row([2, 5, tm])
assert not t.masked
t.add_row([3, 6, tm], mask=[0, 1, 1])
assert not t.masked
assert type(t["a"]) is Column
assert type(t["b"]) is MaskedColumn
assert type(t["c"]) is Time
assert np.all(t["a"] == [1, 2, 3])
assert np.all(t["b"].data == [4, 5, 6])
assert np.all(t["b"].mask == [False, False, True])
assert np.all(t["c"][:2] == Time([1, 2], format="cxcsec"))
assert np.all(t["c"].mask == [False, False, True])
def test_add_row_cannot_mask_column_raises_typeerror(self):
t = QTable()
t["a"] = [1, 2] * u.m
t.add_row((3 * u.m,)) # No problem
with pytest.raises(ValueError) as exc:
t.add_row((3 * u.m,), mask=(True,))
assert exc.value.args[0].splitlines() == [
"Unable to insert row because of exception in column 'a':",
"mask was supplied for column 'a' but it does not support masked values",
]
def test_setting_from_masked_column():
"""Test issue in #2997"""
mask_b = np.array([True, True, False, False])
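    # Select the first two rows either with a boolean mask or a slice.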
for select in (mask_b, slice(0, 2)):
t = Table(masked=True)
t["a"] = Column([1, 2, 3, 4])
t["b"] = MaskedColumn([11, 22, 33, 44], mask=mask_b)
t["c"] = MaskedColumn([111, 222, 333, 444], mask=[True, False, True, False])
t["b"][select] = t["c"][select]
assert t["b"][1] == t[1]["b"]
assert t["b"][0] is np.ma.masked # Original state since t['c'][0] is masked
assert t["b"][1] == 222 # New from t['c'] since t['c'][1] is unmasked
assert t["b"][2] == 33
assert t["b"][3] == 44
assert np.all(
t["b"].mask == t.mask["b"]
) # Avoid t.mask in general, this is for testing
mask_before_add = t.mask.copy()
t["d"] = np.arange(len(t))
assert np.all(t.mask["b"] == mask_before_add["b"])
def test_coercing_fill_value_type():
"""
Test that masked column fill_value is coerced into the correct column type.
"""
# This is the original example posted on the astropy@scipy mailing list
t = Table({"a": ["1"]}, masked=True)
t["a"].set_fill_value("0")
t2 = Table(t, names=["a"], dtype=[np.int32])
assert isinstance(t2["a"].fill_value, np.int32)
# Unit test the same thing.
c = MaskedColumn(["1"])
c.set_fill_value("0")
c2 = MaskedColumn(c, dtype=np.int32)
assert isinstance(c2.fill_value, np.int32)
def test_mask_copy():
"""Test that the mask is copied when copying a table (issue #7362)."""
c = MaskedColumn([1, 2], mask=[False, True])
c2 = MaskedColumn(c, copy=True)
c2.mask[0] = True
assert np.all(c.mask == [False, True])
assert np.all(c2.mask == [True, True])
def test_masked_as_array_with_mixin():
"""Test that as_array() and Table.mask attr work with masked mixin columns"""
t = Table()
t["a"] = Time([1, 2], format="cxcsec")
t["b"] = [3, 4]
t["c"] = [5, 6] * u.m
# With no mask, the output should be ndarray
ta = t.as_array()
assert isinstance(ta, np.ndarray) and not isinstance(ta, np.ma.MaskedArray)
# With a mask, output is MaskedArray
t["a"][1] = np.ma.masked
ta = t.as_array()
assert isinstance(ta, np.ma.MaskedArray)
assert np.all(ta["a"].mask == [False, True])
assert np.isclose(ta["a"][0].cxcsec, 1.0)
assert not np.any(ta["b"].mask)
assert not np.any(ta["c"].mask)
# Check table ``mask`` property
tm = t.mask
assert np.all(tm["a"] == [False, True])
assert not np.any(tm["b"])
assert not np.any(tm["c"])
def test_masked_column_with_unit_in_qtable():
"""Test that adding a MaskedColumn with a unit to QTable creates a MaskedQuantity."""
MaskedQuantity = Masked(u.Quantity)
t = QTable()
t["a"] = MaskedColumn([1, 2])
assert isinstance(t["a"], MaskedColumn)
t["b"] = MaskedColumn([1, 2], unit=u.m)
assert isinstance(t["b"], MaskedQuantity)
assert not np.any(t["b"].mask)
t["c"] = MaskedColumn([1, 2], unit=u.m, mask=[True, False])
assert isinstance(t["c"], MaskedQuantity)
assert np.all(t["c"].mask == [True, False])
def test_masked_quantity_in_table():
MaskedQuantity = Masked(u.Quantity)
t = Table()
t["b"] = MaskedQuantity([1, 2], unit=u.m)
assert isinstance(t["b"], MaskedColumn)
assert not np.any(t["b"].mask)
t["c"] = MaskedQuantity([1, 2], unit=u.m, mask=[True, False])
assert isinstance(t["c"], MaskedColumn)
assert np.all(t["c"].mask == [True, False])
def test_masked_column_data_attribute_is_plain_masked_array():
c = MaskedColumn([1, 2], mask=[False, True])
c_data = c.data
assert type(c_data) is np.ma.MaskedArray
assert type(c_data.data) is np.ndarray
def test_mask_slicing_count_array_finalize():
"""Check that we don't finalize MaskedColumn too often.
Regression test for gh-6721.
"""
# Create a new BaseColumn class that counts how often
# ``__array_finalize__`` is called.
class MyBaseColumn(BaseColumn):
counter = 0
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
MyBaseColumn.counter += 1
# Base a new MaskedColumn class on it. The normal MaskedColumn
# hardcodes the initialization to BaseColumn, so we exchange that.
class MyMaskedColumn(MaskedColumn, Column, MyBaseColumn):
def __new__(cls, *args, **kwargs):
self = super().__new__(cls, *args, **kwargs)
self._baseclass = MyBaseColumn
return self
# Creation really needs 2 finalizations (once for the BaseColumn
# call inside ``__new__`` and once when the view as a MaskedColumn
# is taken), but since the first is hardcoded, we do not capture it
# and thus the count is only 1.
c = MyMaskedColumn([1, 2], mask=[False, True])
assert MyBaseColumn.counter == 1
# slicing should need only one ``__array_finalize__`` (used to be 3).
c0 = c[:]
assert MyBaseColumn.counter == 2
# repr should need none (used to be 2!!)
repr(c0)
assert MyBaseColumn.counter == 2
|
19e15a26f132a14405add46dbebce70739d1225b3e951929f538c9f4d47c0151 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.coordinates import Angle
from astropy.tests.helper import assert_quantity_allclose
from astropy.uncertainty import distributions as ds
from astropy.uncertainty.core import Distribution
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY
if HAS_SCIPY:
from scipy.stats import norm # pylint: disable=W0611
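    # For a normal distribution, MAD = sigma * norm.ppf(0.75), so multiplying
    # the MAD by 1 / norm.ppf(0.75) (~1.4826) recovers an estimate of sigma.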
SMAD_FACTOR = 1 / norm.ppf(0.75)
class TestInit:
@classmethod
def setup_class(self):
self.rates = np.array([1, 5, 30, 400])[:, np.newaxis]
self.parr = np.random.poisson(self.rates, (4, 1000))
self.parr_t = np.random.poisson(self.rates.squeeze(), (1000, 4))
def test_numpy_init(self):
# Test that we can initialize directly from a Numpy array
Distribution(self.parr)
def test_numpy_init_T(self):
Distribution(self.parr_t.T)
def test_quantity_init(self):
# Test that we can initialize directly from a Quantity
pq = self.parr << u.ct
pqd = Distribution(pq)
assert isinstance(pqd, u.Quantity)
assert isinstance(pqd, Distribution)
assert isinstance(pqd.value, Distribution)
assert_array_equal(pqd.value.distribution, self.parr)
def test_quantity_init_T(self):
# Test that we can initialize directly from a Quantity
pq = self.parr_t << u.ct
Distribution(pq.T)
def test_quantity_init_with_distribution(self):
# Test that we can initialize a Quantity from a Distribution.
pd = Distribution(self.parr)
qpd = pd << u.ct
assert isinstance(qpd, u.Quantity)
assert isinstance(qpd, Distribution)
assert qpd.unit == u.ct
assert_array_equal(qpd.value.distribution, pd.distribution.astype(float))
def test_init_scalar():
parr = np.random.poisson(np.array([1, 5, 30, 400])[:, np.newaxis], (4, 1000))
with pytest.raises(
TypeError, match=r"Attempted to initialize a Distribution with a scalar"
):
Distribution(parr.ravel()[0])
class TestDistributionStatistics:
def setup_class(self):
with NumpyRNGContext(12345):
self.data = np.random.normal(
np.array([1, 2, 3, 4])[:, np.newaxis],
np.array([3, 2, 4, 5])[:, np.newaxis],
(4, 10000),
)
self.distr = Distribution(self.data * u.kpc)
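        # self.distr holds 4 distributions of 10000 samples each; the trailing
        # axis of the input array is treated as the sample axis.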
def test_shape(self):
# Distribution shape
assert self.distr.shape == (4,)
assert self.distr.distribution.shape == (4, 10000)
def test_size(self):
# Total number of values
assert self.distr.size == 4
assert self.distr.distribution.size == 40000
def test_n_samples(self):
# Number of samples
assert self.distr.n_samples == 10000
def test_n_distr(self):
assert self.distr.shape == (4,)
def test_pdf_mean(self):
# Mean of each PDF
expected = np.mean(self.data, axis=-1) * self.distr.unit
pdf_mean = self.distr.pdf_mean()
assert_quantity_allclose(pdf_mean, expected)
assert_quantity_allclose(pdf_mean, [1, 2, 3, 4] * self.distr.unit, rtol=0.05)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(pdf_mean, Distribution)
assert isinstance(pdf_mean, u.Quantity)
# Check with out argument.
out = pdf_mean * 0.0
pdf_mean2 = self.distr.pdf_mean(out=out)
assert pdf_mean2 is out
assert np.all(pdf_mean2 == pdf_mean)
def test_pdf_std(self):
# Standard deviation of each PDF
expected = np.std(self.data, axis=-1) * self.distr.unit
pdf_std = self.distr.pdf_std()
assert_quantity_allclose(pdf_std, expected)
assert_quantity_allclose(pdf_std, [3, 2, 4, 5] * self.distr.unit, rtol=0.05)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(pdf_std, Distribution)
assert isinstance(pdf_std, u.Quantity)
# Check with proper ddof, using out argument.
out = pdf_std * 0.0
expected = np.std(self.data, axis=-1, ddof=1) * self.distr.unit
pdf_std2 = self.distr.pdf_std(ddof=1, out=out)
assert pdf_std2 is out
assert np.all(pdf_std2 == expected)
def test_pdf_var(self):
# Variance of each PDF
expected = np.var(self.data, axis=-1) * self.distr.unit**2
pdf_var = self.distr.pdf_var()
assert_quantity_allclose(pdf_var, expected)
assert_quantity_allclose(
pdf_var, [9, 4, 16, 25] * self.distr.unit**2, rtol=0.1
)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(pdf_var, Distribution)
assert isinstance(pdf_var, u.Quantity)
# Check with proper ddof, using out argument.
out = pdf_var * 0.0
expected = np.var(self.data, axis=-1, ddof=1) * self.distr.unit**2
pdf_var2 = self.distr.pdf_var(ddof=1, out=out)
assert pdf_var2 is out
assert np.all(pdf_var2 == expected)
def test_pdf_median(self):
# Median of each PDF
expected = np.median(self.data, axis=-1) * self.distr.unit
pdf_median = self.distr.pdf_median()
assert_quantity_allclose(pdf_median, expected)
assert_quantity_allclose(pdf_median, [1, 2, 3, 4] * self.distr.unit, rtol=0.1)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(pdf_median, Distribution)
assert isinstance(pdf_median, u.Quantity)
# Check with out argument.
out = pdf_median * 0.0
pdf_median2 = self.distr.pdf_median(out=out)
assert pdf_median2 is out
assert np.all(pdf_median2 == expected)
@pytest.mark.skipif(not HAS_SCIPY, reason="no scipy")
def test_pdf_mad_smad(self):
# Median absolute deviation of each PDF
median = np.median(self.data, axis=-1, keepdims=True)
expected = np.median(np.abs(self.data - median), axis=-1) * self.distr.unit
pdf_mad = self.distr.pdf_mad()
assert_quantity_allclose(pdf_mad, expected)
pdf_smad = self.distr.pdf_smad()
assert_quantity_allclose(pdf_smad, pdf_mad * SMAD_FACTOR, rtol=1e-5)
assert_quantity_allclose(pdf_smad, [3, 2, 4, 5] * self.distr.unit, rtol=0.05)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(pdf_mad, Distribution)
assert isinstance(pdf_mad, u.Quantity)
assert not isinstance(pdf_smad, Distribution)
assert isinstance(pdf_smad, u.Quantity)
# Check out argument for smad (which checks mad too).
out = pdf_smad * 0.0
pdf_smad2 = self.distr.pdf_smad(out=out)
assert pdf_smad2 is out
assert np.all(pdf_smad2 == pdf_smad)
def test_percentile(self):
expected = np.percentile(self.data, [10, 50, 90], axis=-1) * self.distr.unit
percs = self.distr.pdf_percentiles([10, 50, 90])
assert_quantity_allclose(percs, expected)
assert percs.shape == (3, 4)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(percs, Distribution)
assert isinstance(percs, u.Quantity)
def test_add_quantity(self):
distrplus = self.distr + [2000, 0, 0, 500] * u.pc
expected = (
np.median(self.data, axis=-1) + np.array([2, 0, 0, 0.5])
) * self.distr.unit
assert_quantity_allclose(distrplus.pdf_median(), expected)
expected = np.var(self.data, axis=-1) * self.distr.unit**2
assert_quantity_allclose(distrplus.pdf_var(), expected)
def test_add_distribution(self):
another_data = (
np.random.randn(4, 10000) * np.array([1000, 0.01, 80, 10])[:, np.newaxis]
+ np.array([2000, 0, 0, 500])[:, np.newaxis]
)
# another_data is in pc, but main distr is in kpc
another_distr = Distribution(another_data * u.pc)
combined_distr = self.distr + another_distr
expected = np.median(self.data + another_data / 1000, axis=-1) * self.distr.unit
assert_quantity_allclose(combined_distr.pdf_median(), expected)
expected = (
np.var(self.data + another_data / 1000, axis=-1) * self.distr.unit**2
)
assert_quantity_allclose(combined_distr.pdf_var(), expected)
def test_helper_normal_samples():
centerq = [1, 5, 30, 400] * u.kpc
with NumpyRNGContext(12345):
n_dist = ds.normal(centerq, std=[0.2, 1.5, 4, 1] * u.kpc, n_samples=100)
assert n_dist.distribution.shape == (4, 100)
assert n_dist.shape == (4,)
assert n_dist.unit == u.kpc
assert np.all(n_dist.pdf_std() > 100 * u.pc)
n_dist2 = ds.normal(centerq, std=[0.2, 1.5, 4, 1] * u.pc, n_samples=20000)
assert n_dist2.distribution.shape == (4, 20000)
assert n_dist2.shape == (4,)
assert n_dist2.unit == u.kpc
assert np.all(n_dist2.pdf_std() < 100 * u.pc)
def test_helper_poisson_samples():
centerqcounts = [1, 5, 30, 400] * u.count
with NumpyRNGContext(12345):
p_dist = ds.poisson(centerqcounts, n_samples=100)
assert p_dist.shape == (4,)
assert p_dist.distribution.shape == (4, 100)
assert p_dist.unit == u.count
p_min = np.min(p_dist)
assert isinstance(p_min, Distribution)
assert p_min.shape == ()
assert np.all(p_min >= 0)
assert np.all(np.abs(p_dist.pdf_mean() - centerqcounts) < centerqcounts)
def test_helper_uniform_samples():
udist = ds.uniform(lower=[1, 2] * u.kpc, upper=[3, 4] * u.kpc, n_samples=1000)
assert udist.shape == (2,)
assert udist.distribution.shape == (2, 1000)
assert np.all(np.min(udist.distribution, axis=-1) > [1, 2] * u.kpc)
assert np.all(np.max(udist.distribution, axis=-1) < [3, 4] * u.kpc)
# try the alternative creator
udist = ds.uniform(center=[1, 3, 2] * u.pc, width=[5, 4, 3] * u.pc, n_samples=1000)
assert udist.shape == (3,)
assert udist.distribution.shape == (3, 1000)
assert np.all(np.min(udist.distribution, axis=-1) > [-1.5, 1, 0.5] * u.pc)
assert np.all(np.max(udist.distribution, axis=-1) < [3.5, 5, 3.5] * u.pc)
def test_helper_normal_exact():
pytest.skip("distribution stretch goal not yet implemented")
centerq = [1, 5, 30, 400] * u.kpc
ds.normal(centerq, std=[0.2, 1.5, 4, 1] * u.kpc)
ds.normal(centerq, var=[0.04, 2.25, 16, 1] * u.kpc**2)
ds.normal(centerq, ivar=[25, 0.44444444, 0.625, 1] * u.kpc**-2)
def test_helper_poisson_exact():
pytest.skip("distribution stretch goal not yet implemented")
centerq = [1, 5, 30, 400] * u.one
ds.poisson(centerq, n_samples=1000)
with pytest.raises(
u.UnitsError,
match=r"Poisson distribution can only be computed for dimensionless quantities",
):
centerq = [1, 5, 30, 400] * u.kpc
ds.poisson(centerq, n_samples=1000)
def test_reprs():
darr = np.arange(30).reshape(3, 10)
distr = Distribution(darr * u.kpc)
assert "n_samples=10" in repr(distr)
assert "n_samples=10" in str(distr)
assert r"n_{\rm samp}=10" in distr._repr_latex_()
@pytest.mark.parametrize(
"func, kws",
[
(ds.normal, {"center": 0, "std": 2}),
(ds.uniform, {"lower": 0, "upper": 2}),
(ds.poisson, {"center": 2}),
(ds.normal, {"center": 0 * u.count, "std": 2 * u.count}),
(ds.uniform, {"lower": 0 * u.count, "upper": 2 * u.count}),
(ds.poisson, {"center": 2 * u.count}),
],
)
def test_wrong_kw_fails(func, kws):
with pytest.raises(Exception):
kw_temp = kws.copy()
kw_temp["n_sample"] = 100 # note the missing "s"
assert func(**kw_temp).n_samples == 100
kw_temp = kws.copy()
kw_temp["n_samples"] = 100
assert func(**kw_temp).n_samples == 100
def test_index_assignment_quantity():
arr = np.random.randn(2, 1000)
distr = Distribution(arr * u.kpc)
d1q, d2q = distr
assert isinstance(d1q, Distribution)
assert isinstance(d2q, Distribution)
ndistr = ds.normal(center=[1, 2] * u.kpc, std=[3, 4] * u.kpc, n_samples=1000)
n1, n2 = ndistr
assert isinstance(n1, ds.Distribution)
assert isinstance(n2, ds.Distribution)
def test_index_assignment_array():
arr = np.random.randn(2, 1000)
distr = Distribution(arr)
d1a, d2a = distr
assert isinstance(d1a, Distribution)
assert isinstance(d2a, Distribution)
ndistr = ds.normal(center=[1, 2], std=[3, 4], n_samples=1000)
n1, n2 = ndistr
assert isinstance(n1, ds.Distribution)
assert isinstance(n2, ds.Distribution)
def test_histogram():
arr = np.random.randn(2, 3, 1000)
distr = Distribution(arr)
hist, bins = distr.pdf_histogram(bins=10)
assert hist.shape == (2, 3, 10)
assert bins.shape == (2, 3, 11)
def test_array_repr_latex():
    # As of this writing ndarray does not have a _repr_latex_, and this test
    # ensures distributions account for that. However, if in the future ndarray
    # gets a _repr_latex_, we can skip this.
arr = np.random.randn(4, 1000)
if hasattr(arr, "_repr_latex_"):
pytest.skip("in this version of numpy, ndarray has a _repr_latex_")
distr = Distribution(arr)
assert distr._repr_latex_() is None
def test_distr_to():
distr = ds.normal(10 * u.cm, n_samples=100, std=1 * u.cm)
todistr = distr.to(u.m)
assert_quantity_allclose(distr.pdf_mean().to(u.m), todistr.pdf_mean())
def test_distr_noq_to():
# this is an array distribution not a quantity
distr = ds.normal(10, n_samples=100, std=1)
with pytest.raises(AttributeError):
distr.to(u.m)
def test_distr_to_value():
distr = ds.normal(10 * u.cm, n_samples=100, std=1 * u.cm)
tovdistr = distr.to_value(u.m)
assert np.allclose(distr.pdf_mean().to_value(u.m), tovdistr.pdf_mean())
def test_distr_noq_to_value():
distr = ds.normal(10, n_samples=100, std=1)
with pytest.raises(AttributeError):
distr.to_value(u.m)
def test_distr_angle():
# Check that Quantity subclasses decay to Quantity appropriately.
distr = Distribution([2.0, 3.0, 4.0])
ad = Angle(distr, "deg")
ad_plus_ad = ad + ad
assert isinstance(ad_plus_ad, Angle)
assert isinstance(ad_plus_ad, Distribution)
ad_times_ad = ad * ad
assert not isinstance(ad_times_ad, Angle)
assert isinstance(ad_times_ad, u.Quantity)
assert isinstance(ad_times_ad, Distribution)
ad += ad
assert isinstance(ad, Angle)
assert isinstance(ad, Distribution)
assert_array_equal(ad.distribution, ad_plus_ad.distribution)
with pytest.raises(u.UnitTypeError):
ad *= ad
def test_distr_angle_view_as_quantity():
# Check that Quantity subclasses decay to Quantity appropriately.
distr = Distribution([2.0, 3.0, 4.0])
ad = Angle(distr, "deg")
qd = ad.view(u.Quantity)
assert not isinstance(qd, Angle)
assert isinstance(qd, u.Quantity)
assert isinstance(qd, Distribution)
# View directly as DistributionQuantity class.
qd2 = ad.view(qd.__class__)
assert not isinstance(qd2, Angle)
assert isinstance(qd2, u.Quantity)
assert isinstance(qd2, Distribution)
assert_array_equal(qd2.distribution, qd.distribution)
qd3 = ad.view(qd.dtype, qd.__class__)
assert not isinstance(qd3, Angle)
assert isinstance(qd3, u.Quantity)
assert isinstance(qd3, Distribution)
assert_array_equal(qd3.distribution, qd.distribution)
def test_distr_cannot_view_new_dtype():
# A Distribution has a very specific structured dtype with just one
# element that holds the array of samples. As it is not clear what
# to do with a view as a new dtype, we just error on it.
# TODO: with a lot of thought, this restriction can likely be relaxed.
distr = Distribution([2.0, 3.0, 4.0])
with pytest.raises(ValueError, match="with a new dtype"):
distr.view(np.dtype("f8"))
# Check subclass just in case.
ad = Angle(distr, "deg")
with pytest.raises(ValueError, match="with a new dtype"):
ad.view(np.dtype("f8"))
with pytest.raises(ValueError, match="with a new dtype"):
ad.view(np.dtype("f8"), Distribution)
def test_scalar_quantity_distribution():
# Regression test for gh-12336
angles = Distribution([90.0, 30.0, 0.0] * u.deg)
sin_angles = np.sin(angles) # This failed in 4.3.
assert isinstance(sin_angles, Distribution)
assert isinstance(sin_angles, u.Quantity)
assert_array_equal(sin_angles, Distribution(np.sin([90.0, 30.0, 0.0] * u.deg)))
@pytest.mark.parametrize("op", [operator.eq, operator.ne, operator.gt])
class TestComparison:
@classmethod
def setup_class(cls):
cls.d = Distribution([90.0, 30.0, 0.0])
class Override:
__array_ufunc__ = None
def __eq__(self, other):
return "eq"
def __ne__(self, other):
return "ne"
def __lt__(self, other):
return "gt" # Since it is called for the reverse of gt
cls.override = Override()
def test_distribution_can_be_compared_to_non_distribution(self, op):
result = op(self.d, 0.0)
assert_array_equal(result, Distribution(op(self.d.distribution, 0.0)))
def test_distribution_comparison_defers_correctly(self, op):
result = op(self.d, self.override)
assert result == op.__name__
class TestSetItemWithSelection:
def test_setitem(self):
d = Distribution([90.0, 30.0, 0.0])
d[d > 50] = 0.0
assert_array_equal(d, Distribution([0.0, 30.0, 0.0]))
def test_inplace_operation(self):
d = Distribution([90.0, 30.0, 0.0])
d[d > 50] *= -1.0
assert_array_equal(d, Distribution([-90.0, 30.0, 0.0]))
|
912fdc129cb91fdcca8dcb6f438cf0da132aa055773e1196310b0353d39e567b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test that Distribution works with classes other than ndarray and Quantity."""
import numpy as np
import pytest
from numpy.testing import assert_array_equal
import astropy.units as u
from astropy.coordinates import Angle, Latitude, Longitude
from astropy.uncertainty import Distribution
class TestAngles:
@classmethod
def setup_class(cls):
cls.a = np.arange(27.0).reshape(3, 9)
cls.d = Distribution(cls.a)
cls.q = cls.a << u.deg
cls.dq = Distribution(cls.q)
@pytest.mark.parametrize("angle_cls", [Angle, Longitude, Latitude])
def test_as_input_for_angle(self, angle_cls):
da = angle_cls(self.dq)
assert isinstance(da, angle_cls)
assert isinstance(da, Distribution)
assert_array_equal(da.distribution, angle_cls(self.q))
@pytest.mark.parametrize("angle_cls", [Angle, Longitude, Latitude])
def test_using_angle_as_input(self, angle_cls):
a = angle_cls(self.q)
da = Distribution(a)
assert isinstance(da, angle_cls)
assert isinstance(da, Distribution)
# Parametrize the unit to check the various branches in Latitude._validate_angles
@pytest.mark.parametrize("dtype", ["f8", "f4"])
@pytest.mark.parametrize(
"value", [90 * u.deg, np.pi / 2 * u.radian, 90 * 60 * u.arcmin]
)
def test_at_limit_for_latitude(self, value, dtype):
q = u.Quantity(value, dtype=dtype).reshape(1)
qd = Distribution(q)
ld = Latitude(qd)
assert_array_equal(ld.distribution, Latitude(q))
# Parametrize the unit in case Longitude._wrap_at becomes unit-dependent.
@pytest.mark.parametrize("dtype", ["f8", "f4"])
@pytest.mark.parametrize(
"value", [360 * u.deg, 2 * np.pi * u.radian, 360 * 60 * u.arcmin]
)
def test_at_wrap_angle_for_longitude(self, value, dtype):
q = u.Quantity(value, dtype=dtype).reshape(1)
qd = Distribution(q)
ld = Longitude(qd)
assert_array_equal(ld.distribution, Longitude(q))
assert np.all(ld.distribution == 0)
@pytest.mark.parametrize("angle_cls", [Longitude, Latitude])
def test_operation_gives_correct_subclass(self, angle_cls):
# Lon and Lat always fall back to Angle
da = angle_cls(self.dq)
da2 = da + da
        assert isinstance(da2, Angle)
        assert isinstance(da2, Distribution)
@pytest.mark.parametrize("angle_cls", [Longitude, Latitude])
def test_pdfstd_gives_correct_subclass(self, angle_cls):
# Lon and Lat always fall back to Angle
da = angle_cls(self.dq)
std = da.pdf_std()
assert isinstance(std, Angle)
assert_array_equal(std, Angle(self.q.std(-1)))
|
a9bda2d1e93736078453f34c4bc4185440e02e827a12f3e2a3cf03a33bd3ff1a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to test fitting routines
"""
# pylint: disable=invalid-name
import os.path
import unittest.mock as mk
from importlib.metadata import EntryPoint
from itertools import combinations
from unittest import mock
import numpy as np
import pytest
from numpy import linalg
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
from astropy.modeling import models
from astropy.modeling.core import Fittable2DModel, Parameter
from astropy.modeling.fitting import (
DogBoxLSQFitter,
Fitter,
FittingWithOutlierRemoval,
JointFitter,
LevMarLSQFitter,
LinearLSQFitter,
LMLSQFitter,
NonFiniteValueError,
SimplexLSQFitter,
SLSQPLSQFitter,
TRFLSQFitter,
_NLLSQFitter,
populate_entry_points,
)
from astropy.modeling.optimizers import Optimization
from astropy.stats import sigma_clip
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from . import irafutil
if HAS_SCIPY:
from scipy import optimize
fitters = [SimplexLSQFitter, SLSQPLSQFitter]
non_linear_fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]
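# Fixed RNG seed so the synthetic noisy data (and therefore the fits) are reproducible.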
_RANDOM_SEED = 0x1337
class TestPolynomial2D:
"""Tests for 2D polynomial fitting."""
def setup_class(self):
self.model = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
def poly2(x, y):
return 1 + 2 * x + 3 * x**2 + 4 * y + 5 * y**2 + 6 * x * y
self.z = poly2(self.x, self.y)
def test_poly2D_fitting(self):
fitter = LinearLSQFitter()
v = self.model.fit_deriv(x=self.x, y=self.y)
p = linalg.lstsq(v, self.z.flatten(), rcond=-1)[0]
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, p)
def test_eval(self):
fitter = LinearLSQFitter()
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model(self.x, self.y), self.z)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_nonlinear_fitting(self, fitter):
fitter = fitter()
self.model.parameters = [0.6, 1.8, 2.9, 3.7, 4.9, 6.7]
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, [1, 2, 3, 4, 5, 6])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_compare_nonlinear_fitting(self):
self.model.parameters = [0.6, 1.8, 2.9, 3.7, 4.9, 6.7]
fit_models = []
for fitter in non_linear_fitters:
fitter = fitter()
with pytest.warns(
AstropyUserWarning, match=r"Model is linear in parameters"
):
fit_models.append(fitter(self.model, self.x, self.y, self.z))
for pair in combinations(fit_models, 2):
assert_allclose(pair[0].parameters, pair[1].parameters)
class TestICheb2D:
"""
Tests 2D Chebyshev polynomial fitting
Create a 2D polynomial (z) using Polynomial2DModel and default coefficients
Fit z using a ICheb2D model
Evaluate the ICheb2D polynomial and compare with the initial z
"""
def setup_class(self):
self.pmodel = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
self.z = self.pmodel(self.x, self.y)
self.cheb2 = models.Chebyshev2D(2, 2)
self.fitter = LinearLSQFitter()
def test_default_params(self):
self.cheb2.parameters = np.arange(9)
p = np.array(
[1344.0, 1772.0, 400.0, 1860.0, 2448.0, 552.0, 432.0, 568.0, 128.0]
)
z = self.cheb2(self.x, self.y)
model = self.fitter(self.cheb2, self.x, self.y, z)
assert_almost_equal(model.parameters, p)
def test_poly2D_cheb2D(self):
model = self.fitter(self.cheb2, self.x, self.y, self.z)
z1 = model(self.x, self.y)
assert_almost_equal(self.z, z1)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_chebyshev2D_nonlinear_fitting(self, fitter):
fitter = fitter()
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, 0.6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
model = fitter(cheb2d, self.x, self.y, z)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8], atol=10**-9)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_chebyshev2D_nonlinear_fitting_with_weights(self, fitter):
fitter = fitter()
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, 0.6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
weights = np.ones_like(self.y)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
model = fitter(cheb2d, self.x, self.y, z, weights=weights)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8], atol=10**-9)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class TestJointFitter:
"""
Tests the joint fitting routine using 2 gaussian models
"""
def setup_class(self):
"""
Create 2 gaussian models and some data with noise.
Create a fitter for the two models keeping the amplitude parameter
common for the two models.
"""
self.g1 = models.Gaussian1D(10, mean=14.9, stddev=0.3)
self.g2 = models.Gaussian1D(10, mean=13, stddev=0.4)
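        # Fit both Gaussians jointly, with "amplitude" declared as a shared
        # parameter for each model; [9.8] is the starting value for that
        # common amplitude.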
self.jf = JointFitter(
[self.g1, self.g2], {self.g1: ["amplitude"], self.g2: ["amplitude"]}, [9.8]
)
self.x = np.arange(10, 20, 0.1)
y1 = self.g1(self.x)
y2 = self.g2(self.x)
with NumpyRNGContext(_RANDOM_SEED):
n = np.random.randn(100)
self.ny1 = y1 + 2 * n
self.ny2 = y2 + 2 * n
self.jf(self.x, self.ny1, self.x, self.ny2)
def test_joint_parameter(self):
"""
Tests that the amplitude of the two models is the same
"""
assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])
assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])
def test_joint_fitter(self):
"""
        Tests the fitting routine against an equivalent procedure built
        directly on scipy.optimize.leastsq and compares the fitted parameters.
"""
p1 = [14.9, 0.3]
p2 = [13, 0.4]
A = 9.8
p = np.r_[A, p1, p2]
def model(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errfunc(p, x1, y1, x2, y2):
return np.ravel(
np.r_[model(p[0], p[1:3], x1) - y1, model(p[0], p[3:], x2) - y2]
)
coeff, _ = optimize.leastsq(
errfunc, p, args=(self.x, self.ny1, self.x, self.ny2)
)
assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))
class TestLinearLSQFitter:
def test_compound_model_raises_error(self):
"""Test that if an user tries to use a compound model, raises an error"""
MESSAGE = r"Model must be simple, not compound"
with pytest.raises(ValueError, match=MESSAGE):
init_model1 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model2 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model_comp = init_model1 + init_model2
x = np.arange(10)
y = init_model_comp(x, model_set_axis=False)
fitter = LinearLSQFitter()
fitter(init_model_comp, x, y)
def test_chebyshev1D(self):
"""Tests fitting a 1D Chebyshev polynomial to some real world data."""
test_file = get_pkg_data_filename(os.path.join("data", "idcompspec.fits"))
with open(test_file) as f:
lines = f.read()
reclist = lines.split("begin")
record = irafutil.IdentifyRecord(reclist[1])
coeffs = record.coeff
order = int(record.fields["order"])
initial_model = models.Chebyshev1D(order - 1, domain=record.get_range())
fitter = LinearLSQFitter()
fitted_model = fitter(initial_model, record.x, record.z)
assert_allclose(fitted_model.parameters, np.array(coeffs), rtol=10e-2)
def test_linear_fit_model_set(self):
"""Tests fitting multiple models simultaneously."""
init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected, rtol=1e-1)
def test_linear_fit_2d_model_set(self):
"""Tests fitted multiple 2-D models simultaneously."""
init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
x = np.arange(10)
y = np.arange(10)
z_expected = init_model(x, y, model_set_axis=False)
assert z_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected, rtol=1e-1)
def test_linear_fit_fixed_parameter(self):
"""
Tests fitting a polynomial model with a fixed parameter (issue #6135).
"""
init_model = models.Polynomial1D(degree=2, c1=1)
init_model.c1.fixed = True
x = np.arange(10)
y = 2 + x + 0.5 * x * x
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [2.0, 1.0, 0.5], atol=1e-14)
def test_linear_fit_model_set_fixed_parameter(self):
"""
Tests fitting a polynomial model set with a fixed parameter (#6135).
"""
init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)
init_model.c1.fixed = True
x = np.arange(10)
yy = np.array([2 + x + 0.5 * x * x, -2 * x])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.c0, [2.0, 0.0], atol=1e-14)
assert_allclose(fitted_model.c1, [1.0, -2.0], atol=1e-14)
assert_allclose(fitted_model.c2, [0.5, 0.0], atol=1e-14)
def test_linear_fit_2d_model_set_fixed_parameters(self):
"""
Tests fitting a 2d polynomial model set with fixed parameters (#6135).
"""
init_model = models.Polynomial2D(
degree=2,
c1_0=[1, 2],
c0_1=[-0.5, 1],
n_models=2,
fixed={"c1_0": True, "c0_1": True},
)
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1 + x - 0.5 * y + 0.1 * x * x, 2 * x + y - 0.2 * y * y])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz, atol=1e-14)
def test_linear_fit_model_set_masked_values(self):
"""
Tests model set fitting with masked value(s) (#4824, #6819).
"""
# NB. For single models, there is an equivalent doctest.
init_model = models.Polynomial1D(degree=1, n_models=2)
x = np.arange(10)
y = np.ma.masked_array([2 * x + 1, x - 2], mask=np.zeros_like([x, x]))
y[0, 7] = 100.0 # throw off fit coefficients if unmasked
y.mask[0, 7] = True
y[1, 1:3] = -100.0
y.mask[1, 1:3] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.c0, [1.0, -2.0], atol=1e-14)
assert_allclose(fitted_model.c1, [2.0, 1.0], atol=1e-14)
def test_linear_fit_2d_model_set_masked_values(self):
"""
Tests 2D model set fitting with masked value(s) (#4824, #6819).
"""
init_model = models.Polynomial2D(1, n_models=2)
x, y = np.mgrid[0:5, 0:5]
z = np.ma.masked_array(
[2 * x + 3 * y + 1, x - 0.5 * y - 2], mask=np.zeros_like([x, x])
)
z[0, 3, 1] = -1000.0 # throw off fit coefficients if unmasked
z.mask[0, 3, 1] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model.c0_0, [1.0, -2.0], atol=1e-14)
assert_allclose(fitted_model.c1_0, [2.0, 1.0], atol=1e-14)
assert_allclose(fitted_model.c0_1, [3.0, -0.5], atol=1e-14)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class TestNonLinearFitters:
"""Tests non-linear least squares fitting and the SLSQP algorithm."""
def setup_class(self):
self.initial_values = [100, 5, 1]
self.xdata = np.arange(0, 10, 0.1)
sigma = 4.0 * np.ones_like(self.xdata)
with NumpyRNGContext(_RANDOM_SEED):
yerror = np.random.normal(0, sigma)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
self.ydata = func(self.initial_values, self.xdata) + yerror
self.gauss = models.Gaussian1D(100, 5, stddev=1)
@pytest.mark.parametrize("fitter0", non_linear_fitters)
@pytest.mark.parametrize("fitter1", non_linear_fitters)
def test_estimated_vs_analytic_deriv(self, fitter0, fitter1):
"""
Runs `LevMarLSQFitter` and `TRFLSQFitter` with estimated and
analytic derivatives of a `Gaussian1D`.
"""
fitter0 = fitter0()
model = fitter0(self.gauss, self.xdata, self.ydata)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
fitter1 = fitter1()
emodel = fitter1(g1e, self.xdata, self.ydata, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
@pytest.mark.parametrize("fitter0", non_linear_fitters)
@pytest.mark.parametrize("fitter1", non_linear_fitters)
def test_estimated_vs_analytic_deriv_with_weights(self, fitter0, fitter1):
"""
        Runs the non-linear fitters with weights, using both estimated and
        analytic derivatives of a `Gaussian1D`.
"""
weights = 1.0 / (self.ydata / 10.0)
fitter0 = fitter0()
model = fitter0(self.gauss, self.xdata, self.ydata, weights=weights)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
fitter1 = fitter1()
emodel = fitter1(
g1e, self.xdata, self.ydata, weights=weights, estimate_jacobian=True
)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_with_optimize(self, fitter):
"""
Tests results from `LevMarLSQFitter` and `TRFLSQFitter` against
`scipy.optimize.leastsq`.
"""
fitter = fitter()
model = fitter(self.gauss, self.xdata, self.ydata, estimate_jacobian=True)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errfunc(p, x, y):
return func(p, x) - y
result = optimize.leastsq(
errfunc, self.initial_values, args=(self.xdata, self.ydata)
)
assert_allclose(model.parameters, result[0], rtol=10 ** (-3))
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_with_weights(self, fitter):
"""
Tests results from `LevMarLSQFitter` and `TRFLSQFitter` with weights.
"""
fitter = fitter()
# part 1: weights are equal to 1
model = fitter(self.gauss, self.xdata, self.ydata, estimate_jacobian=True)
withw = fitter(
self.gauss,
self.xdata,
self.ydata,
estimate_jacobian=True,
weights=np.ones_like(self.xdata),
)
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
# part 2: weights are 0 or 1 (effectively, they are a mask)
weights = np.zeros_like(self.xdata)
weights[::2] = 1.0
mask = weights >= 1.0
model = fitter(
self.gauss, self.xdata[mask], self.ydata[mask], estimate_jacobian=True
)
withw = fitter(
self.gauss, self.xdata, self.ydata, estimate_jacobian=True, weights=weights
)
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
@pytest.mark.filterwarnings(r"ignore:.* Maximum number of iterations reached")
@pytest.mark.filterwarnings(
r"ignore:Values in x were outside bounds during a minimize step, "
r"clipping to bounds"
)
@pytest.mark.parametrize("fitter_class", fitters)
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_fitter_against_LevMar(self, fitter_class, fitter):
"""
Tests results from non-linear fitters against `LevMarLSQFitter`
and `TRFLSQFitter`
"""
fitter = fitter()
fitter_cls = fitter_class()
# This emits a warning from fitter that we need to ignore with
# pytest.mark.filterwarnings above.
new_model = fitter_cls(self.gauss, self.xdata, self.ydata)
model = fitter(self.gauss, self.xdata, self.ydata)
assert_allclose(model.parameters, new_model.parameters, rtol=10 ** (-4))
@pytest.mark.filterwarnings(
r"ignore:Values in x were outside bounds during a minimize step, "
r"clipping to bounds"
)
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_LSQ_SLSQP_with_constraints(self, fitter):
"""
Runs `LevMarLSQFitter`/`TRFLSQFitter` and `SLSQPLSQFitter` on a
model with constraints.
"""
fitter = fitter()
g1 = models.Gaussian1D(100, 5, stddev=1)
g1.mean.fixed = True
fslsqp = SLSQPLSQFitter()
slsqp_model = fslsqp(g1, self.xdata, self.ydata)
model = fitter(g1, self.xdata, self.ydata)
assert_allclose(model.parameters, slsqp_model.parameters, rtol=10 ** (-4))
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_non_linear_lsq_fitter_with_weights(self, fitter):
"""
Tests that issue #11581 has been solved.
"""
fitter = fitter()
np.random.seed(42)
norder = 2
fitter2 = LinearLSQFitter()
model = models.Polynomial1D(norder)
npts = 10000
c = [2.0, -10.0, 7.0]
tw = np.random.uniform(0.0, 10.0, npts)
tx = np.random.uniform(0.0, 10.0, npts)
ty = c[0] + c[1] * tx + c[2] * (tx**2)
ty += np.random.normal(0.0, 1.5, npts)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
tf1 = fitter(model, tx, ty, weights=tw)
tf2 = fitter2(model, tx, ty, weights=tw)
assert_allclose(tf1.parameters, tf2.parameters, atol=10 ** (-16))
assert_allclose(tf1.parameters, c, rtol=10 ** (-2), atol=10 ** (-2))
model = models.Gaussian1D()
if isinstance(fitter, (TRFLSQFitter, LMLSQFitter)):
with pytest.warns(
AstropyUserWarning, match=r"The fit may be unsuccessful; *."
):
fitter(model, tx, ty, weights=tw)
else:
fitter(model, tx, ty, weights=tw)
model = models.Polynomial2D(norder)
nxpts = 100
nypts = 150
npts = nxpts * nypts
c = [1.0, 4.0, 7.0, -8.0, -9.0, -3.0]
tw = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
tx = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
ty = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
tz = (
c[0]
+ c[1] * tx
+ c[2] * (tx**2)
+ c[3] * ty
+ c[4] * (ty**2)
+ c[5] * tx * ty
)
tz += np.random.normal(0.0, 1.5, npts).reshape(nxpts, nypts)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
tf1 = fitter(model, tx, ty, tz, weights=tw)
tf2 = fitter2(model, tx, ty, tz, weights=tw)
assert_allclose(tf1.parameters, tf2.parameters, atol=10 ** (-16))
assert_allclose(tf1.parameters, c, rtol=10 ** (-2), atol=10 ** (-2))
def test_simplex_lsq_fitter(self):
"""A basic test for the `SimplexLSQ` fitter."""
class Rosenbrock(Fittable2DModel):
a = Parameter()
b = Parameter()
@staticmethod
def evaluate(x, y, a, b):
return (a - x) ** 2 + b * (y - x**2) ** 2
x = y = np.linspace(-3.0, 3.0, 100)
with NumpyRNGContext(_RANDOM_SEED):
z = Rosenbrock.evaluate(x, y, 1.0, 100.0)
z += np.random.normal(0.0, 0.1, size=z.shape)
fitter = SimplexLSQFitter()
r_i = Rosenbrock(1, 100)
r_f = fitter(r_i, x, y, z)
assert_allclose(r_f.parameters, [1.0, 100.0], rtol=1e-2)
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_param_cov(self, fitter):
"""
Tests that the 'param_cov' fit_info entry gets the right answer for
*linear* least squares, where the answer is exact
"""
fitter = fitter()
a = 2
b = 100
with NumpyRNGContext(_RANDOM_SEED):
x = np.linspace(0, 1, 100)
# y scatter is amplitude ~1 to make sure covariance is
# non-negligible
y = x * a + b + np.random.randn(len(x))
# first compute the ordinary least squares covariance matrix
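        # beta_hat = (X^T X)^-1 X^T y, with Cov(beta_hat) = s^2 (X^T X)^-1 and
        # s^2 the residual variance on len(y) - len(beta) degrees of freedom.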
X = np.vstack([x, np.ones(len(x))]).T
beta = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), y.T)
s2 = np.sum((y - np.matmul(X, beta).ravel()) ** 2) / (len(y) - len(beta))
olscov = np.linalg.inv(np.matmul(X.T, X)) * s2
# now do the non-linear least squares fit
mod = models.Linear1D(a, b)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fmod = fitter(mod, x, y)
assert_allclose(fmod.parameters, beta.ravel())
assert_allclose(olscov, fitter.fit_info["param_cov"])
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_param_cov_with_uncertainties(self, fitter):
"""
Tests that the 'param_cov' fit_info entry gets the right answer for
*linear* least squares, where the answer is exact
"""
fitter = fitter()
a = 2
b = 100
with NumpyRNGContext(_RANDOM_SEED):
x = np.linspace(0, 1, 100)
# y scatter is amplitude ~1 to make sure covariance is
# non-negligible
y = x * a + b + np.random.normal(size=len(x))
sigma = np.random.normal(loc=1, scale=0.1, size=len(x))
# compute the ordinary least squares covariance matrix
# accounting for measurement uncertainties `sigma`
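        # With noise covariance N = diag(sigma^2), the weighted solution is
        # Cov(beta_hat) = (X^T N^-1 X)^-1 and beta_hat = Cov X^T N^-1 y.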
X = np.vstack([x, np.ones(len(x))]).T
inv_N = np.linalg.inv(np.diag(sigma) ** 2)
cov = np.linalg.inv(X.T @ inv_N @ X)
beta = cov @ X.T @ inv_N @ y.T
# now do the non-linear least squares fit
mod = models.Linear1D(a, b)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fmod = fitter(mod, x, y, weights=sigma**-1)
assert_allclose(fmod.parameters, beta.ravel())
assert_allclose(cov, fitter.fit_info["param_cov"])
class TestEntryPoint:
"""Tests population of fitting with entry point fitters"""
def successfulimport(self):
# This should work
class goodclass(Fitter):
__name__ = "GoodClass"
return goodclass
def raiseimporterror(self):
# This should fail as it raises an Import Error
raise ImportError
def returnbadfunc(self):
def badfunc():
# This should import but it should fail type check
pass
return badfunc
def returnbadclass(self):
        # This should import but it should fail the subclass type check
class badclass:
pass
return badclass
def test_working(self):
"""This should work fine"""
mock_entry_working = mock.create_autospec(EntryPoint)
mock_entry_working.name = "Working"
mock_entry_working.load = self.successfulimport
populate_entry_points([mock_entry_working])
def test_import_error(self):
"""This raises an import error on load to test that it is handled correctly"""
mock_entry_importerror = mock.create_autospec(EntryPoint)
mock_entry_importerror.name = "IErr"
mock_entry_importerror.load = self.raiseimporterror
with pytest.warns(AstropyUserWarning, match=r".*ImportError.*"):
populate_entry_points([mock_entry_importerror])
def test_bad_func(self):
"""This returns a function which fails the type check"""
mock_entry_badfunc = mock.create_autospec(EntryPoint)
mock_entry_badfunc.name = "BadFunc"
mock_entry_badfunc.load = self.returnbadfunc
with pytest.warns(AstropyUserWarning, match=r".*Class.*"):
populate_entry_points([mock_entry_badfunc])
def test_bad_class(self):
"""This returns a class which doesn't inherient from fitter"""
mock_entry_badclass = mock.create_autospec(EntryPoint)
mock_entry_badclass.name = "BadClass"
mock_entry_badclass.load = self.returnbadclass
with pytest.warns(AstropyUserWarning, match=r".*BadClass.*"):
populate_entry_points([mock_entry_badclass])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class Test1DFittingWithOutlierRemoval:
def setup_class(self):
self.x = np.linspace(-5.0, 5.0, 200)
self.model_params = (3.0, 1.3, 0.8)
def func(p, x):
return p[0] * np.exp(-0.5 * (x - p[1]) ** 2 / p[2] ** 2)
self.y = func(self.model_params, self.x)
@pytest.mark.filterwarnings("ignore:The fit may be unsuccessful")
@pytest.mark.filterwarnings(
r"ignore:Values in x were outside bounds during a minimize step, "
r"clipping to bounds"
)
@pytest.mark.parametrize("fitter", non_linear_fitters + fitters)
def test_with_fitters_and_sigma_clip(self, fitter):
import scipy.stats as stats
fitter = fitter()
np.random.seed(0)
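        # The Bernoulli mask flags roughly a quarter of the points, which get
        # large extra scatter (mean 3, sigma 5) and act as outliers for the
        # sigma-clipping step to reject.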
c = stats.bernoulli.rvs(0.25, size=self.x.shape)
y = self.y + (
np.random.normal(0.0, 0.2, self.x.shape)
+ c * np.random.normal(3.0, 5.0, self.x.shape)
)
g_init = models.Gaussian1D(amplitude=1.0, mean=0, stddev=1.0)
fit = FittingWithOutlierRemoval(fitter, sigma_clip, niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class Test2DFittingWithOutlierRemoval:
def setup_class(self):
self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j]
self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8)
def Gaussian_2D(p, pos):
return p[0] * np.exp(
-0.5 * (pos[0] - p[2]) ** 2 / p[4] ** 2
- 0.5 * (pos[1] - p[1]) ** 2 / p[3] ** 2
)
self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x]))
    def initial_guess(self, data, pos):
        """Compute the centroid of the data as the initial guess for the
        center position."""
        y = pos[0]
        x = pos[1]
wx = x * data
wy = y * data
total_intensity = np.sum(data)
x_mean = np.sum(wx) / total_intensity
y_mean = np.sum(wy) / total_intensity
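        # Convert the centroid from data coordinates to approximate pixel
        # indices (assuming a grid symmetric about zero) so the peak amplitude
        # can be read directly off the data array.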
x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0])
y_to_pixel = y[0].size / (y[y[0].size - 1][y[0].size - 1] - y[0][0])
x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.0).astype(int)
y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.0).astype(int)
amplitude = data[y_pos][x_pos]
return amplitude, x_mean, y_mean
@pytest.mark.filterwarnings("ignore:The fit may be unsuccessful")
@pytest.mark.filterwarnings(
r"ignore:Values in x were outside bounds during a minimize step, "
r"clipping to bounds"
)
@pytest.mark.parametrize("fitter", non_linear_fitters + fitters)
def test_with_fitters_and_sigma_clip(self, fitter):
import scipy.stats as stats
fitter = fitter()
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.z.shape)
z = self.z + (
np.random.normal(0.0, 0.2, self.z.shape)
+ c * np.random.normal(self.z, 2.0, self.z.shape)
)
guess = self.initial_guess(self.z, np.array([self.y, self.x]))
g2_init = models.Gaussian2D(
amplitude=guess[0],
x_mean=guess[1],
y_mean=guess[2],
x_stddev=0.75,
y_stddev=1.25,
)
fit = FittingWithOutlierRemoval(fitter, sigma_clip, niter=3, sigma=3.0)
fitted_model, _ = fit(g2_init, self.x, self.y, z)
assert_allclose(fitted_model.parameters[0:5], self.model_params, atol=1e-1)
def test_1d_set_fitting_with_outlier_removal():
"""Test model set fitting with outlier removal (issue #6819)"""
poly_set = models.Polynomial1D(2, n_models=2)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(),
sigma_clip,
sigma=2.5,
niter=3,
cenfunc=np.ma.mean,
stdfunc=np.ma.std,
)
x = np.arange(10)
y = np.array([2.5 * x - 4, 2 * x * x + x + 10])
y[1, 5] = -1000 # outlier
poly_set, filt_y = fitter(poly_set, x, y)
assert_allclose(poly_set.c0, [-4.0, 10.0], atol=1e-14)
assert_allclose(poly_set.c1, [2.5, 1.0], atol=1e-14)
assert_allclose(poly_set.c2, [0.0, 2.0], atol=1e-14)
def test_2d_set_axis_2_fitting_with_outlier_removal():
"""Test fitting 2D model set (axis 2) with outlier removal (issue #6819)"""
poly_set = models.Polynomial2D(1, n_models=2, model_set_axis=2)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(),
sigma_clip,
sigma=2.5,
niter=3,
cenfunc=np.ma.mean,
stdfunc=np.ma.std,
)
y, x = np.mgrid[0:5, 0:5]
z = np.rollaxis(np.array([x + y, 1 - 0.1 * x + 0.2 * y]), 0, 3)
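    # ``model_set_axis=2`` puts the model dimension on the last axis of ``z``;
    # rollaxis moves the stacked-model axis (originally axis 0) there,
    # giving ``z`` a shape of (5, 5, 2).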
z[3, 3:5, 0] = 100.0 # outliers
poly_set, filt_z = fitter(poly_set, x, y, z)
assert_allclose(poly_set.c0_0, [[[0.0, 1.0]]], atol=1e-14)
assert_allclose(poly_set.c1_0, [[[1.0, -0.1]]], atol=1e-14)
assert_allclose(poly_set.c0_1, [[[1.0, 0.2]]], atol=1e-14)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class TestWeightedFittingWithOutlierRemoval:
"""Issue #7020"""
def setup_class(self):
# values of x,y not important as we fit y(x,y) = p0 model here
self.y, self.x = np.mgrid[0:20, 0:20]
self.z = np.mod(self.x + self.y, 2) * 2 - 1 # -1,1 chessboard
self.weights = np.mod(self.x + self.y, 2) * 2 + 1 # 1,3 chessboard
self.z[0, 0] = 1000.0 # outlier
self.z[0, 1] = 1000.0 # outlier
self.x1d = self.x.flatten()
self.z1d = self.z.flatten()
self.weights1d = self.weights.flatten()
def test_1d_without_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d)
assert_allclose(fit.parameters[0], self.z1d.mean(), atol=10 ** (-2))
def test_1d_without_weights_with_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
fit, mask = fitter(model, self.x1d, self.z1d)
assert (~mask).sum() == self.z1d.size - 2
assert mask[0] and mask[1]
assert_allclose(
fit.parameters[0], 0.0, atol=10 ** (-2)
) # with removed outliers mean is 0.0
def test_1d_with_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert fit.parameters[0] > 1.0 # outliers pulled it high
def test_1d_with_weights_with_sigma_clip(self):
"""
smoke test for #7020 - fails without fitting.py
        patch because weights do not propagate
"""
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
fit, filtered = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert fit.parameters[0] > 10 ** (-2) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert fit.parameters[0] < 1.0
def test_1d_set_with_common_weights_with_sigma_clip(self):
"""added for #6819 (1D model set with weights in common)"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
z1d = np.array([self.z1d, self.z1d])
fit, filtered = fitter(model, self.x1d, z1d, weights=self.weights1d)
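        # LinearLSQFitter folds the weights into both sides of the least-squares
        # system (effectively minimizing sum((w * resid)**2)), so the weighted
        # constant fit of the +/-1 chessboard with 1/3 weights settles at
        # (9 - 1) / (9 + 1) = 0.8 per model once the two outliers are clipped.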
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_1d_set_with_weights_with_sigma_clip(self):
"""1D model set with separate weights"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
z1d = np.array([self.z1d, self.z1d])
weights = np.array([self.weights1d, self.weights1d])
fit, filtered = fitter(model, self.x1d, z1d, weights=weights)
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_2d_without_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x, self.y, self.z)
assert_allclose(fit.parameters[0], self.z.mean(), atol=10 ** (-2))
def test_2d_without_weights_with_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
fit, mask = fitter(model, self.x, self.y, self.z)
assert (~mask).sum() == self.z.size - 2
assert mask[0, 0] and mask[0, 1]
assert_allclose(fit.parameters[0], 0.0, atol=10 ** (-2))
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_2d_with_weights_without_sigma_clip(self, fitter):
fitter = fitter()
model = models.Polynomial2D(0)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert fit.parameters[0] > 1.0 # outliers pulled it high
def test_2d_linear_with_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
# LinearLSQFitter doesn't handle weights properly in 2D
fitter = LinearLSQFitter()
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert fit.parameters[0] > 1.0 # outliers pulled it high
@pytest.mark.parametrize("base_fitter", non_linear_fitters)
def test_2d_with_weights_with_sigma_clip(self, base_fitter):
"""smoke test for #7020 - fails without fitting.py patch because
        weights do not propagate"""
base_fitter = base_fitter()
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(base_fitter, sigma_clip, niter=3, sigma=3.0)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fit, _ = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert fit.parameters[0] > 10 ** (-2) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert fit.parameters[0] < 1.0
def test_2d_linear_with_weights_with_sigma_clip(self):
"""same as test above with a linear fitter."""
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
)
fit, _ = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert fit.parameters[0] > 10 ** (-2) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert fit.parameters[0] < 1.0
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_fitters_with_weights(fitter):
"""Issue #5737"""
fitter = fitter()
if isinstance(fitter, _NLLSQFitter):
pytest.xfail(
"This test is poorly designed and causes issues for "
"scipy.optimize.least_squares based fitters"
)
Xin, Yin = np.mgrid[0:21, 0:21]
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
# Non-linear model
g2 = models.Gaussian2D(10, 10, 9, 2, 3)
z = g2(Xin, Yin)
gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig)
assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2))
# Linear model
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10) / 1.2
z = p2(Xin, Yin)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig)
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
def test_linear_fitter_with_weights():
"""Regression test for #7035"""
Xin, Yin = np.mgrid[0:21, 0:21]
fitter = LinearLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10) / 1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig ** (-2))
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
@pytest.mark.parametrize(
"fixed, warns",
[
        ({}, True),  # tests that fitting a model with no fixed parameters produces a warning
(
{"c1_0": True},
True,
        ),  # tests that fitting a model with a fixed parameter produces a warning - #14037
(
{"c0_1": True},
False,
), # https://github.com/astropy/astropy/pull/14037#pullrequestreview-1191726872
],
)
def test_polynomial_poorly_conditioned(fixed, warns):
p0 = models.Polynomial2D(degree=1, c0_0=3, c1_0=5, c0_1=0, fixed=fixed)
fitter = LinearLSQFitter()
x = [1, 2, 3, 4, 5]
y = [1, 1, 1, 1, 1]
values = p0(x, y)
if warns:
with pytest.warns(
AstropyUserWarning, match="The fit may be poorly conditioned"
):
p = fitter(p0, x, y, values)
else:
p = fitter(p0, x, y, values)
assert np.allclose(p0.parameters, p.parameters, rtol=0, atol=1e-14)
def test_linear_fitter_with_weights_flat():
"""Same as the above #7035 test but with flattened inputs"""
Xin, Yin = np.mgrid[0:21, 0:21]
Xin, Yin = Xin.flatten(), Yin.flatten()
fitter = LinearLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10) / 1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig ** (-2))
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings("ignore:The fit may be unsuccessful")
@pytest.mark.parametrize("fitter", non_linear_fitters + fitters)
def test_fitters_interface(fitter):
"""
Test that ``**kwargs`` work with all optimizers.
This is a basic smoke test.
"""
fitter = fitter()
model = models.Gaussian1D(10, 4, 0.3)
x = np.arange(21)
y = model(x)
if isinstance(fitter, SimplexLSQFitter):
kwargs = {"maxiter": 79, "verblevel": 1, "acc": 1e-6}
else:
kwargs = {"maxiter": 77, "verblevel": 1, "epsilon": 1e-2, "acc": 1e-6}
if isinstance(fitter, (LevMarLSQFitter, _NLLSQFitter)):
kwargs.pop("verblevel")
_ = fitter(model, x, y, **kwargs)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter_class", [SLSQPLSQFitter, SimplexLSQFitter])
def test_optimizers(fitter_class):
fitter = fitter_class()
# Test maxiter
assert fitter._opt_method.maxiter == 100
fitter._opt_method.maxiter = 1000
assert fitter._opt_method.maxiter == 1000
# Test eps
assert fitter._opt_method.eps == np.sqrt(np.finfo(float).eps)
fitter._opt_method.eps = 1e-16
assert fitter._opt_method.eps == 1e-16
# Test acc
assert fitter._opt_method.acc == 1e-7
fitter._opt_method.acc = 1e-16
assert fitter._opt_method.acc == 1e-16
# Test repr
assert repr(fitter._opt_method) == f"{fitter._opt_method.__class__.__name__}()"
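    # The underlying scipy optimizer is mocked out below, so only the
    # wrapper's bookkeeping (returned parameters and ``fit_info``) is tested.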
fitparams = mk.MagicMock()
final_func_val = mk.MagicMock()
numiter = mk.MagicMock()
funcalls = mk.MagicMock()
exit_mode = 1
mess = mk.MagicMock()
xtol = mk.MagicMock()
if fitter_class == SLSQPLSQFitter:
return_value = (fitparams, final_func_val, numiter, exit_mode, mess)
fit_info = {
"final_func_val": final_func_val,
"numiter": numiter,
"exit_mode": exit_mode,
"message": mess,
}
else:
return_value = (fitparams, final_func_val, numiter, funcalls, exit_mode)
fit_info = {
"final_func_val": final_func_val,
"numiter": numiter,
"exit_mode": exit_mode,
"num_function_calls": funcalls,
}
with mk.patch.object(
fitter._opt_method.__class__, "opt_method", return_value=return_value
):
with pytest.warns(AstropyUserWarning, match=r"The fit may be unsuccessful; .*"):
assert (fitparams, fit_info) == fitter._opt_method(
mk.MagicMock(), mk.MagicMock(), mk.MagicMock(), xtol=xtol
)
assert fit_info == fitter._opt_method.fit_info
    if isinstance(fitter, SLSQPLSQFitter):
        assert fitter._opt_method.acc == 1e-16
    else:
        assert fitter._opt_method.acc == xtol
@mk.patch.multiple(Optimization, __abstractmethods__=set())
def test_Optimization_abstract_call():
optimization = Optimization(mk.MagicMock())
MESSAGE = r"Subclasses should implement this method"
with pytest.raises(NotImplementedError, match=MESSAGE):
optimization()
def test_fitting_with_outlier_removal_niter():
"""
Test that FittingWithOutlierRemoval stops prior to reaching niter if the
set of masked points has converged and correctly reports the actual number
of iterations performed.
"""
# 2 rows with some noise around a constant level and 1 deviant point:
x = np.arange(25)
with NumpyRNGContext(_RANDOM_SEED):
y = np.random.normal(loc=10.0, scale=1.0, size=(2, 25))
y[0, 14] = 100.0
# Fit 2 models with up to 5 iterations (should only take 2):
fitter = FittingWithOutlierRemoval(
fitter=LinearLSQFitter(),
outlier_func=sigma_clip,
niter=5,
sigma_lower=3.0,
sigma_upper=3.0,
maxiters=1,
)
model, mask = fitter(models.Chebyshev1D(2, n_models=2), x, y)
# Confirm that only the deviant point was rejected, in 2 iterations:
assert_equal(np.where(mask), [[0], [14]])
assert fitter.fit_info["niter"] == 2
# Refit just the first row without any rejection iterations, to ensure
# there are no regressions for that special case:
fitter = FittingWithOutlierRemoval(
fitter=LinearLSQFitter(),
outlier_func=sigma_clip,
niter=0,
sigma_lower=3.0,
sigma_upper=3.0,
maxiters=1,
)
model, mask = fitter(models.Chebyshev1D(2), x, y[0])
# Confirm that there were no iterations or rejected points:
assert mask.sum() == 0
assert fitter.fit_info["niter"] == 0
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class TestFittingUncertainties:
"""
Test that parameter covariance is calculated correctly for the fitters
that do so (currently LevMarLSQFitter, LinearLSQFitter).
"""
example_1D_models = [models.Polynomial1D(2), models.Linear1D()]
example_1D_sets = [
models.Polynomial1D(2, n_models=2, model_set_axis=False),
models.Linear1D(n_models=2, slope=[1.0, 1.0], intercept=[0, 0]),
]
def setup_class(self):
np.random.seed(619)
self.x = np.arange(10)
self.x_grid = np.random.randint(0, 100, size=100).reshape(10, 10)
self.y_grid = np.random.randint(0, 100, size=100).reshape(10, 10)
self.rand_grid = np.random.random(100).reshape(10, 10)
self.rand = self.rand_grid[0]
@pytest.mark.parametrize(
("single_model", "model_set"), list(zip(example_1D_models, example_1D_sets))
)
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_1d_models(self, single_model, model_set, fitter):
"""Test that fitting uncertainties are computed correctly for 1D models
and 1D model sets. Use covariance/stds given by LevMarLSQFitter as
a benchmark since they are returned by the numpy fitter.
"""
fitter = fitter(calc_uncertainties=True)
linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)
# test 1D single models
# fit single model w/ nonlinear fitter
y = single_model(self.x) + self.rand
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fit_model = fitter(single_model, self.x, y)
cov_model = fit_model.cov_matrix.cov_matrix
# fit single model w/ linlsq fitter
fit_model_linlsq = linlsq_fitter(single_model, self.x, y)
cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix
        # check that covariance and stds are computed correctly
assert_allclose(cov_model_linlsq, cov_model)
assert_allclose(np.sqrt(np.diag(cov_model_linlsq)), fit_model_linlsq.stds.stds)
# now test 1D model sets
# fit set of models w/ linear fitter
y = model_set(self.x, model_set_axis=False) + np.array([self.rand, self.rand])
fit_1d_set_linlsq = linlsq_fitter(model_set, self.x, y)
cov_1d_set_linlsq = [j.cov_matrix for j in fit_1d_set_linlsq.cov_matrix]
# make sure cov matrix from single model fit w/ levmar fitter matches
# the cov matrix of first model in the set
assert_allclose(cov_1d_set_linlsq[0], cov_model)
assert_allclose(
np.sqrt(np.diag(cov_1d_set_linlsq[0])), fit_1d_set_linlsq.stds[0].stds
)
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_2d_models(self, fitter):
"""
        Test that fitting uncertainties are computed correctly for 2D models
        and 2D model sets. Use covariance/stds given by LevMarLSQFitter as
        a benchmark and check that they agree with the values computed
        analytically by LinearLSQFitter.
"""
fitter = fitter(calc_uncertainties=True)
linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)
single_model = models.Polynomial2D(2, c0_0=2)
model_set = models.Polynomial2D(
degree=2, n_models=2, c0_0=[2, 3], model_set_axis=False
)
# fit single model w/ nonlinear fitter
z_grid = single_model(self.x_grid, self.y_grid) + self.rand_grid
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
fit_model = fitter(single_model, self.x_grid, self.y_grid, z_grid)
cov_model = fit_model.cov_matrix.cov_matrix
        # fit single model w/ linlsq fitter
fit_model_linlsq = linlsq_fitter(single_model, self.x_grid, self.y_grid, z_grid)
cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix
assert_allclose(cov_model, cov_model_linlsq)
assert_allclose(np.sqrt(np.diag(cov_model_linlsq)), fit_model_linlsq.stds.stds)
# fit 2d model set
z_grid = model_set(self.x_grid, self.y_grid) + np.array(
(self.rand_grid, self.rand_grid)
)
fit_2d_set_linlsq = linlsq_fitter(model_set, self.x_grid, self.y_grid, z_grid)
cov_2d_set_linlsq = [j.cov_matrix for j in fit_2d_set_linlsq.cov_matrix]
# make sure cov matrix from single model fit w/ levmar fitter matches
# the cov matrix of first model in the set
assert_allclose(cov_2d_set_linlsq[0], cov_model)
assert_allclose(
np.sqrt(np.diag(cov_2d_set_linlsq[0])), fit_2d_set_linlsq.stds[0].stds
)
def test_covariance_std_printing_indexing(self, capsys):
"""
Test printing methods and indexing.
"""
# test str representation for Covariance/stds
fitter = LinearLSQFitter(calc_uncertainties=True)
mod = models.Linear1D()
fit_mod = fitter(mod, self.x, mod(self.x) + self.rand)
print(fit_mod.cov_matrix)
captured = capsys.readouterr()
assert "slope | 0.001" in captured.out
assert "intercept| -0.005, 0.03" in captured.out
print(fit_mod.stds)
captured = capsys.readouterr()
assert "slope | 0.032" in captured.out
assert "intercept| 0.173" in captured.out
# test 'pprint' for Covariance/stds
print(fit_mod.cov_matrix.pprint(round_val=5, max_lines=1))
captured = capsys.readouterr()
assert "slope | 0.00105" in captured.out
assert "intercept" not in captured.out
print(fit_mod.stds.pprint(max_lines=1, round_val=5))
captured = capsys.readouterr()
assert "slope | 0.03241" in captured.out
assert "intercept" not in captured.out
# test indexing for Covariance class.
assert fit_mod.cov_matrix[0, 0] == fit_mod.cov_matrix["slope", "slope"]
# test indexing for stds class.
assert fit_mod.stds[1] == fit_mod.stds["intercept"]
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_non_finite_error(fitter):
"""Regression test error introduced to solve issues #3575 and #12809"""
x = np.array([1, 2, 3, 4, 5, np.nan, 7, np.inf])
y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, 16])
m_init = models.Gaussian1D()
fit = fitter()
    # The fit should raise NonFiniteValueError because of the NaN/inf values
with pytest.raises(
NonFiniteValueError, match=r"Objective function has encountered.*"
):
fit(m_init, x, y)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_non_finite_filter_1D(fitter):
"""Regression test filter introduced to remove non-finte values from data"""
x = np.array([1, 2, 3, 4, 5, 6, 7, 8])
y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, np.inf])
m_init = models.Gaussian1D()
fit = fitter()
with pytest.warns(
AstropyUserWarning,
match=r"Non-Finite input data has been removed by the fitter",
):
fit(m_init, x, y, filter_non_finite=True)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_non_finite_filter_2D(fitter):
"""Regression test filter introduced to remove non-finte values from data"""
x, y = np.mgrid[0:10, 0:10]
m_true = models.Gaussian2D(amplitude=1, x_mean=5, y_mean=5, x_stddev=2, y_stddev=2)
with NumpyRNGContext(_RANDOM_SEED):
z = m_true(x, y) + np.random.rand(*x.shape)
z[0, 0] = np.nan
z[3, 3] = np.inf
z[7, 5] = -np.inf
m_init = models.Gaussian2D()
fit = fitter()
with pytest.warns(
AstropyUserWarning,
match=r"Non-Finite input data has been removed by the fitter",
):
fit(m_init, x, y, z, filter_non_finite=True)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters*")
@pytest.mark.parametrize("fitter", non_linear_fitters)
def test_non_linear_fit_zero_degree_polynomial_with_weights(fitter):
"""
Regression test for issue #13617
Issue:
    Weighted non-linear fits of 0-degree polynomials cause an error
    to be raised by scipy.
Fix:
There should be no error raised in this circumstance
"""
model = models.Polynomial1D(0, c0=0)
fitter = fitter()
x = np.arange(10, dtype=float)
y = np.ones((10,))
weights = np.ones((10,))
fit = fitter(model, x, y)
assert_almost_equal(fit.c0, 1.0)
fit = fitter(model, x, y, weights=weights)
assert_almost_equal(fit.c0, 1.0)
|
35b90e4725b3da34f5576fdff746afa354f47dc9156c5e33521c5e3bd4daf169 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
from contextlib import nullcontext
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal, assert_array_less
from astropy import units as u
from astropy.coordinates import Angle
from astropy.modeling import InputParameterError, fitting, models
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
fitters = [
fitting.LevMarLSQFitter,
fitting.TRFLSQFitter,
fitting.LMLSQFitter,
fitting.DogBoxLSQFitter,
]
def test_sigma_constant():
"""
Test that the GAUSSIAN_SIGMA_TO_FWHM constant matches the
gaussian_sigma_to_fwhm constant in astropy.stats. We define
it manually in astropy.modeling to avoid importing from
astropy.stats.
"""
from astropy.modeling.functional_models import GAUSSIAN_SIGMA_TO_FWHM
from astropy.stats.funcs import gaussian_sigma_to_fwhm
assert gaussian_sigma_to_fwhm == GAUSSIAN_SIGMA_TO_FWHM
def test_Trapezoid1D():
"""Regression test for https://github.com/astropy/astropy/issues/1721"""
model = models.Trapezoid1D(amplitude=4.2, x_0=2.0, width=1.0, slope=3)
xx = np.linspace(0, 4, 8)
yy = model(xx)
yy_ref = [0.0, 1.41428571, 3.12857143, 4.2, 4.2, 3.12857143, 1.41428571, 0.0]
assert_allclose(yy, yy_ref, rtol=0, atol=1e-6)
def test_Gaussian1D():
model = models.Gaussian1D(4.2, 1.7, stddev=5.1)
x = np.mgrid[0:5]
g = model(x)
g_ref = [3.97302977, 4.16062403, 4.19273985, 4.06574509, 3.79389376]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
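    # Gaussian FWHM = 2 * sqrt(2 * ln 2) * stddev ~= 2.3548 * 5.1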
assert_allclose(model.fwhm, 12.009582229657841)
def test_Gaussian2D():
"""
Test rotated elliptical Gaussian2D model.
https://github.com/astropy/astropy/pull/2038
"""
model = models.Gaussian2D(
4.2, 1.7, 3.1, x_stddev=5.1, y_stddev=3.3, theta=np.pi / 6.0
)
y, x = np.mgrid[0:5, 0:5]
g = model(x, y)
g_ref = [
[3.01907812, 2.99051889, 2.81271552, 2.5119566, 2.13012709],
[3.55982239, 3.6086023, 3.4734158, 3.17454575, 2.75494838],
[3.88059142, 4.0257528, 3.96554926, 3.70908389, 3.29410187],
[3.91095768, 4.15212857, 4.18567526, 4.00652015, 3.64146544],
[3.6440466, 3.95922417, 4.08454159, 4.00113878, 3.72161094],
]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
assert_allclose(
[model.x_fwhm, model.y_fwhm], [12.009582229657841, 7.7709061486021325]
)
def test_Gaussian2DCovariance():
"""
Test rotated elliptical Gaussian2D model when cov_matrix is input.
https://github.com/astropy/astropy/pull/2199
"""
cov_matrix = [[49.0, -16.0], [-16.0, 9.0]]
model = models.Gaussian2D(17.0, 2.0, 2.5, cov_matrix=cov_matrix)
y, x = np.mgrid[0:5, 0:5]
g = model(x, y)
g_ref = [
[4.3744505, 5.8413977, 7.42988694, 9.00160175, 10.38794269],
[8.83290201, 10.81772851, 12.61946384, 14.02225593, 14.84113227],
[13.68528889, 15.37184621, 16.44637743, 16.76048705, 16.26953638],
[16.26953638, 16.76048705, 16.44637743, 15.37184621, 13.68528889],
[14.84113227, 14.02225593, 12.61946384, 10.81772851, 8.83290201],
]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
# Test bad cov_matrix shape
cov_matrix = [[49.0, 3.14, -16.0], [3.14, -16.0, 9.0], [-16, 27, 3.14]]
MESSAGE = r"Covariance matrix must be 2x2"
with pytest.raises(ValueError, match=MESSAGE):
models.Gaussian2D(17.0, 2.0, 2.5, cov_matrix=cov_matrix)
def test_Gaussian2DRotation():
amplitude = 42
x_mean, y_mean = 0, 0
x_stddev, y_stddev = 2, 3
theta = Angle(10, "deg")
pars = dict(
amplitude=amplitude,
x_mean=x_mean,
y_mean=y_mean,
x_stddev=x_stddev,
y_stddev=y_stddev,
)
rotation = models.Rotation2D(angle=theta.degree)
point1 = (x_mean + 2 * x_stddev, y_mean + 2 * y_stddev)
point2 = rotation(*point1)
g1 = models.Gaussian2D(theta=0, **pars)
g2 = models.Gaussian2D(theta=theta.radian, **pars)
value1 = g1(*point1)
value2 = g2(*point2)
assert_allclose(value1, value2)
def test_Gaussian2D_invalid_inputs():
x_stddev = 5.1
y_stddev = 3.3
theta = 10
cov_matrix = [[49.0, -16.0], [-16.0, 9.0]]
# first make sure the valid ones are OK
models.Gaussian2D()
models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=theta)
models.Gaussian2D(x_stddev=None, y_stddev=y_stddev, theta=theta)
models.Gaussian2D(x_stddev=x_stddev, y_stddev=None, theta=theta)
models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=None)
models.Gaussian2D(cov_matrix=cov_matrix)
MESSAGE = r"Cannot specify both cov_matrix and x/y_stddev/theta"
with pytest.raises(InputParameterError, match=MESSAGE):
models.Gaussian2D(x_stddev=0, cov_matrix=cov_matrix)
with pytest.raises(InputParameterError, match=MESSAGE):
models.Gaussian2D(y_stddev=0, cov_matrix=cov_matrix)
with pytest.raises(InputParameterError, match=MESSAGE):
models.Gaussian2D(theta=0, cov_matrix=cov_matrix)
def test_Gaussian2D_theta():
theta = Angle(90, "deg")
model1 = models.Gaussian2D(1, 25, 25, 15, 5, theta=theta)
theta2 = np.pi / 2.0
model2 = models.Gaussian2D(1, 25, 25, 15, 5, theta=theta2)
assert model1.theta.quantity.to("radian").value == model2.theta.value
assert model1.bounding_box == model2.bounding_box
assert model1(619.42, 31.314) == model2(619.42, 31.314)
@pytest.mark.parametrize("gamma", (10, -10))
def test_moffat_fwhm(gamma):
ans = 34.641016151377542
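    # Moffat FWHM = 2 * |gamma| * sqrt(2**(1 / alpha) - 1); for gamma = +/-10
    # and alpha = 0.5 this is 20 * sqrt(3) ~= 34.641.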
kwargs = {"gamma": gamma, "alpha": 0.5}
m1 = models.Moffat1D(**kwargs)
m2 = models.Moffat2D(**kwargs)
assert_allclose([m1.fwhm, m2.fwhm], ans)
assert_array_less(0, [m1.fwhm, m2.fwhm])
def test_RedshiftScaleFactor():
"""Like ``test_ScaleModel()``."""
# Scale by a scalar
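    # RedshiftScaleFactor multiplies its input by (1 + z); z = 0.4 gives 1.4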
m = models.RedshiftScaleFactor(0.4)
assert m(0) == 0
assert_array_equal(m([1, 2]), [1.4, 2.8])
assert_allclose(m.inverse(m([1, 2])), [1, 2])
# Scale by a list
m = models.RedshiftScaleFactor([-0.5, 0, 0.5], n_models=3)
assert_array_equal(m(0), 0)
assert_array_equal(m([1, 2], model_set_axis=False), [[0.5, 1], [1, 2], [1.5, 3]])
assert_allclose(
m.inverse(m([1, 2], model_set_axis=False)), [[1, 2], [1, 2], [1, 2]]
)
def test_RedshiftScaleFactor_inverse():
m = models.RedshiftScaleFactor(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_RedshiftScaleFactor_inverse_bounding_box():
model = models.RedshiftScaleFactor(2)
model.bounding_box = (1, 5)
assert model.bounding_box == (1, 5)
inverse_model = model.inverse
assert inverse_model.bounding_box == (3, 15)
assert_allclose(
inverse_model(model(4, with_bounding_box=True), with_bounding_box=True), 4
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_RedshiftScaleFactor_model_levmar_fit():
"""Test fitting RedshiftScaleFactor model with LevMarLSQFitter."""
init_model = models.RedshiftScaleFactor()
x = np.arange(10)
y = 2.7174 * x
fitter = fitting.LevMarLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [1.7174])
def test_Ellipse2D():
"""Test Ellipse2D model."""
amplitude = 7.5
x0, y0 = 15, 15
theta = Angle(45, "deg")
em = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta.radian)
y, x = np.mgrid[0:30, 0:30]
e = em(x, y)
assert np.all(e[e > 0] == amplitude)
assert e[y0, x0] == amplitude
rotation = models.Rotation2D(angle=theta.degree)
point1 = [2, 0] # Rotation2D center is (0, 0)
point2 = rotation(*point1)
point1 = np.array(point1) + [x0, y0]
point2 = np.array(point2) + [x0, y0]
e1 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=0.0)
e2 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=theta.radian)
assert e1(*point1) == e2(*point2)
def test_Ellipse2D_circular():
"""Test that circular Ellipse2D agrees with Disk2D [3736]."""
amplitude = 7.5
radius = 10
size = (radius * 2) + 1
y, x = np.mgrid[0:size, 0:size]
ellipse = models.Ellipse2D(amplitude, radius, radius, radius, radius, theta=0)(x, y)
disk = models.Disk2D(amplitude, radius, radius, radius)(x, y)
assert np.all(ellipse == disk)
def test_Ellipse2D_theta():
theta = Angle(90, "deg")
model1 = models.Ellipse2D(1, 25, 25, 15, 5, theta=theta)
theta2 = np.pi / 2.0
model2 = models.Ellipse2D(1, 25, 25, 15, 5, theta=theta2)
assert model1.theta.quantity.to("radian").value == model2.theta.value
assert model1.bounding_box == model2.bounding_box
assert model1(619.42, 31.314) == model2(619.42, 31.314)
def test_Scale_inverse():
m = models.Scale(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Scale_inverse_bounding_box():
model = models.Scale(2)
model.bounding_box = (1, 5)
assert model.bounding_box == (1, 5)
inverse_model = model.inverse
assert inverse_model.bounding_box == (2, 10)
assert (
inverse_model(model(4, with_bounding_box=True), with_bounding_box=True) == 4.0
)
def test_Multiply_inverse():
m = models.Multiply(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Multiply_inverse_bounding_box():
model = models.Multiply(2)
model.bounding_box = (1, 5)
assert model.bounding_box == (1, 5)
inverse_model = model.inverse
assert inverse_model.bounding_box == (2, 10)
assert (
inverse_model(model(4, with_bounding_box=True), with_bounding_box=True) == 4.0
)
def test_Shift_inverse():
m = models.Shift(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Shift_inverse_bounding_box():
model = models.Shift(10)
model.bounding_box = (1, 5)
assert model.bounding_box == (1, 5)
inverse_model = model.inverse
assert inverse_model.bounding_box == (11, 15)
assert (
inverse_model(model(4, with_bounding_box=True), with_bounding_box=True) == 4.0
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_Shift_model_levmar_fit(fitter):
"""Test fitting Shift model with LevMarLSQFitter (issue #6103)."""
fitter = fitter()
init_model = models.Shift()
x = np.arange(10)
y = x + 0.1
with pytest.warns(AstropyUserWarning, match="Model is linear in parameters"):
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [0.1], atol=1e-15)
def test_Shift_model_set_linear_fit():
"""Test linear fitting of Shift model (issue #6103)."""
init_model = models.Shift(offset=[0, 0], n_models=2)
x = np.arange(10)
yy = np.array([x + 0.1, x - 0.2])
fitter = fitting.LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.parameters, [0.1, -0.2], atol=1e-15)
@pytest.mark.parametrize("Model", (models.Scale, models.Multiply))
def test_Scale_model_set_linear_fit(Model):
"""Test linear fitting of Scale model (#6103)."""
init_model = Model(factor=[0, 0], n_models=2)
x = np.arange(-3, 7)
yy = np.array([1.15 * x, 0.96 * x])
fitter = fitting.LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.parameters, [1.15, 0.96], atol=1e-15)
@pytest.mark.parametrize("Model", (models.Scale, models.Multiply))
def test_Scale_model_evaluate_without_units(Model):
m = Model(factor=4 * u.m)
kwargs = {"x": 3 * u.m, "y": 7 * u.m}
mnu = m.without_units_for_data(**kwargs)
x = np.linspace(-1, 1, 100)
assert_allclose(mnu(x), 4 * x)
# https://github.com/astropy/astropy/issues/6178
def test_Ring2D_rout():
# Test with none of r_in, r_out, width specified
m = models.Ring2D(amplitude=1, x_0=1, y_0=1)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 1
assert m.width.value == 1
# Test with r_in specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=4)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 4
assert m.width.value == 1
# Test with r_out specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=7)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 1
assert m.width.value == 6
# Error when r_out is too small for default r_in
MESSAGE = r"r_in=.* and width=.* must both be >=0"
with pytest.raises(InputParameterError, match=MESSAGE):
models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=0.5)
# Test with width specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, width=11)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 1
assert m.width.value == 11
# Test with r_in and r_out specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=2, r_out=5)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 2
assert m.width.value == 3
# Error when r_out is smaller than r_in
with pytest.raises(InputParameterError, match=MESSAGE):
models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=1, r_in=4)
# Test with r_in and width specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=2, width=4)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 2
assert m.width.value == 4
# Test with r_out and width specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=12, width=7)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 5
assert m.width.value == 7
# Error when width is larger than r_out
with pytest.raises(InputParameterError, match=MESSAGE):
models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=1, width=4)
# Test with r_in, r_out, and width all specified
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=3, r_out=11, width=8)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 3
assert m.width.value == 8
# error when specifying all
MESSAGE = r"Width must be r_out - r_in"
with pytest.raises(InputParameterError, match=MESSAGE):
models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=3, r_out=11, width=7)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_Voigt1D(fitter):
fitter = fitter()
voi = models.Voigt1D(amplitude_L=-0.5, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0)
xarr = np.linspace(-5.0, 5.0, num=40)
yarr = voi(xarr)
voi_init = models.Voigt1D(amplitude_L=-1.0, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0)
voi_fit = fitter(voi_init, xarr, yarr)
assert_allclose(voi_fit.param_sets, voi.param_sets)
# Invalid method
MESSAGE = r"Not a valid method for Voigt1D Faddeeva function: test"
with pytest.raises(ValueError, match=MESSAGE):
models.Voigt1D(method="test")
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("algorithm", ("humlicek2", "wofz"))
def test_Voigt1D_norm(algorithm):
"""Test integral of normalized Voigt profile."""
from scipy.integrate import quad
if algorithm == "humlicek2":
ctx = pytest.warns(
AstropyDeprecationWarning, match=r"humlicek2 has been deprecated since .*"
)
atol = 1e-8
else:
ctx = nullcontext()
atol = 1e-14
def voigt(algorithm):
return models.Voigt1D(
amplitude_L=1.0 / np.pi, x_0=0.0, fwhm_L=2.0, fwhm_G=1.5, method=algorithm
)
with ctx:
voi = models.Voigt1D(
amplitude_L=1.0 / np.pi, x_0=0.0, fwhm_L=2.0, fwhm_G=1.5, method=algorithm
)
assert_allclose(quad(voi, -np.inf, np.inf)[0], 1.0, atol=atol)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("doppler", (1.0e-3, 1.0e-2, 0.1, 0.5, 1.0, 2.5, 5.0, 10))
@pytest.mark.filterwarnings(r"ignore:humlicek2 has been deprecated since .*")
def test_Voigt1D_hum2(doppler):
"""
Verify accuracy of Voigt profile in Humlicek approximation to Faddeeva.cc (SciPy).
"""
x = np.linspace(-20, 20, 400001)
voi_w = models.Voigt1D(
amplitude_L=2.0 / np.pi, fwhm_L=1.0, fwhm_G=doppler, method="wofz"
)
vf_w = voi_w(x)
dvda_w = voi_w.fit_deriv(
x, x_0=0, amplitude_L=2.0 / np.pi, fwhm_L=1.0, fwhm_G=doppler
)
voi_h = models.Voigt1D(
amplitude_L=2.0 / np.pi, fwhm_L=1.0, fwhm_G=doppler, method="humlicek2"
)
vf_h = voi_h(x)
dvda_h = voi_h.fit_deriv(
x, x_0=0, amplitude_L=2.0 / np.pi, fwhm_L=1.0, fwhm_G=doppler
)
assert_allclose(vf_h, vf_w, rtol=1e-7 * (2 + 1 / np.sqrt(doppler)))
assert_allclose(dvda_h, dvda_w, rtol=1e-9, atol=1e-7 * (1 + 30 / doppler))
@pytest.mark.filterwarnings(r"ignore:humlicek2 has been deprecated since .*")
def test_Voigt1D_method():
"""Test Voigt1D default method"""
voi = models.Voigt1D(method="humlicek2")
assert voi.method == "_hum2zpf16c"
voi = models.Voigt1D()
if HAS_SCIPY:
assert voi.method == "wofz"
voi = models.Voigt1D(method="wofz")
assert voi.method == "wofz"
voi = models.Voigt1D(method="scipy")
assert voi.method == "wofz"
else:
assert voi.method == "_hum2zpf16c"
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_KingProjectedAnalytic1D_fit(fitter):
fitter = fitter()
km = models.KingProjectedAnalytic1D(amplitude=1, r_core=1, r_tide=2)
xarr = np.linspace(0.1, 2, 10)
yarr = km(xarr)
km_init = models.KingProjectedAnalytic1D(amplitude=1, r_core=1, r_tide=1)
km_fit = fitter(km_init, xarr, yarr)
assert_allclose(km_fit.param_sets, km.param_sets)
assert_allclose(km_fit.concentration, 0.30102999566398136)
@pytest.mark.parametrize("model", [models.Exponential1D(), models.Logarithmic1D()])
def test_ExponentialAndLogarithmic1D_fit(model):
xarr = np.linspace(0.1, 10.0, 200)
assert_allclose(xarr, model.inverse(model(xarr)))
@pytest.mark.parametrize("model", [models.Exponential1D(), models.Logarithmic1D()])
def test_ExponentialAndLogarithmic_set_tau(model):
MESSAGE = r"0 is not an allowed value for tau"
with pytest.raises(ValueError, match=MESSAGE):
model.tau = 0
def test_Linear1D_inverse():
model = models.Linear1D(slope=4, intercept=-12)
inverse = model.inverse
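    # y = 4 * x - 12  =>  x = y / 4 + 3, hence slope 1/4 and intercept 3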
assert inverse.slope == 1 / 4
assert inverse.intercept == 3
@pytest.mark.parametrize(
"trig",
[
(models.Sine1D, [-0.25, 0.25]),
(models.ArcSine1D, [-0.25, 0.25]),
(models.Cosine1D, [0, 0.5]),
(models.ArcCosine1D, [0, 0.5]),
(models.Tangent1D, [-0.25, 0.25]),
(models.ArcTangent1D, [-0.25, 0.25]),
],
)
def test_trig_inverse(trig):
mdl = trig[0]()
lower, upper = trig[1]
x = np.arange(lower, upper, 0.01)
assert_allclose(mdl.inverse(mdl(x)), x, atol=1e-10)
assert_allclose(mdl(mdl.inverse(x)), x, atol=1e-10)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_Sersic2D_theta():
theta = Angle(90, "deg")
model1 = models.Sersic2D(1, 5, 4, 25, 25, 0.5, theta=theta)
theta2 = np.pi / 2.0
model2 = models.Sersic2D(1, 5, 4, 25, 25, 0.5, theta=theta2)
assert model1.theta.quantity.to("radian").value == model2.theta.value
assert model1(619.42, 31.314) == model2(619.42, 31.314)
|
4dca908ba4dbc67444ee9599b535323d16279370038a468b790d7f836c5949fd | # Licensed under a 3-clause BSD style license - see LICENSE.rst:
"""
Tests for model evaluation.
Compare the results of some models with other programs.
"""
import unittest.mock as mk
import numpy as np
# pylint: disable=invalid-name, no-member
import pytest
from numpy.testing import assert_allclose, assert_equal
import astropy.modeling.tabular as tabular_models
from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.bounding_box import ModelBoundingBox
from astropy.modeling.core import FittableModel, Model, _ModelMeta
from astropy.modeling.models import Gaussian2D
from astropy.modeling.parameters import InputParameterError, Parameter
from astropy.modeling.polynomial import PolynomialBase
from astropy.modeling.powerlaws import (
BrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D,
PowerLaw1D,
SmoothlyBrokenPowerLaw1D,
)
from astropy.modeling.separable import separability_matrix
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils import NumpyRNGContext, minversion
from astropy.utils.compat.optional_deps import HAS_SCIPY
from .example_models import models_1D, models_2D
fitters = [
fitting.LevMarLSQFitter,
fitting.TRFLSQFitter,
fitting.LMLSQFitter,
fitting.DogBoxLSQFitter,
]
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_custom_model(fitter, amplitude=4, frequency=1):
fitter = fitter()
def sine_model(x, amplitude=4, frequency=1):
"""
Model function
"""
return amplitude * np.sin(2 * np.pi * frequency * x)
def sine_deriv(x, amplitude=4, frequency=1):
"""
Jacobian of model function, e.g. derivative of the function with
respect to the *parameters*
"""
da = np.sin(2 * np.pi * frequency * x)
df = 2 * np.pi * x * amplitude * np.cos(2 * np.pi * frequency * x)
return np.vstack((da, df))
SineModel = models.custom_model(sine_model, fit_deriv=sine_deriv)
x = np.linspace(0, 4, 50)
sin_model = SineModel()
sin_model.evaluate(x, 5.0, 2.0)
sin_model.fit_deriv(x, 5.0, 2.0)
np.random.seed(0)
data = sin_model(x) + np.random.rand(len(x)) - 0.5
model = fitter(sin_model, x, data)
assert np.all(
(
np.array([model.amplitude.value, model.frequency.value])
- np.array([amplitude, frequency])
)
< 0.001
)
def test_custom_model_init():
@models.custom_model
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel(amplitude=2.0, frequency=0.5)
assert sin_model.amplitude == 2.0
assert sin_model.frequency == 0.5
def test_custom_model_defaults():
@models.custom_model
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel()
assert SineModel.amplitude.default == 4
assert SineModel.frequency.default == 1
assert sin_model.amplitude == 4
assert sin_model.frequency == 1
def test_inconsistent_input_shapes():
g = Gaussian2D()
x = np.arange(-1.0, 1, 0.2)
y = x.copy()
# check scalar input broadcasting works
assert np.abs(g(x, 0) - g(x, 0 * x)).sum() == 0
    # and column/row inputs broadcast against each other to a full 2D grid
x.shape = (10, 1)
y.shape = (1, 10)
result = g(x, y)
assert result.shape == (10, 10)
def test_custom_model_bounding_box():
"""Test bounding box evaluation for a 3D model"""
def ellipsoid(x, y, z, x0=13, y0=10, z0=8, a=4, b=3, c=2, amp=1):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(models.custom_model(ellipsoid)):
@property
def bounding_box(self):
return (
(self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a),
)
model = Ellipsoid3D()
bbox = model.bounding_box
zlim, ylim, xlim = bbox.bounding_box()
dz, dy, dx = np.diff(bbox) / 2
z1, y1, x1 = np.mgrid[
slice(zlim[0], zlim[1] + 1),
slice(ylim[0], ylim[1] + 1),
slice(xlim[0], xlim[1] + 1),
]
z2, y2, x2 = np.mgrid[
slice(zlim[0] - dz, zlim[1] + dz + 1),
slice(ylim[0] - dy, ylim[1] + dy + 1),
slice(xlim[0] - dx, xlim[1] + dx + 1),
]
arr = model(x2, y2, z2, with_bounding_box=True)
sub_arr = model(x1, y1, z1, with_bounding_box=True)
# check for flux agreement
assert abs(np.nansum(arr) - np.nansum(sub_arr)) < np.nansum(arr) * 1e-7
class Fittable2DModelTester:
"""
Test class for all two dimensional parametric models.
    Test values have to be defined in example_models.py. It currently tests the
    model with different input types, evaluates the model at different
    positions, checks that it gives the correct values, and tests whether the
    model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.1
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, 0.1)
self.y1 = np.arange(1, 10, 0.1)
self.y2, self.x2 = np.mgrid[:10, :8]
def test_input2D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x, self.y)
model(self.x1, self.y1)
model(self.x2, self.y2)
def test_eval2D(self, model_class, test_parameters):
"""Test model values add certain given points"""
model = create_model(model_class, test_parameters)
x = test_parameters["x_values"]
y = test_parameters["y_values"]
z = test_parameters["z_values"]
assert np.all(np.abs(model(x, y) - z) < self.eval_error)
def test_bounding_box2D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = ((-5, 5), (-5, 5))
assert model.bounding_box == ((-5, 5), (-5, 5))
model.bounding_box = None
MESSAGE = r"No bounding box is defined for this model .*"
with pytest.raises(NotImplementedError, match=MESSAGE):
model.bounding_box
        # test the exception if dimensions don't match
MESSAGE = r"An interval must be some sort of sequence of length 2"
with pytest.raises(ValueError, match=MESSAGE):
model.bounding_box = (-5, 5)
del model.bounding_box
try:
bbox = model.bounding_box
except NotImplementedError:
return
ddx = 0.01
ylim, xlim = bbox
x1 = np.arange(xlim[0], xlim[1], ddx)
y1 = np.arange(ylim[0], ylim[1], ddx)
x2 = np.concatenate(
(
[xlim[0] - idx * ddx for idx in range(10, 0, -1)],
x1,
[xlim[1] + idx * ddx for idx in range(1, 10)],
)
)
y2 = np.concatenate(
(
[ylim[0] - idx * ddx for idx in range(10, 0, -1)],
y1,
[ylim[1] + idx * ddx for idx in range(1, 10)],
)
)
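        # Evaluation with ``with_bounding_box=True`` fills points outside the
        # box with NaN; after dropping those NaNs, the remaining values must
        # match the direct evaluation on the in-box grid.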
inside_bbox = model(x1, y1)
outside_bbox = model(x2, y2, with_bounding_box=True)
outside_bbox = outside_bbox[~np.isnan(outside_bbox)]
assert np.all(inside_bbox == outside_bbox)
def test_bounding_box2D_peak(self, model_class, test_parameters):
if not test_parameters.pop("bbox_peak", False):
return
model = create_model(model_class, test_parameters)
bbox = model.bounding_box
ylim, xlim = bbox
dy, dx = np.diff(bbox) / 2
y1, x1 = np.mgrid[slice(ylim[0], ylim[1] + 1), slice(xlim[0], xlim[1] + 1)]
y2, x2 = np.mgrid[
slice(ylim[0] - dy, ylim[1] + dy + 1), slice(xlim[0] - dx, xlim[1] + dx + 1)
]
arr = model(x2, y2)
sub_arr = model(x1, y1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_fitter2D(self, model_class, test_parameters, fitter):
"""Test if the parametric model works with the fitter."""
fitter = fitter()
x_lim = test_parameters["x_lim"]
y_lim = test_parameters["y_lim"]
parameters = test_parameters["parameters"]
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters["log_fit"]:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.N)
xv, yv = np.meshgrid(x, y)
np.random.seed(0)
# add 10% noise to the amplitude
noise = np.random.rand(self.N, self.N) - 0.5
data = model(xv, yv) + 0.1 * parameters[0] * noise
new_model = fitter(model, xv, yv, data)
params = [getattr(new_model, name) for name in new_model.param_names]
fixed = [param.fixed for param in params]
expected = np.array([val for val, fixed in zip(parameters, fixed) if not fixed])
fitted = np.array([param.value for param in params if not param.fixed])
assert_allclose(fitted, expected, atol=self.fit_error)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_deriv_2D(self, model_class, test_parameters, fitter):
"""
Test the derivative of a model by fitting with an estimated and
analytical derivative.
"""
fitter = fitter()
x_lim = test_parameters["x_lim"]
y_lim = test_parameters["y_lim"]
if model_class.fit_deriv is None or issubclass(model_class, PolynomialBase):
return
if "log_fit" in test_parameters:
if test_parameters["log_fit"]:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.M)
x_test = np.logspace(x_lim[0], x_lim[1], self.N * 10)
y_test = np.logspace(y_lim[0], y_lim[1], self.M * 10)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.M)
x_test = np.linspace(x_lim[0], x_lim[1], self.N * 10)
y_test = np.linspace(y_lim[0], y_lim[1], self.M * 10)
xv, yv = np.meshgrid(x, y)
xv_test, yv_test = np.meshgrid(x_test, y_test)
try:
model_with_deriv = create_model(
model_class,
test_parameters,
use_constraints=False,
parameter_key="deriv_initial",
)
model_no_deriv = create_model(
model_class,
test_parameters,
use_constraints=False,
parameter_key="deriv_initial",
)
model = create_model(
model_class,
test_parameters,
use_constraints=False,
parameter_key="deriv_initial",
)
except KeyError:
model_with_deriv = create_model(
model_class, test_parameters, use_constraints=False
)
model_no_deriv = create_model(
model_class, test_parameters, use_constraints=False
)
model = create_model(model_class, test_parameters, use_constraints=False)
# add 10% noise to the amplitude
rsn = np.random.default_rng(0)
amplitude = test_parameters["parameters"][0]
n = 0.1 * amplitude * (rsn.random((self.M, self.N)) - 0.5)
data = model(xv, yv) + n
fitter_with_deriv = fitter
new_model_with_deriv = fitter_with_deriv(model_with_deriv, xv, yv, data)
fitter_no_deriv = fitter
new_model_no_deriv = fitter_no_deriv(
model_no_deriv, xv, yv, data, estimate_jacobian=True
)
assert_allclose(
new_model_with_deriv(xv_test, yv_test),
new_model_no_deriv(xv_test, yv_test),
rtol=1e-2,
)
if model_class != Gaussian2D:
assert_allclose(
new_model_with_deriv.parameters, new_model_no_deriv.parameters, rtol=0.1
)
@pytest.mark.filterwarnings(r"ignore:humlicek2 has been deprecated since .*")
class Fittable1DModelTester:
"""
Test class for all one dimensional parametric models.
    Test values have to be defined in example_models.py. It currently tests the
    model with different input types, evaluates the model at different
    positions, checks that it gives the correct values, and tests whether the
    model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
    # These models will fail the fitting test because the built-in fitting data
    # produce non-finite values
_non_finite_models = [
BrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D,
PowerLaw1D,
SmoothlyBrokenPowerLaw1D,
]
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.11
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, 0.1)
self.y1 = np.arange(1, 10, 0.1)
self.y2, self.x2 = np.mgrid[:10, :8]
@pytest.mark.filterwarnings(r"ignore:.*:RuntimeWarning")
def test_input1D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x)
model(self.x1)
model(self.x2)
def test_eval1D(self, model_class, test_parameters):
"""
Test model values at certain given points
"""
model = create_model(model_class, test_parameters)
x = test_parameters["x_values"]
y = test_parameters["y_values"]
assert_allclose(model(x), y, atol=self.eval_error)
def test_bounding_box1D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = (-5, 5)
model.bounding_box = None
MESSAGE = r"No bounding box is defined for this model .*"
with pytest.raises(NotImplementedError, match=MESSAGE):
model.bounding_box
del model.bounding_box
# test exception if dimensions don't match
MESSAGE = r"An interval must be some sort of sequence of length 2"
with pytest.raises(ValueError, match=MESSAGE):
model.bounding_box = 5
try:
bbox = model.bounding_box.bounding_box()
except NotImplementedError:
return
ddx = 0.01
x1 = np.arange(bbox[0], bbox[1], ddx)
x2 = np.concatenate(
(
[bbox[0] - idx * ddx for idx in range(10, 0, -1)],
x1,
[bbox[1] + idx * ddx for idx in range(1, 10)],
)
)
inside_bbox = model(x1)
outside_bbox = model(x2, with_bounding_box=True)
outside_bbox = outside_bbox[~np.isnan(outside_bbox)]
assert np.all(inside_bbox == outside_bbox)
def test_bounding_box1D_peak(self, model_class, test_parameters):
if not test_parameters.pop("bbox_peak", False):
return
model = create_model(model_class, test_parameters)
bbox = model.bounding_box
if isinstance(model, (models.Lorentz1D, models.Drude1D)):
rtol = 0.01 # 1% agreement is enough due to very extended wings
ddx = 0.1 # Finer sampling to "integrate" flux for narrow peak
else:
rtol = 1e-7
ddx = 1
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
dx = np.diff(bbox) / 2
x1 = np.mgrid[slice(bbox[0], bbox[1] + 1, ddx)]
x2 = np.mgrid[slice(bbox[0] - dx, bbox[1] + dx + 1, ddx)]
arr = model(x2)
sub_arr = model(x1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * rtol
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_fitter1D(self, model_class, test_parameters, fitter):
"""
Test if the parametric model works with the fitter.
"""
SCIPY_LT_1_6 = not minversion("scipy", "1.6")
if (
model_class == models.BrokenPowerLaw1D
and fitter == fitting.TRFLSQFitter
and SCIPY_LT_1_6
):
pytest.xfail(reason="TRF fitter fails for BrokenPowerLaw1D in scipy < 1.6")
fitter = fitter()
x_lim = test_parameters["x_lim"]
parameters = test_parameters["parameters"]
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters["log_fit"]:
x = np.logspace(x_lim[0], x_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
np.random.seed(0)
# add 1% relative noise to the model values (see relative_noise_amplitude below)
relative_noise_amplitude = 0.01
data = (1 + relative_noise_amplitude * np.random.randn(len(x))) * model(x)
new_model = fitter(model, x, data)
# Only check parameters that were free in the fit
params = [getattr(new_model, name) for name in new_model.param_names]
fixed = [param.fixed for param in params]
expected = np.array([val for val, fixed in zip(parameters, fixed) if not fixed])
fitted = np.array([param.value for param in params if not param.fixed])
assert_allclose(fitted, expected, atol=self.fit_error)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:.*:RuntimeWarning")
@pytest.mark.parametrize("fitter", fitters)
def test_deriv_1D(self, model_class, test_parameters, fitter):
"""
Test the derivative of a model by comparing results with an estimated
derivative.
"""
fitter = fitter()
if model_class in self._non_finite_models:
return
x_lim = test_parameters["x_lim"]
if model_class.fit_deriv is None or issubclass(model_class, PolynomialBase):
return
if "log_fit" in test_parameters:
if test_parameters["log_fit"]:
x = np.logspace(x_lim[0], x_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
parameters = test_parameters["parameters"]
model_with_deriv = create_model(
model_class, test_parameters, use_constraints=False
)
model_no_deriv = create_model(
model_class, test_parameters, use_constraints=False
)
# NOTE: PR 10644 replaced deprecated usage of RandomState but could not
# find a new seed that did not cause test failure, resorted to hardcoding.
# add 10% noise to the amplitude
# fmt: off
rsn_rand_1234567890 = np.array(
[
0.61879477, 0.59162363, 0.88868359, 0.89165480, 0.45756748,
0.77818808, 0.26706377, 0.99610621, 0.54009489, 0.53752161,
0.40099938, 0.70540579, 0.40518559, 0.94999075, 0.03075388,
0.13602495, 0.08297726, 0.42352224, 0.23449723, 0.74743526,
0.65177865, 0.68998682, 0.16413419, 0.87642114, 0.44733314,
0.57871104, 0.52377835, 0.62689056, 0.34869427, 0.26209748,
0.07498055, 0.17940570, 0.82999425, 0.98759822, 0.11326099,
0.63846415, 0.73056694, 0.88321124, 0.52721004, 0.66487673,
0.74209309, 0.94083846, 0.70123128, 0.29534353, 0.76134369,
0.77593881, 0.36985514, 0.89519067, 0.33082813, 0.86108824,
0.76897859, 0.61343376, 0.43870907, 0.91913538, 0.76958966,
0.51063556, 0.04443249, 0.57463611, 0.31382006, 0.41221713,
0.21531811, 0.03237521, 0.04166386, 0.73109303, 0.74556052,
0.64716325, 0.77575353, 0.64599254, 0.16885816, 0.48485480,
0.53844248, 0.99690349, 0.23657074, 0.04119088, 0.46501519,
0.35739006, 0.23002665, 0.53420791, 0.71639475, 0.81857486,
0.73994342, 0.07948837, 0.75688276, 0.13240193, 0.48465576,
0.20624753, 0.02298276, 0.54257873, 0.68123230, 0.35887468,
0.36296147, 0.67368397, 0.29505730, 0.66558885, 0.93652252,
0.36755130, 0.91787687, 0.75922703, 0.48668067, 0.45967890
]
)
# fmt: on
n = 0.1 * parameters[0] * (rsn_rand_1234567890 - 0.5)
data = model_with_deriv(x) + n
fitter_with_deriv = fitter
new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
fitter_no_deriv = fitter
new_model_no_deriv = fitter_no_deriv(
model_no_deriv, x, data, estimate_jacobian=True
)
assert_allclose(
new_model_with_deriv.parameters, new_model_no_deriv.parameters, atol=0.15
)
def create_model(
model_class, test_parameters, use_constraints=True, parameter_key="parameters"
):
"""Create instance of model class."""
constraints = {}
if issubclass(model_class, PolynomialBase):
return model_class(**test_parameters[parameter_key])
elif issubclass(model_class, FittableModel):
if "requires_scipy" in test_parameters and not HAS_SCIPY:
pytest.skip("SciPy not found")
if use_constraints:
if "constraints" in test_parameters:
constraints = test_parameters["constraints"]
return model_class(*test_parameters[parameter_key], **constraints)
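# Illustrative sketch (not part of the original suite, hence the leading
# underscore so pytest does not collect it): how create_model is typically
# driven by an entry like those in example_models.py. The parameter dict
# below is hypothetical and only shows the expected shape of the input.
def _example_create_model_usage():
    example_parameters = {
        "parameters": [1.0, 0.0, 0.5],  # amplitude, mean, stddev
        "constraints": {"fixed": {"mean": True}},
    }
    # Builds Gaussian1D(1.0, 0.0, 0.5) with the 'mean' parameter held fixed.
    return create_model(models.Gaussian1D, example_parameters)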
@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters.*")
@pytest.mark.filterwarnings(r"ignore:The fit may be unsuccessful.*")
@pytest.mark.parametrize(
("model_class", "test_parameters"),
sorted(models_1D.items(), key=lambda x: str(x[0])),
)
class TestFittable1DModels(Fittable1DModelTester):
pass
@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters.*")
@pytest.mark.parametrize(
("model_class", "test_parameters"),
sorted(models_2D.items(), key=lambda x: str(x[0])),
)
class TestFittable2DModels(Fittable2DModelTester):
pass
def test_ShiftModel():
# Shift by a scalar
m = models.Shift(42)
assert m(0) == 42
assert_equal(m([1, 2]), [43, 44])
# Shift by a list
m = models.Shift([42, 43], n_models=2)
assert_equal(m(0), [42, 43])
assert_equal(m([1, 2], model_set_axis=False), [[43, 44], [44, 45]])
def test_ScaleModel():
# Scale by a scalar
m = models.Scale(42)
assert m(0) == 0
assert_equal(m([1, 2]), [42, 84])
# Scale by a list
m = models.Scale([42, 43], n_models=2)
assert_equal(m(0), [0, 0])
assert_equal(m([1, 2], model_set_axis=False), [[42, 84], [43, 86]])
@pytest.mark.filterwarnings(r"ignore:humlicek2 has been deprecated since .*")
def test_voigt_model():
"""
Currently just tests that the model peaks at its center (x_0).
Regression test for https://github.com/astropy/astropy/issues/3942
"""
m = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
x = np.arange(0, 10, 0.01)
y = m(x)
assert y[500] == y.max() # y[500] is right at the center
def test_model_instance_repr():
m = models.Gaussian1D(1.5, 2.5, 3.5)
assert repr(m) == "<Gaussian1D(amplitude=1.5, mean=2.5, stddev=3.5)>"
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_interp_1d():
"""
Test Tabular1D model.
"""
points = np.arange(0, 5)
values = [1.0, 10, 2, 45, -3]
LookupTable = models.tabular_model(1)
model = LookupTable(points=points, lookup_table=values)
xnew = [0.0, 0.7, 1.4, 2.1, 3.9]
ans1 = [1.0, 7.3, 6.8, 6.3, 1.8]
assert_allclose(model(xnew), ans1)
# Test evaluate without passing `points`.
model = LookupTable(lookup_table=values)
assert_allclose(model(xnew), ans1)
# Test bounds error.
xextrap = [0.0, 0.7, 1.4, 2.1, 3.9, 4.1]
MESSAGE = r"One of the requested xi is out of bounds in dimension 0"
with pytest.raises(ValueError, match=MESSAGE):
model(xextrap)
# test extrapolation and fill value
model = LookupTable(lookup_table=values, bounds_error=False, fill_value=None)
assert_allclose(model(xextrap), [1.0, 7.3, 6.8, 6.3, 1.8, -7.8])
# Test unit support
xnew = xnew * u.nm
ans1 = ans1 * u.nJy
model = LookupTable(points=points * u.nm, lookup_table=values * u.nJy)
assert_quantity_allclose(model(xnew), ans1)
assert_quantity_allclose(model(xnew.to(u.nm)), ans1)
assert model.bounding_box == (0 * u.nm, 4 * u.nm)
# Test fill value unit conversion and unitless input on table with unit
model = LookupTable(
[1, 2, 3],
[10, 20, 30] * u.nJy,
bounds_error=False,
fill_value=1e-33 * (u.W / (u.m * u.m * u.Hz)),
)
assert_quantity_allclose(model(np.arange(5)), [100, 10, 20, 30, 100] * u.nJy)
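# A minimal sketch (assumes scipy is available; the points/values are
# illustrative) of the three out-of-bounds behaviours exercised above:
# raise, fill with a constant, or extrapolate when fill_value is None.
def _example_tabular1d_bounds_handling():
    pts = np.arange(4)
    vals = np.array([0.0, 1.0, 2.0, 3.0])
    # With the defaults (bounds_error=True) an out-of-bounds input raises ValueError.
    filled = models.Tabular1D(pts, vals, bounds_error=False)  # out of bounds -> fill_value (NaN)
    extrap = models.Tabular1D(pts, vals, bounds_error=False, fill_value=None)  # extrapolates
    return filled(5.0), extrap(5.0)  # (nan, 5.0) for this table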
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_interp_2d():
table = np.array(
[
[-0.04614432, -0.02512547, -0.00619557, 0.0144165, 0.0297525],
[-0.04510594, -0.03183369, -0.01118008, 0.01201388, 0.02496205],
[-0.05464094, -0.02804499, -0.00960086, 0.01134333, 0.02284104],
[-0.04879338, -0.02539565, -0.00440462, 0.01795145, 0.02122417],
[-0.03637372, -0.01630025, -0.00157902, 0.01649774, 0.01952131],
]
)
points = np.arange(0, 5)
points = (points, points)
xnew = np.array([0.0, 0.7, 1.4, 2.1, 3.9])
LookupTable = models.tabular_model(2)
model = LookupTable(points, table)
znew = model(xnew, xnew)
result = np.array([-0.04614432, -0.03450009, -0.02241028, -0.0069727, 0.01938675])
assert_allclose(znew, result, atol=1e-7)
# test 2D arrays as input
a = np.arange(12).reshape((3, 4))
y, x = np.mgrid[:3, :4]
t = models.Tabular2D(lookup_table=a)
r = t(y, x)
assert_allclose(a, r)
MESSAGE = r"Only n_models=1 is supported"
with pytest.raises(NotImplementedError, match=MESSAGE):
model = LookupTable(n_models=2)
MESSAGE = r"Must provide a lookup table"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(points=([1.2, 2.3], [1.2, 6.7], [3, 4]))
MESSAGE = r"lookup_table should be an array with 2 dimensions"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(lookup_table=[1, 2, 3])
MESSAGE = r"lookup_table should be an array with 2 dimensions"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(([1, 2], [3, 4]), [5, 6])
MESSAGE = r"points must all have the same unit"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(([1, 2] * u.m, [3, 4]), [[5, 6], [7, 8]])
MESSAGE = r"fill value is in Jy but expected to be unitless"
with pytest.raises(ValueError, match=MESSAGE):
model = LookupTable(points, table, bounds_error=False, fill_value=1 * u.Jy)
# Test unit support
points = points[0] * u.nm
points = (points, points)
xnew = xnew * u.nm
model = LookupTable(points, table * u.nJy)
result = result * u.nJy
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7 * u.nJy)
xnew = xnew.to(u.m)
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7 * u.nJy)
bbox = (0 * u.nm, 4 * u.nm)
bbox = (bbox, bbox)
assert model.bounding_box == bbox
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_nd():
a = np.arange(24).reshape((2, 3, 4))
x, y, z = np.mgrid[:2, :3, :4]
tab = models.tabular_model(3)
t = tab(lookup_table=a)
result = t(x, y, z)
assert_allclose(a, result)
MESSAGE = r"Lookup table must have at least one dimension"
with pytest.raises(ValueError, match=MESSAGE):
models.tabular_model(0)
def test_with_bounding_box():
"""
Test the option to evaluate a model respecting
its bounding_box.
"""
p = models.Polynomial2D(2) & models.Polynomial2D(2)
m = models.Mapping((0, 1, 0, 1)) | p
with NumpyRNGContext(1234567):
m.parameters = np.random.rand(12)
m.bounding_box = ((3, 9), (1, 8))
x, y = np.mgrid[:10, :10]
a, b = m(x, y)
aw, bw = m(x, y, with_bounding_box=True)
ind = (~np.isnan(aw)).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
aw, bw = m(x, y, with_bounding_box=True, fill_value=1000)
ind = (aw != 1000).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
# test the order of bbox is not reversed for 1D models
p = models.Polynomial1D(1, c0=12, c1=2.3)
p.bounding_box = (0, 5)
assert p(1) == p(1, with_bounding_box=True)
t3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
t3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
assert_allclose(
t3([1, 1], [7, 7], [3, 5], with_bounding_box=True),
[[np.nan, 11], [np.nan, 14], [np.nan, 4]],
)
trans3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
trans3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
assert_allclose(trans3(1, 7, 5, with_bounding_box=True), [11, 14, 4])
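# A small sketch (model and values are illustrative) of the with_bounding_box
# behaviour checked above: inputs outside the box evaluate to NaN by default,
# or to the supplied fill_value.
def _example_with_bounding_box_1d():
    g = models.Gaussian1D(amplitude=1.0, mean=0.0, stddev=1.0)
    g.bounding_box = (-1.0, 1.0)
    inside = g(0.5, with_bounding_box=True)  # evaluated normally
    outside = g(5.0, with_bounding_box=True)  # NaN, since 5.0 is outside (-1, 1)
    replaced = g(5.0, with_bounding_box=True, fill_value=0.0)  # 0.0 instead of NaN
    return inside, outside, replaced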
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_with_bounding_box():
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
result = t(1, with_bounding_box=True)
assert result == 3.4
assert t.inverse(result, with_bounding_box=True) == 1.0
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_bounding_box_with_units():
points = np.arange(5) * u.pix
lt = np.arange(5) * u.AA
t = models.Tabular1D(points, lt)
result = t(1 * u.pix, with_bounding_box=True)
assert result == 1.0 * u.AA
assert t.inverse(result, with_bounding_box=True) == 1 * u.pix
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular1d_inverse():
"""Test that the Tabular1D inverse is defined"""
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
result = t.inverse((3.4, 6.7))
assert_allclose(result, np.array((1.0, 2.0)))
# Check that it works for descending values in lookup_table
t2 = models.Tabular1D(points, values[::-1])
assert_allclose(t2.inverse.points[0], t2.lookup_table[::-1])
result2 = t2.inverse((7, 6.7))
assert_allclose(result2, np.array((1.0, 2.0)))
# Check that it errors on double-valued lookup_table
points = np.arange(5)
values = np.array([1.5, 3.4, 3.4, 32, 25])
t = models.Tabular1D(points, values)
with pytest.raises(NotImplementedError, match=r""):
t.inverse((3.4, 7.0))
# Check that Tabular2D.inverse raises an error
table = np.arange(5 * 5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t3 = models.Tabular2D(points=points, lookup_table=table)
with pytest.raises(NotImplementedError, match=r""):
t3.inverse((3, 3))
# Check that it uses the same kwargs as the original model
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
MESSAGE = r"One of the requested xi is out of bounds in dimension 0"
with pytest.raises(ValueError, match=MESSAGE):
t.inverse(100)
t = models.Tabular1D(points, values, bounds_error=False, fill_value=None)
result = t.inverse(100)
assert_allclose(t(result), 100)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_grid_shape_mismatch_error():
points = np.arange(5)
lt = np.mgrid[0:5, 0:5][0]
MESSAGE = r"Expected grid points in 2 directions, got 5."
with pytest.raises(ValueError, match=MESSAGE):
models.Tabular2D(points, lt)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_repr():
points = np.arange(5)
lt = np.arange(5)
t = models.Tabular1D(points, lt)
assert (
repr(t)
== "<Tabular1D(points=(array([0, 1, 2, 3, 4]),), lookup_table=[0 1 2 3 4])>"
)
table = np.arange(5 * 5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t = models.Tabular2D(points=points, lookup_table=table)
assert (
repr(t)
== "<Tabular2D(points=(array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4])), "
"lookup_table=[[ 0 1 2 3 4]\n"
" [ 5 6 7 8 9]\n"
" [10 11 12 13 14]\n"
" [15 16 17 18 19]\n"
" [20 21 22 23 24]])>"
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_str():
points = np.arange(5)
lt = np.arange(5)
t = models.Tabular1D(points, lt)
assert (
str(t) == "Model: Tabular1D\n"
"N_inputs: 1\n"
"N_outputs: 1\n"
"Parameters: \n"
" points: (array([0, 1, 2, 3, 4]),)\n"
" lookup_table: [0 1 2 3 4]\n"
" method: linear\n"
" fill_value: nan\n"
" bounds_error: True"
)
table = np.arange(5 * 5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t = models.Tabular2D(points=points, lookup_table=table)
assert (
str(t) == "Model: Tabular2D\n"
"N_inputs: 2\n"
"N_outputs: 1\n"
"Parameters: \n"
" points: (array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4]))\n"
" lookup_table: [[ 0 1 2 3 4]\n"
" [ 5 6 7 8 9]\n"
" [10 11 12 13 14]\n"
" [15 16 17 18 19]\n"
" [20 21 22 23 24]]\n"
" method: linear\n"
" fill_value: nan\n"
" bounds_error: True"
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_evaluate():
points = np.arange(5)
lt = np.arange(5)[::-1]
t = models.Tabular1D(points, lt)
assert (t.evaluate([1, 2, 3]) == [3, 2, 1]).all()
assert (t.evaluate(np.array([1, 2, 3]) * u.m) == [3, 2, 1]).all()
t.n_outputs = 2
value = [np.array([3, 2, 1]), np.array([1, 2, 3])]
with mk.patch.object(
tabular_models, "interpn", autospec=True, return_value=value
) as mkInterpn:
outputs = t.evaluate([1, 2, 3])
for index, output in enumerate(outputs):
assert np.all(value[index] == output)
assert mkInterpn.call_count == 1
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_tabular_module_name():
"""
The module name must be set manually because
these classes are created dynamically.
"""
for model in [models.Tabular1D, models.Tabular2D]:
assert model.__module__ == "astropy.modeling.tabular"
class classmodel(FittableModel):
f = Parameter(default=1)
x = Parameter(default=0)
y = Parameter(default=2)
def __init__(self, f=f.default, x=x.default, y=y.default):
super().__init__(f, x, y)
def evaluate(self):
pass
class subclassmodel(classmodel):
f = Parameter(default=3, fixed=True)
x = Parameter(default=10)
y = Parameter(default=12)
h = Parameter(default=5)
def __init__(self, f=f.default, x=x.default, y=y.default, h=h.default):
super().__init__(f, x, y)
def evaluate(self):
pass
def test_parameter_inheritance():
b = subclassmodel()
assert b.param_names == ("f", "x", "y", "h")
assert b.h == 5
assert b.f == 3
assert b.f.fixed == True # noqa: E712
@pytest.mark.filterwarnings(r"ignore:humlicek2 has been deprecated since .*")
def test_parameter_description():
model = models.Gaussian1D(1.5, 2.5, 3.5)
assert model.amplitude._description == "Amplitude (peak value) of the Gaussian"
assert model.mean._description == "Position of peak (Gaussian)"
model = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
assert model.amplitude_L._description == "The Lorentzian amplitude"
assert model.fwhm_L._description == "The Lorentzian full width at half maximum"
assert model.fwhm_G._description == "The Gaussian full width at half maximum"
def test_SmoothlyBrokenPowerLaw1D_validators():
MESSAGE = r"amplitude parameter must be > 0"
with pytest.raises(InputParameterError, match=MESSAGE):
SmoothlyBrokenPowerLaw1D(amplitude=-1)
MESSAGE = r"delta parameter must be >= 0.001"
with pytest.raises(InputParameterError, match=MESSAGE):
SmoothlyBrokenPowerLaw1D(delta=0)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:.*:RuntimeWarning")
@pytest.mark.filterwarnings(r"ignore:The fit may be unsuccessful.*")
def test_SmoothlyBrokenPowerLaw1D_fit_deriv():
x_lim = [0.01, 100]
x = np.logspace(x_lim[0], x_lim[1], 100)
parameters = {
"parameters": [1, 10, -2, 2, 0.5],
"constraints": {"fixed": {"x_break": True, "delta": True}},
}
model_with_deriv = create_model(
SmoothlyBrokenPowerLaw1D, parameters, use_constraints=False
)
model_no_deriv = create_model(
SmoothlyBrokenPowerLaw1D, parameters, use_constraints=False
)
# NOTE: PR 10644 replaced deprecated usage of RandomState but could not
# find a new seed that did not cause test failure, resorted to hardcoding.
# add 10% noise to the amplitude
# fmt: off
rsn_rand_1234567890 = np.array(
[
0.61879477, 0.59162363, 0.88868359, 0.89165480, 0.45756748,
0.77818808, 0.26706377, 0.99610621, 0.54009489, 0.53752161,
0.40099938, 0.70540579, 0.40518559, 0.94999075, 0.03075388,
0.13602495, 0.08297726, 0.42352224, 0.23449723, 0.74743526,
0.65177865, 0.68998682, 0.16413419, 0.87642114, 0.44733314,
0.57871104, 0.52377835, 0.62689056, 0.34869427, 0.26209748,
0.07498055, 0.17940570, 0.82999425, 0.98759822, 0.11326099,
0.63846415, 0.73056694, 0.88321124, 0.52721004, 0.66487673,
0.74209309, 0.94083846, 0.70123128, 0.29534353, 0.76134369,
0.77593881, 0.36985514, 0.89519067, 0.33082813, 0.86108824,
0.76897859, 0.61343376, 0.43870907, 0.91913538, 0.76958966,
0.51063556, 0.04443249, 0.57463611, 0.31382006, 0.41221713,
0.21531811, 0.03237521, 0.04166386, 0.73109303, 0.74556052,
0.64716325, 0.77575353, 0.64599254, 0.16885816, 0.48485480,
0.53844248, 0.99690349, 0.23657074, 0.04119088, 0.46501519,
0.35739006, 0.23002665, 0.53420791, 0.71639475, 0.81857486,
0.73994342, 0.07948837, 0.75688276, 0.13240193, 0.48465576,
0.20624753, 0.02298276, 0.54257873, 0.68123230, 0.35887468,
0.36296147, 0.67368397, 0.29505730, 0.66558885, 0.93652252,
0.36755130, 0.91787687, 0.75922703, 0.48668067, 0.45967890
]
)
# fmt: on
n = 0.1 * parameters["parameters"][0] * (rsn_rand_1234567890 - 0.5)
data = model_with_deriv(x) + n
fitter_with_deriv = fitting.LevMarLSQFitter()
new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
fitter_no_deriv = fitting.LevMarLSQFitter()
new_model_no_deriv = fitter_no_deriv(
model_no_deriv, x, data, estimate_jacobian=True
)
assert_allclose(
new_model_with_deriv.parameters, new_model_no_deriv.parameters, atol=0.5
)
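# A compact sketch (assumes scipy is available; Gaussian1D and the noise-free
# data are illustrative) of what the *_fit_deriv tests above verify: a fit
# using a model's analytic fit_deriv should land on essentially the same
# parameters as one that estimates the Jacobian numerically.
def _example_analytic_vs_estimated_jacobian():
    x = np.linspace(-5, 5, 200)
    data = models.Gaussian1D(2.0, 0.5, 1.0)(x)
    fitter = fitting.LevMarLSQFitter()
    with_deriv = fitter(models.Gaussian1D(), x, data)
    without_deriv = fitter(models.Gaussian1D(), x, data, estimate_jacobian=True)
    assert_allclose(with_deriv.parameters, without_deriv.parameters, atol=1e-3)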
class _ExtendedModelMeta(_ModelMeta):
@classmethod
def __prepare__(mcls, name, bases, **kwds):
# this shows the parent class machinery still applies
namespace = super().__prepare__(name, bases, **kwds)
# the custom bit
namespace.update(kwds)
return namespace
model = models.Gaussian1D(1.5, 2.5, 3.5)
assert model.amplitude._description == "Amplitude (peak value) of the Gaussian"
assert model.mean._description == "Position of peak (Gaussian)"
def test_metaclass_kwargs():
"""Test can pass kwargs to Models"""
class ClassModel(FittableModel, flag="flag"):
def evaluate(self):
pass
# Nothing further to test, just making the class is good enough.
def test_submetaclass_kwargs():
"""Test can pass kwargs to Model subclasses."""
class ClassModel(FittableModel, metaclass=_ExtendedModelMeta, flag="flag"):
def evaluate(self):
pass
assert ClassModel.flag == "flag"
class ModelDefault(Model):
slope = Parameter()
intercept = Parameter()
_separable = False
@staticmethod
def evaluate(x, slope, intercept):
return slope * x + intercept
class ModelCustom(ModelDefault):
def _calculate_separability_matrix(self):
return np.array([[0]])
def test_custom_separability_matrix():
original = separability_matrix(ModelDefault(slope=1, intercept=2))
assert original.all()
custom = separability_matrix(ModelCustom(slope=1, intercept=2))
assert not custom.any()
|
fc6866f0b00994a24abf37274d5c2f18587986eb5428d6064bc0264625f1ff14 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to fitting models with quantity parameters
"""
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.core import Fittable1DModel
from astropy.modeling.parameters import Parameter
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import UnitsError
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY
# Fitting should be as intuitive as possible to the user. Essentially, models
# and fitting should work without units, but if one has units, the other should
# have units too, and the resulting fitted parameters will also have units.
fitters = [
fitting.LevMarLSQFitter,
fitting.TRFLSQFitter,
fitting.LMLSQFitter,
fitting.DogBoxLSQFitter,
]
def _fake_gaussian_data():
# Generate fake data
with NumpyRNGContext(12345):
x = np.linspace(-5.0, 5.0, 2000)
y = 3 * np.exp(-0.5 * (x - 1.3) ** 2 / 0.8**2)
y += np.random.normal(0.0, 0.2, x.shape)
# Attach units to data
x = x * u.m
y = y * u.Jy
return x, y
compound_models_no_units = [
models.Linear1D() + models.Gaussian1D() + models.Gaussian1D(),
models.Linear1D() + models.Gaussian1D() | models.Scale(),
models.Linear1D() + models.Gaussian1D() | models.Shift(),
]
class CustomInputNamesModel(Fittable1DModel):
n_inputs = 1
n_outputs = 1
a = Parameter(default=1.0)
b = Parameter(default=1.0)
def __init__(self, a=a, b=b):
super().__init__(a=a, b=b)
self.inputs = ("inn",)
self.outputs = ("out",)
@staticmethod
def evaluate(inn, a, b):
return a * inn + b
@property
def input_units(self):
if self.a.unit is None and self.b.unit is None:
return None
else:
return {"inn": self.b.unit / self.a.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"a": outputs_unit["out"] / inputs_unit["inn"], "b": outputs_unit["out"]}
def models_with_custom_names():
line = models.Linear1D(1 * u.m / u.s, 2 * u.m)
line.inputs = ("inn",)
line.outputs = ("out",)
custom_names_model = CustomInputNamesModel(1 * u.m / u.s, 2 * u.m)
return [line, custom_names_model]
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_fitting_simple(fitter):
fitter = fitter()
x, y = _fake_gaussian_data()
# Fit the data using a Gaussian with units
g_init = models.Gaussian1D()
g = fitter(g_init, x, y)
# TODO: update actual numerical results once implemented, but these should
# be close to the values below.
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_fitting_with_initial_values(fitter):
fitter = fitter()
x, y = _fake_gaussian_data()
# Fit the data using a Gaussian with units
g_init = models.Gaussian1D(amplitude=1.0 * u.mJy, mean=3 * u.cm, stddev=2 * u.mm)
g = fitter(g_init, x, y)
# TODO: update actual numerical results once implemented, but these should
# be close to the values below.
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_fitting_missing_data_units(fitter):
"""
Raise an error if the model has units but the data doesn't
"""
fitter = fitter()
class UnorderedGaussian1D(models.Gaussian1D):
# Parameters are ordered differently here from Gaussian1D
# to ensure the order does not break functionality.
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"amplitude": outputs_unit["y"],
"mean": inputs_unit["x"],
"stddev": inputs_unit["x"],
}
g_init = UnorderedGaussian1D(amplitude=1.0 * u.mJy, mean=3 * u.cm, stddev=2 * u.mm)
# We define flux unit so that conversion fails at wavelength unit.
# This is because the order of parameter unit conversion seems to
# follow the order defined in _parameter_units_for_data_units method.
MESSAGE = r"'cm' .* and '' .* are not convertible"
with pytest.raises(UnitsError, match=MESSAGE):
fitter(g_init, [1, 2, 3], [4, 5, 6] * (u.erg / (u.s * u.cm * u.cm * u.Hz)))
MESSAGE = r"'mJy' .* and '' .* are not convertible"
with pytest.raises(UnitsError, match=MESSAGE):
fitter(g_init, [1, 2, 3] * u.m, [4, 5, 6])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_fitting_missing_model_units(fitter):
"""
Proceed if the data has units but the model doesn't
"""
fitter = fitter()
x, y = _fake_gaussian_data()
g_init = models.Gaussian1D(amplitude=1.0, mean=3, stddev=2)
g = fitter(g_init, x, y)
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)
g_init = models.Gaussian1D(amplitude=1.0, mean=3 * u.m, stddev=2 * u.m)
g = fitter(g_init, x, y)
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_fitting_incompatible_units(fitter):
"""
Raise an error if the data and model have incompatible units
"""
fitter = fitter()
g_init = models.Gaussian1D(amplitude=1.0 * u.Jy, mean=3 * u.m, stddev=2 * u.cm)
MESSAGE = r"'Hz' .* and 'm' .* are not convertible"
with pytest.raises(UnitsError, match=MESSAGE):
fitter(g_init, [1, 2, 3] * u.Hz, [4, 5, 6] * u.Jy)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:The fit may be unsuccessful.*")
@pytest.mark.filterwarnings(r"ignore:divide by zero encountered.*")
@pytest.mark.parametrize("model", compound_models_no_units)
@pytest.mark.parametrize("fitter", fitters)
def test_compound_without_units(model, fitter):
fitter = fitter()
x = np.linspace(-5, 5, 10) * u.Angstrom
with NumpyRNGContext(12345):
y = np.random.sample(10)
res_fit = fitter(model, x, y * u.Hz)
for param_name in res_fit.param_names:
print(getattr(res_fit, param_name))
assert all(res_fit[i]._has_units for i in range(3))
z = res_fit(x)
assert isinstance(z, u.Quantity)
res_fit = fitter(model, np.arange(10) * u.Unit("Angstrom"), y)
assert all(res_fit[i]._has_units for i in range(3))
z = res_fit(x)
assert isinstance(z, np.ndarray)
# FIXME: See https://github.com/astropy/astropy/issues/10675
# @pytest.mark.skipif(not HAS_SCIPY, reason='requires scipy')
@pytest.mark.skip(reason="Flaky and ill-conditioned")
@pytest.mark.parametrize("fitter", fitters)
def test_compound_fitting_with_units(fitter):
fitter = fitter()
x = np.linspace(-5, 5, 15) * u.Angstrom
y = np.linspace(-5, 5, 15) * u.Angstrom
m = models.Gaussian2D(
10 * u.Hz, 3 * u.Angstrom, 4 * u.Angstrom, 1 * u.Angstrom, 2 * u.Angstrom
)
p = models.Planar2D(3 * u.Hz / u.Angstrom, 4 * u.Hz / u.Angstrom, 1 * u.Hz)
model = m + p
z = model(x, y)
res = fitter(model, x, y, z)
assert isinstance(res(x, y), np.ndarray)
assert all(res[i]._has_units for i in range(2))
model = models.Gaussian2D() + models.Planar2D()
res = fitter(model, x, y, z)
assert isinstance(res(x, y), np.ndarray)
assert all(res[i]._has_units for i in range(2))
# A case of a mixture of models with and without units
model = models.BlackBody(temperature=3000 * u.K) * models.Const1D(amplitude=1.0)
x = np.linspace(1, 3, 10000) * u.micron
with NumpyRNGContext(12345):
n = np.random.normal(3)
y = model(x)
res = fitter(model, x, y * (1 + n))
# The large rtol here is due to different results on linux and macosx, likely
# the model is ill-conditioned.
np.testing.assert_allclose(
res.parameters, [3000, 2.1433621e00, 2.647347e00], rtol=0.4
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters*")
@pytest.mark.parametrize("model", models_with_custom_names())
@pytest.mark.parametrize("fitter", fitters)
def test_fitting_custom_names(model, fitter):
"""Tests fitting of models with custom inputs and outsputs names."""
fitter = fitter()
x = np.linspace(0, 10, 100) * u.s
y = model(x)
new_model = fitter(model, x, y)
for param_name in model.param_names:
assert_quantity_allclose(
getattr(new_model, param_name).quantity, getattr(model, param_name).quantity
)
|
0c5b5aa4a3a46f9cad3a84229e9233ad17d24aca6ec608b27ab97cc9a6be7380 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import os
import subprocess
import sys
import unittest.mock as mk
from inspect import signature
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
import astropy
import astropy.modeling.core as core
import astropy.units as u
from astropy.convolution import convolve_models
from astropy.modeling import models
from astropy.modeling.bounding_box import CompoundBoundingBox, ModelBoundingBox
from astropy.modeling.core import (
SPECIAL_OPERATORS,
CompoundModel,
Model,
_add_special_operator,
bind_bounding_box,
bind_compound_bounding_box,
custom_model,
fix_inputs,
)
from astropy.modeling.parameters import Parameter
from astropy.modeling.separable import separability_matrix
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
class NonFittableModel(Model):
"""An example class directly subclassing Model for testing."""
a = Parameter()
def __init__(self, a, model_set_axis=None):
super().__init__(a, model_set_axis=model_set_axis)
@staticmethod
def evaluate():
pass
def test_Model_instance_repr_and_str():
m = NonFittableModel(42.5)
assert repr(m) == "<NonFittableModel(a=42.5)>"
assert (
str(m) == "Model: NonFittableModel\n"
"Inputs: ()\n"
"Outputs: ()\n"
"Model set size: 1\n"
"Parameters:\n"
" a \n"
" ----\n"
" 42.5"
)
assert len(m) == 1
def test_Model_array_parameter():
model = models.Gaussian1D(4, 2, 1)
assert_allclose(model.param_sets, [[4], [2], [1]])
def test_inputless_model():
"""
Regression test for
https://github.com/astropy/astropy/pull/3772#issuecomment-101821641
"""
class TestModel(Model):
n_outputs = 1
a = Parameter()
@staticmethod
def evaluate(a):
return a
m = TestModel(1)
assert m.a == 1
assert m() == 1
# Test array-like output
m = TestModel([1, 2, 3], model_set_axis=False)
assert len(m) == 1
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[1, 2, 3], model_set_axis=0)
assert len(m) == 3
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=0)
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=np.int64(0))
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
def test_ParametricModel():
MESSAGE = r"Gaussian1D.__init__.* got an unrecognized parameter 'wrong'"
with pytest.raises(TypeError, match=MESSAGE):
models.Gaussian1D(1, 2, 3, wrong=4)
def test_custom_model_signature():
"""
Tests that the signatures for the __init__ and __call__
methods of custom models are useful.
"""
@custom_model
def model_a(x):
return x
assert model_a.param_names == ()
assert model_a.n_inputs == 1
sig = signature(model_a.__init__)
assert list(sig.parameters.keys()) == ["self", "args", "meta", "name", "kwargs"]
sig = signature(model_a.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
@custom_model
def model_b(x, a=1, b=2):
return x + a + b
assert model_b.param_names == ("a", "b")
assert model_b.n_inputs == 1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ["self", "a", "b", "kwargs"]
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
@custom_model
def model_c(x, y, a=1, b=2):
return x + y + a + b
assert model_c.param_names == ("a", "b")
assert model_c.n_inputs == 2
sig = signature(model_c.__init__)
assert list(sig.parameters.keys()) == ["self", "a", "b", "kwargs"]
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_c.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
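# A short sketch (the decorated function below is hypothetical) of the basic
# custom_model workflow whose generated signatures are inspected above: the
# keyword arguments with defaults become model parameters.
def _example_custom_model_basic_usage():
    @custom_model
    def line(x, slope=1.0, intercept=0.0):
        return slope * x + intercept

    m = line(slope=2.0, intercept=1.0)
    assert m.param_names == ("slope", "intercept")
    return m(3.0)  # 2.0 * 3.0 + 1.0 == 7.0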
def test_custom_model_subclass():
"""Test that custom models can be subclassed."""
@custom_model
def model_a(x, a=1):
return x * a
class model_b(model_a):
# Override the evaluate from model_a
@classmethod
def evaluate(cls, x, a):
return -super().evaluate(x, a)
b = model_b()
assert b.param_names == ("a",)
assert b.a == 1
assert b(1) == -1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ["self", "a", "kwargs"]
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
def test_custom_model_parametrized_decorator():
"""Tests using custom_model as a decorator with parameters."""
def cosine(x, amplitude=1):
return [amplitude * np.cos(x)]
@custom_model(fit_deriv=cosine)
def sine(x, amplitude=1):
return amplitude * np.sin(x)
assert issubclass(sine, Model)
s = sine(2)
assert_allclose(s(np.pi / 2), 2)
assert_allclose(s.fit_deriv(0, 2), 2)
def test_custom_model_n_outputs():
"""
Test creating a custom_model which has more than one output, which
requires special handling.
Demonstrates that issue #11791's ``n_outputs`` error has been solved
"""
@custom_model
def model(x, y, n_outputs=2):
return x + 1, y + 1
m = model()
assert not isinstance(m.n_outputs, Parameter)
assert isinstance(m.n_outputs, int)
assert m.n_outputs == 2
assert m.outputs == ("x0", "x1")
assert (
separability_matrix(m)
== [
[True, True],
[True, True],
]
).all()
@custom_model
def model(x, y, z, n_outputs=3):
return x + 1, y + 1, z + 1
m = model()
assert not isinstance(m.n_outputs, Parameter)
assert isinstance(m.n_outputs, int)
assert m.n_outputs == 3
assert m.outputs == ("x0", "x1", "x2")
assert (
separability_matrix(m)
== [
[True, True, True],
[True, True, True],
[True, True, True],
]
).all()
def test_custom_model_settable_parameters():
"""
Test creating a custom_model which specifically sets adjustable model
parameters.
Demonstrates part of issue #11791's notes about which passed parameters
should and shouldn't be allowed. In this case, settable parameters
should be allowed to have defaults set.
"""
@custom_model
def model(x, y, n_outputs=2, bounding_box=((1, 2), (3, 4))):
return x + 1, y + 1
m = model()
assert m.n_outputs == 2
assert m.bounding_box == ((1, 2), (3, 4))
m.bounding_box = ((9, 10), (11, 12))
assert m.bounding_box == ((9, 10), (11, 12))
m = model(bounding_box=((5, 6), (7, 8)))
assert m.n_outputs == 2
assert m.bounding_box == ((5, 6), (7, 8))
m.bounding_box = ((9, 10), (11, 12))
assert m.bounding_box == ((9, 10), (11, 12))
@custom_model
def model(x, y, n_outputs=2, outputs=("z0", "z1")):
return x + 1, y + 1
m = model()
assert m.n_outputs == 2
assert m.outputs == ("z0", "z1")
m.outputs = ("a0", "a1")
assert m.outputs == ("a0", "a1")
m = model(outputs=("w0", "w1"))
assert m.n_outputs == 2
assert m.outputs == ("w0", "w1")
m.outputs = ("a0", "a1")
assert m.outputs == ("a0", "a1")
def test_custom_model_rejected_parameters():
"""
Test creating a custom_model which attempts to override non-overridable
parameters.
Demonstrates part of issue #11791's notes about which passed parameters
should and shouldn't be allowed. In this case, attempting to set
non-settable parameters should raise an error (otherwise unexpected behavior may occur).
"""
with pytest.raises(
ValueError, match=r"Parameter 'n_inputs' cannot be a model property: *"
):
@custom_model
def model1(x, y, n_outputs=2, n_inputs=3):
return x + 1, y + 1
with pytest.raises(
ValueError, match=r"Parameter 'uses_quantity' cannot be a model property: *"
):
@custom_model
def model2(x, y, n_outputs=2, uses_quantity=True):
return x + 1, y + 1
def test_custom_inverse():
"""Test setting a custom inverse on a model."""
p = models.Polynomial1D(1, c0=-2, c1=3)
# A trivial inverse for a trivial polynomial
inv = models.Polynomial1D(1, c0=(2.0 / 3.0), c1=(1.0 / 3.0))
MESSAGE = (
r"No analytical or user-supplied inverse transform has been implemented for"
r" this model"
)
with pytest.raises(NotImplementedError, match=MESSAGE):
p.inverse
p.inverse = inv
x = np.arange(100)
assert_allclose(x, p(p.inverse(x)))
assert_allclose(x, p.inverse(p(x)))
p.inverse = None
with pytest.raises(NotImplementedError, match=MESSAGE):
p.inverse
def test_custom_inverse_reset():
"""Test resetting a custom inverse to the model's default inverse."""
class TestModel(Model):
n_inputs = 0
outputs = ("y",)
@property
def inverse(self):
return models.Shift()
@staticmethod
def evaluate():
return 0
# The above test model has no meaning, nor does its inverse--this just
# tests that setting an inverse and resetting to the default inverse works
m = TestModel()
assert isinstance(m.inverse, models.Shift)
m.inverse = models.Scale()
assert isinstance(m.inverse, models.Scale)
del m.inverse
assert isinstance(m.inverse, models.Shift)
def test_render_model_2d():
imshape = (71, 141)
image = np.zeros(imshape)
coords = y, x = np.indices(imshape)
model = models.Gaussian2D(x_stddev=6.1, y_stddev=3.9, theta=np.pi / 3)
# test points for edges
ye, xe = [0, 35, 70], [0, 70, 140]
# test points for floating point positions
yf, xf = [35.1, 35.5, 35.9], [70.1, 70.5, 70.9]
test_pts = [(a, b) for a in xe for b in ye]
test_pts += [(a, b) for a in xf for b in yf]
for x0, y0 in test_pts:
model.x_mean = x0
model.y_mean = y0
expected = model(x, y)
for xy in [coords, None]:
for im in [image.copy(), None]:
if (im is None) and (xy is None):
# this case is tested in Fittable2DModelTester
continue
actual = model.render(out=im, coords=xy)
if im is None:
assert_allclose(actual, model.render(coords=xy))
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, y0) == (70, 35):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
# test an error is raised when the bounding box is larger than the input array
try:
actual = model.render(out=np.zeros((1, 1)))
except ValueError:
pass
def test_render_model_1d():
npix = 101
image = np.zeros(npix)
coords = np.arange(npix)
model = models.Gaussian1D()
# test points
test_pts = [0, 49.1, 49.5, 49.9, 100]
# test widths
test_stdv = np.arange(5.5, 6.7, 0.2)
for x0, stdv in [(p, s) for p in test_pts for s in test_stdv]:
model.mean = x0
model.stddev = stdv
expected = model(coords)
for x in [coords, None]:
for im in [image.copy(), None]:
if (im is None) and (x is None):
# this case is tested in Fittable1DModelTester
continue
actual = model.render(out=im, coords=x)
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, stdv) == (49.5, 5.5):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
@pytest.mark.filterwarnings("ignore:invalid value encountered in less")
def test_render_model_3d():
imshape = (17, 21, 27)
image = np.zeros(imshape)
coords = np.indices(imshape)
def ellipsoid(x, y, z, x0=13.0, y0=10.0, z0=8.0, a=4.0, b=3.0, c=2.0, amp=1.0):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(custom_model(ellipsoid)):
@property
def bounding_box(self):
return (
(self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a),
)
model = Ellipsoid3D()
# test points for edges
ze, ye, xe = [0, 8, 16], [0, 10, 20], [0, 13, 26]
# test points for floating point positions
zf, yf, xf = [8.1, 8.5, 8.9], [10.1, 10.5, 10.9], [13.1, 13.5, 13.9]
test_pts = [(x, y, z) for x in xe for y in ye for z in ze]
test_pts += [(x, y, z) for x in xf for y in yf for z in zf]
for x0, y0, z0 in test_pts:
model.x0 = x0
model.y0 = y0
model.z0 = z0
expected = model(*coords[::-1])
for c in [coords, None]:
for im in [image.copy(), None]:
if (im is None) and (c is None):
continue
actual = model.render(out=im, coords=c)
boxed = model.render()
# assert images match
assert_allclose(expected, actual)
# assert model fully captured
if (z0, y0, x0) == (8, 10, 13):
boxed = model.render()
assert (np.sum(expected) - np.sum(boxed)) == 0
def test_render_model_out_dtype():
"""Test different out.dtype for model.render."""
MESSAGE = (
r"Cannot cast ufunc 'add' output from .* to .* with casting rule 'same_kind"
)
for model in [models.Gaussian2D(), models.Gaussian2D() + models.Planar2D()]:
for dtype in [np.float64, np.float32, np.complex64]:
im = np.zeros((40, 40), dtype=dtype)
imout = model.render(out=im)
assert imout is im
assert imout.sum() != 0
with pytest.raises(TypeError, match=MESSAGE):
im = np.zeros((40, 40), dtype=np.int32)
imout = model.render(out=im)
def test_custom_bounding_box_1d():
"""
Tests that the bounding_box setter works.
"""
# 1D models
g1 = models.Gaussian1D()
bb = g1.bounding_box
expected = g1.render()
# assign the same bounding_box, now through the bounding_box setter
g1.bounding_box = bb
assert_allclose(g1.render(), expected)
# 2D models
g2 = models.Gaussian2D()
bb = g2.bounding_box
expected = g2.render()
# assign the same bounding_box, now through the bounding_box setter
g2.bounding_box = bb
assert_allclose(g2.render(), expected)
def test_n_submodels_in_single_models():
assert models.Gaussian1D().n_submodels == 1
assert models.Gaussian2D().n_submodels == 1
def test_compound_deepcopy():
model = (models.Gaussian1D(10, 2, 3) | models.Shift(2)) & models.Rotation2D(21.3)
new_model = model.deepcopy()
assert id(model) != id(new_model)
assert id(model._leaflist) != id(new_model._leaflist)
assert id(model[0]) != id(new_model[0])
assert id(model[1]) != id(new_model[1])
assert id(model[2]) != id(new_model[2])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_units_with_bounding_box():
points = np.arange(10, 20)
table = np.arange(10) * u.Angstrom
t = models.Tabular1D(points, lookup_table=table)
assert isinstance(t(10), u.Quantity)
assert isinstance(t(10, with_bounding_box=True), u.Quantity)
assert_quantity_allclose(t(10), t(10, with_bounding_box=True))
RENAMED_MODEL = models.Gaussian1D.rename("CustomGaussian")
MODEL_RENAME_CODE = """
from astropy.modeling.models import Gaussian1D
print(repr(Gaussian1D))
print(repr(Gaussian1D.rename('CustomGaussian')))
""".strip()
MODEL_RENAME_EXPECTED = b"""
<class 'astropy.modeling.functional_models.Gaussian1D'>
Name: Gaussian1D
N_inputs: 1
N_outputs: 1
Fittable parameters: ('amplitude', 'mean', 'stddev')
<class '__main__.CustomGaussian'>
Name: CustomGaussian (Gaussian1D)
N_inputs: 1
N_outputs: 1
Fittable parameters: ('amplitude', 'mean', 'stddev')
""".strip()
def test_rename_path(tmp_path):
# Regression test for a bug that caused the path to the class to be
# incorrect in a renamed model's __repr__.
assert (
repr(RENAMED_MODEL).splitlines()[0]
== "<class 'astropy.modeling.tests.test_core.CustomGaussian'>"
)
# Make sure that when called from a user script, the class name includes
# __main__.
env = os.environ.copy()
paths = [os.path.dirname(astropy.__path__[0])] + sys.path
env["PYTHONPATH"] = os.pathsep.join(paths)
script = tmp_path / "rename.py"
with open(script, "w") as f:
f.write(MODEL_RENAME_CODE)
output = subprocess.check_output([sys.executable, script], env=env)
assert output.splitlines() == MODEL_RENAME_EXPECTED.splitlines()
@pytest.mark.parametrize(
"model_class",
[models.Gaussian1D, models.Polynomial1D, models.Shift, models.Tabular1D],
)
def test_rename_1d(model_class):
new_model = model_class.rename(name="Test1D")
assert new_model.name == "Test1D"
@pytest.mark.parametrize(
"model_class", [models.Gaussian2D, models.Polynomial2D, models.Tabular2D]
)
def test_rename_2d(model_class):
new_model = model_class.rename(name="Test2D")
assert new_model.name == "Test2D"
def test_fix_inputs_integer():
"""
Tests that numpy integers can be passed as dictionary keys to fix_inputs
Issue #11358
"""
m = models.Identity(2)
mf = models.fix_inputs(m, {1: 22})
assert mf(1) == (1, 22)
mf_int32 = models.fix_inputs(m, {np.int32(1): 33})
assert mf_int32(1) == (1, 33)
mf_int64 = models.fix_inputs(m, {np.int64(1): 44})
assert mf_int64(1) == (1, 44)
def test_fix_inputs_empty_dict():
"""
Tests that empty dictionary can be passed to fix_inputs
Issue #11355
"""
m = models.Identity(2)
mf = models.fix_inputs(m, {})
assert mf(1, 2) == (1, 2)
def test_rename_inputs_outputs():
g2 = models.Gaussian2D(10, 2, 3, 1, 2)
assert g2.inputs == ("x", "y")
assert g2.outputs == ("z",)
MESSAGE = r"Expected .* number of .*, got .*"
with pytest.raises(ValueError, match=MESSAGE):
g2.inputs = ("w",)
with pytest.raises(ValueError, match=MESSAGE):
g2.outputs = ("w", "e")
def test__prepare_output_single_model():
model = models.Gaussian1D()
# No broadcast
assert (
np.array([1, 2]) == model._prepare_output_single_model(np.array([1, 2]), None)
).all()
# Broadcast to scalar
assert model._prepare_output_single_model(np.array([1]), ()) == 1
assert model._prepare_output_single_model(np.asanyarray(2), ()) == 2
# Broadcast reshape
output = np.array([[1, 2, 3], [4, 5, 6]])
reshape = np.array([[1, 2], [3, 4], [5, 6]])
assert (output == model._prepare_output_single_model(output, (2, 3))).all()
assert (reshape == model._prepare_output_single_model(output, (3, 2))).all()
# Broadcast reshape scalar
assert model._prepare_output_single_model(np.array([1]), (1, 2)) == 1
assert model._prepare_output_single_model(np.asanyarray(2), (3, 4)) == 2
# Fail to broadcast
assert (output == model._prepare_output_single_model(output, (1, 2))).all()
assert (output == model._prepare_output_single_model(output, (3, 4))).all()
def test_prepare_outputs_mixed_broadcast():
"""
Tests that _prepare_outputs_single_model does not fail when a smaller
array is passed as the first input, but the output is broadcast to a larger
array.
Issue #10170
"""
model = models.Gaussian2D(1, 2, 3, 4, 5)
output = model([1, 2], 3)
assert output.shape == (2,)
np.testing.assert_array_equal(output, [0.9692332344763441, 1.0])
output = model(4, [5, 6])
assert output.shape == (2,)
np.testing.assert_array_equal(output, [0.8146473164114145, 0.7371233743916278])
def test_prepare_outputs_complex_reshape():
x = np.array(
[
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
]
)
y = np.array(
[
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25],
[26, 27, 28, 29, 30],
]
)
m = models.Identity(3) | models.Mapping((2, 1, 0))
m.bounding_box = ((0, 100), (0, 200), (0, 50))
mf = models.fix_inputs(m, {2: 22})
t = mf | models.Mapping((2, 1), n_inputs=3)
output = mf(1, 2)
assert output == (22, 2, 1)
output = t(1, 2)
assert output == (1, 2)
output = t(x, y)
assert len(output) == 2
np.testing.assert_array_equal(output[0], x)
np.testing.assert_array_equal(output[1], y)
m = models.Identity(3) | models.Mapping((0, 1, 2))
m.bounding_box = ((0, 100), (0, 200), (0, 50))
mf = models.fix_inputs(m, {2: 22})
t = mf | models.Mapping((0, 1), n_inputs=3)
output = mf(1, 2)
assert output == (1, 2, 22)
output = t(1, 2)
assert output == (1, 2)
output = t(x, y)
assert len(output) == 2
np.testing.assert_array_equal(output[0], x)
np.testing.assert_array_equal(output[1], y)
def test_prepare_outputs_single_entry_vector():
"""
jwst and gwcs both require that single entry vectors produce single
entry output vectors, not scalars. This tests for that behavior.
"""
model = models.Gaussian2D(1, 2, 3, 4, 5)
output = model(np.array([1]), np.array([2]))
assert output.shape == (1,)
np.testing.assert_allclose(output, [0.9500411305585278])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings("ignore: Using a non-tuple")
def test_prepare_outputs_sparse_grid():
"""
Test to show that #11060 has been solved.
"""
shape = (3, 3)
data = np.arange(np.prod(shape)).reshape(shape) * u.m / u.s
points_unit = u.pix
points = [np.arange(size) * points_unit for size in shape]
kwargs = {
"bounds_error": False,
"fill_value": np.nan,
"method": "nearest",
}
transform = models.Tabular2D(points, data, **kwargs)
truth = (
np.array(
[
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8.0],
]
)
* u.m
/ u.s
)
points = np.meshgrid(np.arange(3), np.arange(3), indexing="ij", sparse=True)
x = points[0] * u.pix
y = points[1] * u.pix
value = transform(x, y)
assert (value == truth).all()
points = (
np.meshgrid(np.arange(3), np.arange(3), indexing="ij", sparse=False) * u.pix
)
value = transform(*points)
assert (value == truth).all()
def test_coerce_units():
model = models.Polynomial1D(1, c0=1, c1=2)
MESSAGE = r"Can only apply 'add' function to dimensionless quantities when other .*"
with pytest.raises(u.UnitsError, match=MESSAGE):
model(u.Quantity(10, u.m))
with_input_units = model.coerce_units({"x": u.m})
result = with_input_units(u.Quantity(10, u.m))
assert np.isclose(result, 21.0)
with_input_units_tuple = model.coerce_units((u.m,))
result = with_input_units_tuple(u.Quantity(10, u.m))
assert np.isclose(result, 21.0)
with_return_units = model.coerce_units(return_units={"y": u.s})
result = with_return_units(10)
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with_return_units_tuple = model.coerce_units(return_units=(u.s,))
result = with_return_units_tuple(10)
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with_both = model.coerce_units({"x": u.m}, {"y": u.s})
result = with_both(u.Quantity(10, u.m))
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with pytest.raises(
ValueError, match=r"input_units keys.*do not match model inputs"
):
model.coerce_units({"q": u.m})
with pytest.raises(ValueError, match=r"input_units length does not match n_inputs"):
model.coerce_units((u.m, u.s))
model_with_existing_input_units = models.BlackBody()
with pytest.raises(
ValueError,
match=r"Cannot specify input_units for model with existing input units",
):
model_with_existing_input_units.coerce_units({"x": u.m})
with pytest.raises(
ValueError, match=r"return_units keys.*do not match model outputs"
):
model.coerce_units(return_units={"q": u.m})
with pytest.raises(
ValueError, match=r"return_units length does not match n_outputs"
):
model.coerce_units(return_units=(u.m, u.s))
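# A condensed sketch (unit choices are illustrative) of the coerce_units
# behaviour exercised above: a unitless model is wrapped so inputs are
# converted to the given units before evaluation and outputs are tagged with
# the requested return units.
def _example_coerce_units_usage():
    line = models.Polynomial1D(1, c0=1, c1=2)  # y = 1 + 2 x, unitless
    wrapped = line.coerce_units({"x": u.m}, return_units={"y": u.s})
    result = wrapped(200 * u.cm)  # converted to 2 m -> 1 + 2 * 2 = 5
    assert result.unit == u.s
    return result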
def test_bounding_box_general_inverse():
model = NonFittableModel(42.5)
MESSAGE = r"No bounding box is defined for this model"
with pytest.raises(NotImplementedError, match=MESSAGE):
model.bounding_box
model.bounding_box = ()
assert model.bounding_box.bounding_box() == ()
model.inverse = NonFittableModel(3.14)
inverse_model = model.inverse
with pytest.raises(NotImplementedError, match=MESSAGE):
inverse_model.bounding_box
def test__add_special_operator():
sop_name = "name"
sop = "value"
key = _add_special_operator(sop_name, "value")
assert key[0] == sop_name
assert key[1] == SPECIAL_OPERATORS._unique_id
assert key in SPECIAL_OPERATORS
assert SPECIAL_OPERATORS[key] == sop
def test_print_special_operator_CompoundModel(capsys):
"""
Test that issue #11310 has been fixed
"""
model = convolve_models(models.Sersic2D(), models.Gaussian2D())
with astropy.conf.set_temp("max_width", 80):
# fmt: off
assert str(model) == (
"Model: CompoundModel\n"
"Inputs: ('x', 'y')\n"
"Outputs: ('z',)\n"
"Model set size: 1\n"
"Expression: convolve_fft (([0]), ([1]))\n"
"Components: \n"
" [0]: <Sersic2D(amplitude=1., r_eff=1., n=4., "
"x_0=0., y_0=0., ellip=0., theta=0.)>\n"
"\n"
" [1]: <Gaussian2D(amplitude=1., x_mean=0., y_mean=0., "
"x_stddev=1., y_stddev=1., theta=0.)>\n"
"Parameters:\n"
" amplitude_0 r_eff_0 n_0 x_0_0 y_0_0 ... y_mean_1 x_stddev_1 y_stddev_1 theta_1\n"
" ----------- ------- --- ----- ----- ... -------- ---------- ---------- -------\n"
" 1.0 1.0 4.0 0.0 0.0 ... 0.0 1.0 1.0 0.0"
)
# fmt: on
def test__validate_input_shape():
model = models.Gaussian1D()
model._n_models = 2
_input = np.array(
[
[1, 2, 3],
[4, 5, 6],
]
)
# Successful validation
assert model._validate_input_shape(_input, 0, model.inputs, 1, False) == (2, 3)
# Fail number of axes
MESSAGE = r"For model_set_axis=2, all inputs must be at least 3-dimensional"
with pytest.raises(ValueError, match=MESSAGE):
model._validate_input_shape(_input, 0, model.inputs, 2, True)
# Fail number of models (has argname)
MESSAGE = r"Input argument '.*' does not have the correct dimensions in .*"
with pytest.raises(ValueError, match=MESSAGE):
model._validate_input_shape(_input, 0, model.inputs, 1, True)
# Fail number of models (no argname)
with pytest.raises(ValueError, match=MESSAGE):
model._validate_input_shape(_input, 0, [], 1, True)
def test__validate_input_shapes():
model = models.Gaussian1D()
model._n_models = 2
inputs = [mk.MagicMock() for _ in range(3)]
argnames = mk.MagicMock()
model_set_axis = mk.MagicMock()
all_shapes = [mk.MagicMock() for _ in inputs]
# Successful validation
with mk.patch.object(
Model, "_validate_input_shape", autospec=True, side_effect=all_shapes
) as mkValidate:
with mk.patch.object(core, "check_broadcast", autospec=True) as mkCheck:
assert mkCheck.return_value == model._validate_input_shapes(
inputs, argnames, model_set_axis
)
assert mkCheck.call_args_list == [mk.call(*all_shapes)]
assert mkValidate.call_args_list == [
mk.call(model, _input, idx, argnames, model_set_axis, True)
for idx, _input in enumerate(inputs)
]
# Fail check_broadcast
MESSAGE = r"All inputs must have identical shapes or must be scalars"
with mk.patch.object(
Model, "_validate_input_shape", autospec=True, side_effect=all_shapes
) as mkValidate:
with mk.patch.object(
core, "check_broadcast", autospec=True, return_value=None
) as mkCheck:
with pytest.raises(ValueError, match=MESSAGE):
model._validate_input_shapes(inputs, argnames, model_set_axis)
assert mkCheck.call_args_list == [mk.call(*all_shapes)]
assert mkValidate.call_args_list == [
mk.call(model, _input, idx, argnames, model_set_axis, True)
for idx, _input in enumerate(inputs)
]
def test__remove_axes_from_shape():
model = models.Gaussian1D()
# len(shape) == 0
assert model._remove_axes_from_shape((), mk.MagicMock()) == ()
# axis < 0
assert model._remove_axes_from_shape((1, 2, 3), -1) == (1, 2)
assert model._remove_axes_from_shape((1, 2, 3), -2) == (1, 3)
assert model._remove_axes_from_shape((1, 2, 3), -3) == (2, 3)
# axis >= len(shape)
assert model._remove_axes_from_shape((1, 2, 3), 3) == ()
assert model._remove_axes_from_shape((1, 2, 3), 4) == ()
# 0 <= axis < len(shape)
assert model._remove_axes_from_shape((1, 2, 3), 0) == (2, 3)
assert model._remove_axes_from_shape((1, 2, 3), 1) == (3,)
assert model._remove_axes_from_shape((1, 2, 3), 2) == ()
def test_get_bounding_box():
model = models.Const2D(2)
# No with_bbox
assert model.get_bounding_box(False) is None
# No bounding_box
MESSAGE = r"No bounding box is defined for this model"
with pytest.raises(NotImplementedError, match=MESSAGE):
model.bounding_box
assert model.get_bounding_box(True) is None
# Normal bounding_box
model.bounding_box = ((0, 1), (0, 1))
assert not isinstance(model.bounding_box, CompoundBoundingBox)
assert model.get_bounding_box(True) == ((0, 1), (0, 1))
# CompoundBoundingBox with no removal
bbox = CompoundBoundingBox.validate(
model,
{(1,): ((-1, 0), (-1, 0)), (2,): ((0, 1), (0, 1))},
selector_args=[("y", False)],
)
model.bounding_box = bbox
assert isinstance(model.bounding_box, CompoundBoundingBox)
    # Passing with_bbox=True returns the full CompoundBoundingBox
assert model.get_bounding_box(True) == bbox
    # Passing a selector tuple as with_bbox returns the matching bounding box
assert model.get_bounding_box((1,)) == ((-1, 0), (-1, 0))
assert model.get_bounding_box((2,)) == ((0, 1), (0, 1))
def test_compound_bounding_box():
model = models.Gaussian1D()
truth = models.Gaussian1D()
bbox1 = CompoundBoundingBox.validate(
model, {(1,): (-1, 0), (2,): (0, 1)}, selector_args=[("x", False)]
)
bbox2 = CompoundBoundingBox.validate(
model, {(-0.5,): (-1, 0), (0.5,): (0, 1)}, selector_args=[("x", False)]
)
# Using with_bounding_box to pass a selector
model.bounding_box = bbox1
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=(1,)) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=(2,)))
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=(2,)) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(1,)))
# Using argument value to pass bounding_box
model.bounding_box = bbox2
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=True) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=True) == truth(0.5)
MESSAGE = r"No bounding box is defined for selector: .*"
with pytest.raises(RuntimeError, match=MESSAGE):
model(0, with_bounding_box=True)
model1 = models.Gaussian1D()
truth1 = models.Gaussian1D()
model2 = models.Const1D(2)
truth2 = models.Const1D(2)
model = model1 + model2
truth = truth1 + truth2
assert isinstance(model, CompoundModel)
model.bounding_box = bbox1
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=1) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=(2,)))
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=2) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(1,)))
model.bounding_box = bbox2
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=True) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=True) == truth(0.5)
with pytest.raises(RuntimeError, match=MESSAGE):
model(0, with_bounding_box=True)
def test_bind_bounding_box():
model = models.Polynomial2D(3)
bbox = ((-1, 1), (-2, 2))
bind_bounding_box(model, bbox)
assert model.get_bounding_box() is not None
assert model.bounding_box == bbox
assert model.bounding_box["x"] == (-2, 2)
assert model.bounding_box["y"] == (-1, 1)
bind_bounding_box(model, bbox, order="F")
assert model.get_bounding_box() is not None
assert model.bounding_box == bbox
assert model.bounding_box["x"] == (-1, 1)
assert model.bounding_box["y"] == (-2, 2)
def test_bind_compound_bounding_box_using_with_bounding_box_select():
"""
    This demonstrates how to bind multiple bounding_boxes that are selectable
    using ``with_bounding_box``; note that there must be a fall-back to the
    implicit selector.
"""
model = models.Gaussian1D()
truth = models.Gaussian1D()
bbox = (0, 1)
MESSAGE = r"'tuple' object has no attribute 'items"
with pytest.raises(AttributeError, match=MESSAGE):
bind_compound_bounding_box(model, bbox, "x")
bbox = {0: (-1, 0), 1: (0, 1)}
bind_compound_bounding_box(model, bbox, [("x", False)])
# No bounding box
assert model(-0.5) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0) == truth(0)
assert model(1) == truth(1)
    # `with_bounding_box` selects the bounding box explicitly, since `-0.5` is not a key
assert model(-0.5, with_bounding_box=0) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=1))
    # `with_bounding_box` selects the bounding box explicitly, since `0.5` is not a key
assert model(0.5, with_bounding_box=1) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(0,)))
# Fall back onto implicit selector
assert model(0, with_bounding_box=True) == truth(0)
assert model(1, with_bounding_box=True) == truth(1)
# Attempt to fall-back on implicit selector, but no bounding_box
MESSAGE = r"No bounding box is defined for selector: .*"
with pytest.raises(RuntimeError, match=MESSAGE):
model(0.5, with_bounding_box=True)
# Override implicit selector
assert np.isnan(model(1, with_bounding_box=0))
def test_fix_inputs_compound_bounding_box():
base_model = models.Gaussian2D(1, 2, 3, 4, 5)
bbox = {2.5: (-1, 1), 3.14: (-7, 3)}
model = fix_inputs(base_model, {"y": 2.5}, bounding_boxes=bbox)
assert model.bounding_box == (-1, 1)
model = fix_inputs(base_model, {"x": 2.5}, bounding_boxes=bbox)
assert model.bounding_box == (-1, 1)
model = fix_inputs(
base_model, {"y": 2.5}, bounding_boxes=bbox, selector_args=(("y", True),)
)
assert model.bounding_box == (-1, 1)
model = fix_inputs(
base_model, {"x": 2.5}, bounding_boxes=bbox, selector_args=(("x", True),)
)
assert model.bounding_box == (-1, 1)
model = fix_inputs(
base_model, {"x": 2.5}, bounding_boxes=bbox, selector_args=((0, True),)
)
assert model.bounding_box == (-1, 1)
base_model = models.Identity(4)
bbox = {(2.5, 1.3): ((-1, 1), (-3, 3)), (2.5, 2.71): ((-3, 3), (-1, 1))}
model = fix_inputs(base_model, {"x0": 2.5, "x1": 1.3}, bounding_boxes=bbox)
assert model.bounding_box == ((-1, 1), (-3, 3))
model = fix_inputs(
base_model,
{"x0": 2.5, "x1": 1.3},
bounding_boxes=bbox,
selector_args=(("x0", True), ("x1", True)),
)
assert model.bounding_box == ((-1, 1), (-3, 3))
model = fix_inputs(
base_model,
{"x0": 2.5, "x1": 1.3},
bounding_boxes=bbox,
selector_args=((0, True), (1, True)),
)
assert model.bounding_box == ((-1, 1), (-3, 3))
def test_model_copy_with_bounding_box():
model = models.Polynomial2D(2)
bbox = ModelBoundingBox.validate(model, ((-0.5, 1047.5), (-0.5, 2047.5)), order="F")
# No bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with bbox
model.bounding_box = bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
for index, interval in model.bounding_box.intervals.items():
interval_copy = model_copy.bounding_box.intervals[index]
assert interval == interval_copy
        assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(1)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_compound_model_copy_with_bounding_box():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = ModelBoundingBox.validate(
model, ((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)), order="F"
)
# No bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with bbox
model.bounding_box = bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
for index, interval in model.bounding_box.intervals.items():
interval_copy = model_copy.bounding_box.intervals[index]
assert interval == interval_copy
        assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(3)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_model_copy_with_compound_bounding_box():
model = models.Polynomial2D(2)
bbox = {(0,): (-0.5, 1047.5), (1,): (-0.5, 3047.5)}
cbbox = CompoundBoundingBox.validate(
model, bbox, selector_args=[("x", True)], order="F"
)
# No cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with cbbox
model.bounding_box = cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
assert model_copy.bounding_box.selector_args == model.bounding_box.selector_args
assert id(model_copy.bounding_box.selector_args) != id(
model.bounding_box.selector_args
)
for selector, bbox in model.bounding_box.bounding_boxes.items():
for index, interval in bbox.intervals.items():
interval_copy = model_copy.bounding_box.bounding_boxes[selector].intervals[
index
]
assert interval == interval_copy
            assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(1)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_compound_model_copy_with_compound_bounding_box():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
}
cbbox = CompoundBoundingBox.validate(
model, bbox, selector_args=[("slit_id", True)], order="F"
)
# No cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with cbbox
model.bounding_box = cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
assert model_copy.bounding_box.selector_args == model.bounding_box.selector_args
assert id(model_copy.bounding_box.selector_args) != id(
model.bounding_box.selector_args
)
for selector, bbox in model.bounding_box.bounding_boxes.items():
for index, interval in bbox.intervals.items():
interval_copy = model_copy.bounding_box.bounding_boxes[selector].intervals[
index
]
assert interval == interval_copy
            assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(3)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_compound_model_copy_user_attribute():
"""Regression test for issue #12370"""
model = models.Gaussian2D(100, 25, 25, 5, 5) | models.Identity(1)
model.xname = "x_mean" # user-defined attribute
assert hasattr(model, "xname")
assert model.xname == "x_mean"
model_copy = model.copy()
model_copy.xname
assert hasattr(model_copy, "xname")
assert model_copy.xname == "x_mean"
def test_model_mixed_array_scalar_bounding_box():
"""Regression test for issue #12319"""
model = models.Gaussian2D()
bbox = ModelBoundingBox.validate(model, ((-1, 1), (-np.inf, np.inf)), order="F")
model.bounding_box = bbox
x = np.array([-0.5, 0.5])
y = 0
    # Everything works when it's all in the bounding box
assert (model(x, y) == (model(x, y, with_bounding_box=True))).all()
def test_compound_model_mixed_array_scalar_bounding_box():
"""Regression test for issue #12319"""
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = ModelBoundingBox.validate(
model, ((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)), order="F"
)
model.bounding_box = bbox
x = np.array([1000, 1001])
y = np.array([2000, 2001])
slit_id = 0
    # Everything works when it's all in the bounding box
value0 = model(x, y, slit_id)
value1 = model(x, y, slit_id, with_bounding_box=True)
assert_equal(value0, value1)
def test_model_with_bounding_box_true_and_single_output():
"""Regression test for issue #12373"""
model = models.Mapping((1,))
x = [1, 2]
y = [3, 4]
# Check baseline
assert_equal(model(x, y), [3, 4])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [3, 4])
model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf))
# Check baseline
assert_equal(model(x, y), [3, 4])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [3, 4])
def test_compound_model_with_bounding_box_true_and_single_output():
"""Regression test for issue #12373"""
model = models.Mapping((1,)) | models.Shift(1)
x = [1, 2]
y = [3, 4]
# Check baseline
assert_equal(model(x, y), [4, 5])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [4, 5])
model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf))
# Check baseline
assert_equal(model(x, y), [4, 5])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [4, 5])
def test_bounding_box_pass_with_ignored():
"""Test the possibility of setting ignored variables in bounding box"""
model = models.Polynomial2D(2)
bbox = ModelBoundingBox.validate(model, (-1, 1), ignored=["y"])
model.bounding_box = bbox
assert model.bounding_box.bounding_box() == (-1, 1)
assert model.bounding_box == bbox
model = models.Polynomial2D(2)
bind_bounding_box(model, (-1, 1), ignored=["y"])
assert model.bounding_box.bounding_box() == (-1, 1)
assert model.bounding_box == bbox
def test_compound_bounding_box_pass_with_ignored():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = {
(0,): (-0.5, 1047.5),
(1,): (-0.5, 2047.5),
}
cbbox = CompoundBoundingBox.validate(
model, bbox, selector_args=[("slit_id", True)], ignored=["y"], order="F"
)
model.bounding_box = cbbox
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bind_compound_bounding_box(
model, bbox, selector_args=[("slit_id", True)], ignored=["y"], order="F"
)
assert model.bounding_box == cbbox
@pytest.mark.parametrize("int_type", [int, np.int32, np.int64, np.uint32, np.uint64])
def test_model_integer_indexing(int_type):
"""Regression for PR 12561; verify that compound model components
can be accessed by integer index"""
gauss = models.Gaussian2D()
airy = models.AiryDisk2D()
compound = gauss + airy
assert compound[int_type(0)] == gauss
assert compound[int_type(1)] == airy
def test_model_string_indexing():
"""Regression for PR 12561; verify that compound model components
can be accessed by indexing with model name"""
gauss = models.Gaussian2D()
gauss.name = "Model1"
airy = models.AiryDisk2D()
airy.name = "Model2"
compound = gauss + airy
assert compound["Model1"] == gauss
assert compound["Model2"] == airy
|
44423f15585771d7f6679f903232dcd5adc16056b071a14d6faeed0bd9e058ec | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name, no-member
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling.bounding_box import ModelBoundingBox
from astropy.modeling.core import fix_inputs
from astropy.modeling.fitting import (
DogBoxLSQFitter,
LevMarLSQFitter,
LMLSQFitter,
TRFLSQFitter,
)
from astropy.modeling.functional_models import (
AiryDisk2D,
ArcCosine1D,
ArcSine1D,
ArcTangent1D,
Box1D,
Box2D,
Const1D,
Const2D,
Cosine1D,
Disk2D,
Ellipse2D,
Exponential1D,
Gaussian1D,
Gaussian2D,
KingProjectedAnalytic1D,
Linear1D,
Logarithmic1D,
Lorentz1D,
Moffat1D,
Moffat2D,
Multiply,
Planar2D,
RickerWavelet1D,
RickerWavelet2D,
Ring2D,
Scale,
Sersic1D,
Sersic2D,
Sine1D,
Tangent1D,
Trapezoid1D,
TrapezoidDisk2D,
Voigt1D,
)
from astropy.modeling.parameters import InputParameterError
from astropy.modeling.physical_models import Drude1D, Plummer1D
from astropy.modeling.polynomial import Polynomial1D, Polynomial2D
from astropy.modeling.powerlaws import (
BrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D,
PowerLaw1D,
Schechter1D,
SmoothlyBrokenPowerLaw1D,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]
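# Each entry in the model lists below follows the same layout:
#   "class"        - the model class under test
#   "parameters"   - keyword arguments (with units) used to instantiate it
#   "evaluation"   - tuples of input Quantities followed by the expected output
#   "bounding_box" - the expected bounding box, or False if none is defined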
FUNC_MODELS_1D = [
{
"class": Gaussian1D,
"parameters": {"amplitude": 3 * u.Jy, "mean": 2 * u.m, "stddev": 30 * u.cm},
"evaluation": [(2600 * u.mm, 3 * u.Jy * np.exp(-2))],
"bounding_box": [0.35, 3.65] * u.m,
},
{
"class": Sersic1D,
"parameters": {"amplitude": 3 * u.MJy / u.sr, "r_eff": 2 * u.arcsec, "n": 4},
"evaluation": [(3 * u.arcsec, 1.3237148119468918 * u.MJy / u.sr)],
"bounding_box": False,
},
{
"class": Sine1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.25 * u.Hz,
"phase": 0.5,
},
"evaluation": [(1 * u.s, -3 * u.km / u.s)],
"bounding_box": False,
},
{
"class": Cosine1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.25 * u.Hz,
"phase": 0.25,
},
"evaluation": [(1 * u.s, -3 * u.km / u.s)],
"bounding_box": False,
},
{
"class": Tangent1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.125 * u.Hz,
"phase": 0.25,
},
"evaluation": [(1 * u.s, -3 * u.km / u.s)],
"bounding_box": [-4, 0] / u.Hz,
},
{
"class": ArcSine1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.25 * u.Hz,
"phase": 0.5,
},
"evaluation": [(0 * u.km / u.s, -2 * u.s)],
"bounding_box": [-3, 3] * u.km / u.s,
},
{
"class": ArcCosine1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.25 * u.Hz,
"phase": 0.5,
},
"evaluation": [(0 * u.km / u.s, -1 * u.s)],
"bounding_box": [-3, 3] * u.km / u.s,
},
{
"class": ArcTangent1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.125 * u.Hz,
"phase": 0.25,
},
"evaluation": [(0 * u.km / u.s, -2 * u.s)],
"bounding_box": False,
},
{
"class": Linear1D,
"parameters": {"slope": 3 * u.km / u.s, "intercept": 5000 * u.m},
"evaluation": [(6000 * u.ms, 23 * u.km)],
"bounding_box": False,
},
{
"class": Lorentz1D,
"parameters": {"amplitude": 2 * u.Jy, "x_0": 505 * u.nm, "fwhm": 100 * u.AA},
"evaluation": [(0.51 * u.micron, 1 * u.Jy)],
"bounding_box": [255, 755] * u.nm,
},
{
"class": Voigt1D,
"parameters": {
"amplitude_L": 2 * u.Jy,
"x_0": 505 * u.nm,
"fwhm_L": 100 * u.AA,
"fwhm_G": 50 * u.AA,
},
"evaluation": [(0.51 * u.micron, 1.0621795524 * u.Jy)],
"bounding_box": False,
},
{
"class": Voigt1D,
"parameters": {
"amplitude_L": 2 * u.Jy,
"x_0": 505 * u.nm,
"fwhm_L": 100 * u.AA,
"fwhm_G": 50 * u.AA,
"method": "humlicek2",
},
"evaluation": [(0.51 * u.micron, 1.0621795524 * u.Jy)],
"bounding_box": False,
},
{
"class": Const1D,
"parameters": {"amplitude": 3 * u.Jy},
"evaluation": [(0.6 * u.micron, 3 * u.Jy)],
"bounding_box": False,
},
{
"class": Box1D,
"parameters": {"amplitude": 3 * u.Jy, "x_0": 4.4 * u.um, "width": 1 * u.um},
"evaluation": [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
"bounding_box": [3.9, 4.9] * u.um,
},
{
"class": Trapezoid1D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 4.4 * u.um,
"width": 1 * u.um,
"slope": 5 * u.Jy / u.um,
},
"evaluation": [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
"bounding_box": [3.3, 5.5] * u.um,
},
{
"class": RickerWavelet1D,
"parameters": {"amplitude": 3 * u.Jy, "x_0": 4.4 * u.um, "sigma": 1e-3 * u.mm},
"evaluation": [(1000 * u.nm, -0.09785050 * u.Jy)],
"bounding_box": [-5.6, 14.4] * u.um,
},
{
"class": Moffat1D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 4.4 * u.um,
"gamma": 1e-3 * u.mm,
"alpha": 1,
},
"evaluation": [(1000 * u.nm, 0.238853503 * u.Jy)],
"bounding_box": False,
},
{
"class": KingProjectedAnalytic1D,
"parameters": {
"amplitude": 1.0 * u.Msun / u.pc**2,
"r_core": 1.0 * u.pc,
"r_tide": 2.0 * u.pc,
},
"evaluation": [(0.5 * u.pc, 0.2 * u.Msun / u.pc**2)],
"bounding_box": [0.0 * u.pc, 2.0 * u.pc],
},
{
"class": Logarithmic1D,
"parameters": {"amplitude": 5 * u.m, "tau": 2 * u.m},
"evaluation": [(4 * u.m, 3.4657359027997265 * u.m)],
"bounding_box": False,
},
{
"class": Exponential1D,
"parameters": {"amplitude": 5 * u.m, "tau": 2 * u.m},
"evaluation": [(4 * u.m, 36.945280494653254 * u.m)],
"bounding_box": False,
},
]
SCALE_MODELS = [
{
"class": Scale,
"parameters": {"factor": 2 * u.m},
"evaluation": [(1 * u.m, 2 * u.m)],
"bounding_box": False,
},
{
"class": Multiply,
"parameters": {"factor": 2 * u.m},
"evaluation": [(1 * u.m / u.m, 2 * u.m)],
"bounding_box": False,
},
]
PHYS_MODELS_1D = [
{
"class": Plummer1D,
"parameters": {"mass": 3 * u.kg, "r_plum": 0.5 * u.m},
"evaluation": [(1 * u.m, 0.10249381 * u.kg / (u.m**3))],
"bounding_box": False,
},
{
"class": Drude1D,
"parameters": {
"amplitude": 1.0 * u.m,
"x_0": 2175.0 * u.AA,
"fwhm": 400.0 * u.AA,
},
"evaluation": [(2000 * u.AA, 0.5452317018423869 * u.m)],
"bounding_box": [-17825, 22175] * u.AA,
},
]
FUNC_MODELS_2D = [
{
"class": Gaussian2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_mean": 2 * u.m,
"y_mean": 1 * u.m,
"x_stddev": 3 * u.m,
"y_stddev": 2 * u.m,
"theta": 45 * u.deg,
},
"evaluation": [
(412.1320343 * u.cm, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5))
],
"bounding_box": [[-13.02230366, 15.02230366], [-12.02230366, 16.02230366]]
* u.m,
},
{
"class": Const2D,
"parameters": {"amplitude": 3 * u.Jy},
"evaluation": [(0.6 * u.micron, 0.2 * u.m, 3 * u.Jy)],
"bounding_box": False,
},
{
"class": Disk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"R_0": 300 * u.cm,
},
"evaluation": [(5.8 * u.m, 201 * u.cm, 3 * u.Jy)],
"bounding_box": [[-1, 5], [0, 6]] * u.m,
},
{
"class": TrapezoidDisk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 1 * u.m,
"y_0": 2 * u.m,
"R_0": 100 * u.cm,
"slope": 1 * u.Jy / u.m,
},
"evaluation": [(3.5 * u.m, 2 * u.m, 1.5 * u.Jy)],
"bounding_box": [[-2, 6], [-3, 5]] * u.m,
},
{
"class": Ellipse2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"a": 300 * u.cm,
"b": 200 * u.cm,
"theta": 45 * u.deg,
},
"evaluation": [(4 * u.m, 300 * u.cm, 3 * u.Jy)],
"bounding_box": [
[-0.5495097567963922, 4.549509756796392],
[0.4504902432036073, 5.549509756796393],
]
* u.m,
},
{
"class": Ring2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"r_in": 2 * u.cm,
"r_out": 2.1 * u.cm,
},
"evaluation": [(302.05 * u.cm, 2 * u.m + 10 * u.um, 3 * u.Jy)],
"bounding_box": [[1.979, 2.021], [2.979, 3.021]] * u.m,
},
{
"class": Box2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.s,
"x_width": 4 * u.cm,
"y_width": 3 * u.s,
},
"evaluation": [(301 * u.cm, 3 * u.s, 3 * u.Jy)],
"bounding_box": [[0.5 * u.s, 3.5 * u.s], [2.98 * u.m, 3.02 * u.m]],
},
{
"class": RickerWavelet2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"sigma": 1 * u.m,
},
"evaluation": [(4 * u.m, 2.5 * u.m, 0.602169107 * u.Jy)],
"bounding_box": False,
},
{
"class": AiryDisk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"radius": 1 * u.m,
},
"evaluation": [(4 * u.m, 2.1 * u.m, 4.76998480e-05 * u.Jy)],
"bounding_box": False,
},
{
"class": Moffat2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 4.4 * u.um,
"y_0": 3.5 * u.um,
"gamma": 1e-3 * u.mm,
"alpha": 1,
},
"evaluation": [(1000 * u.nm, 2 * u.um, 0.202565833 * u.Jy)],
"bounding_box": False,
},
{
"class": Sersic2D,
"parameters": {
"amplitude": 3 * u.MJy / u.sr,
"x_0": 1 * u.arcsec,
"y_0": 2 * u.arcsec,
"r_eff": 2 * u.arcsec,
"n": 4,
"ellip": 0,
"theta": 0,
},
"evaluation": [(3 * u.arcsec, 2.5 * u.arcsec, 2.829990489 * u.MJy / u.sr)],
"bounding_box": False,
},
{
"class": Planar2D,
"parameters": {"slope_x": 2 * u.m, "slope_y": 3 * u.m, "intercept": 4 * u.m},
"evaluation": [(5 * u.m / u.m, 6 * u.m / u.m, 32 * u.m)],
"bounding_box": False,
},
]
POWERLAW_MODELS = [
{
"class": PowerLaw1D,
"parameters": {"amplitude": 5 * u.kg, "x_0": 10 * u.cm, "alpha": 1},
"evaluation": [(1 * u.m, 500 * u.g)],
"bounding_box": False,
},
{
"class": BrokenPowerLaw1D,
"parameters": {
"amplitude": 5 * u.kg,
"x_break": 10 * u.cm,
"alpha_1": 1,
"alpha_2": -1,
},
"evaluation": [(1 * u.m, 50 * u.kg), (1 * u.cm, 50 * u.kg)],
"bounding_box": False,
},
{
"class": SmoothlyBrokenPowerLaw1D,
"parameters": {
"amplitude": 5 * u.kg,
"x_break": 10 * u.cm,
"alpha_1": 1,
"alpha_2": -1,
"delta": 1,
},
"evaluation": [(1 * u.cm, 15.125 * u.kg), (1 * u.m, 15.125 * u.kg)],
"bounding_box": False,
},
{
"class": ExponentialCutoffPowerLaw1D,
"parameters": {
"amplitude": 5 * u.kg,
"x_0": 10 * u.cm,
"alpha": 1,
"x_cutoff": 1 * u.m,
},
"evaluation": [(1 * u.um, 499999.5 * u.kg), (10 * u.m, 50 * np.exp(-10) * u.g)],
"bounding_box": False,
},
{
"class": LogParabola1D,
"parameters": {"amplitude": 5 * u.kg, "x_0": 10 * u.cm, "alpha": 1, "beta": 2},
"evaluation": [(1 * u.cm, 5 * 0.1 ** (-1 - 2 * np.log(0.1)) * u.kg)],
"bounding_box": False,
},
{
"class": Schechter1D,
"parameters": {
"phi_star": 1.0e-4 * (u.Mpc**-3),
"m_star": -20.0 * u.ABmag,
"alpha": -1.9,
},
"evaluation": [(-23 * u.ABmag, 1.002702276867279e-12 * (u.Mpc**-3))],
"bounding_box": False,
},
]
POLY_MODELS = [
{
"class": Polynomial1D,
"parameters": {"degree": 2, "c0": 3 * u.one, "c1": 2 / u.m, "c2": 3 / u.m**2},
"evaluation": [(3 * u.m, 36 * u.one)],
"bounding_box": False,
},
{
"class": Polynomial1D,
"parameters": {
"degree": 2,
"c0": 3 * u.kg,
"c1": 2 * u.kg / u.m,
"c2": 3 * u.kg / u.m**2,
},
"evaluation": [(3 * u.m, 36 * u.kg)],
"bounding_box": False,
},
{
"class": Polynomial1D,
"parameters": {"degree": 2, "c0": 3 * u.kg, "c1": 2 * u.kg, "c2": 3 * u.kg},
"evaluation": [(3 * u.one, 36 * u.kg)],
"bounding_box": False,
},
{
"class": Polynomial2D,
"parameters": {
"degree": 2,
"c0_0": 3 * u.one,
"c1_0": 2 / u.m,
"c2_0": 3 / u.m**2,
"c0_1": 3 / u.s,
"c0_2": -2 / u.s**2,
"c1_1": 5 / u.m / u.s,
},
"evaluation": [(3 * u.m, 2 * u.s, 64 * u.one)],
"bounding_box": False,
},
{
"class": Polynomial2D,
"parameters": {
"degree": 2,
"c0_0": 3 * u.kg,
"c1_0": 2 * u.kg / u.m,
"c2_0": 3 * u.kg / u.m**2,
"c0_1": 3 * u.kg / u.s,
"c0_2": -2 * u.kg / u.s**2,
"c1_1": 5 * u.kg / u.m / u.s,
},
"evaluation": [(3 * u.m, 2 * u.s, 64 * u.kg)],
"bounding_box": False,
},
{
"class": Polynomial2D,
"parameters": {
"degree": 2,
"c0_0": 3 * u.kg,
"c1_0": 2 * u.kg,
"c2_0": 3 * u.kg,
"c0_1": 3 * u.kg,
"c0_2": -2 * u.kg,
"c1_1": 5 * u.kg,
},
"evaluation": [(3 * u.one, 2 * u.one, 64 * u.kg)],
"bounding_box": False,
},
]
MODELS = (
FUNC_MODELS_1D
+ SCALE_MODELS
+ FUNC_MODELS_2D
+ POWERLAW_MODELS
+ PHYS_MODELS_1D
+ POLY_MODELS
)
SCIPY_MODELS = {Sersic1D, Sersic2D, AiryDisk2D}
# These models will fail the fitting test because the built-in fitting data
# will produce non-finite values
NON_FINITE_LevMar_MODELS = [
Sersic1D,
ArcSine1D,
ArcCosine1D,
PowerLaw1D,
ExponentialCutoffPowerLaw1D,
BrokenPowerLaw1D,
LogParabola1D,
Schechter1D,
]
# These models will fail the TRFLSQFitter fitting test due to non-finite values
NON_FINITE_TRF_MODELS = [
ArcSine1D,
ArcCosine1D,
Sersic1D,
Sersic2D,
PowerLaw1D,
ExponentialCutoffPowerLaw1D,
BrokenPowerLaw1D,
]
# These models will fail the LMLSQFitter fitting test due to non-finite values
NON_FINITE_LM_MODELS = [
Sersic1D,
ArcSine1D,
ArcCosine1D,
PowerLaw1D,
LogParabola1D,
Schechter1D,
ExponentialCutoffPowerLaw1D,
BrokenPowerLaw1D,
]
# These models will fail the DogBoxLSQFitter fitting test due to non-finite values
NON_FINITE_DogBox_MODELS = [
Sersic1D,
Sersic2D,
ArcSine1D,
ArcCosine1D,
SmoothlyBrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D,
]
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.filterwarnings(r"ignore:humlicek2 has been deprecated since .*")
def test_models_evaluate_without_units(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
m = model["class"](**model["parameters"])
for args in model["evaluation"]:
if len(args) == 2:
kwargs = dict(zip(("x", "y"), args))
else:
kwargs = dict(zip(("x", "y", "z"), args))
if kwargs["x"].unit.is_equivalent(kwargs["y"].unit):
kwargs["x"] = kwargs["x"].to(kwargs["y"].unit)
mnu = m.without_units_for_data(**kwargs)
args = [x.value for x in kwargs.values()]
assert_quantity_allclose(mnu(*args[:-1]), args[-1])
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.filterwarnings(r"ignore:humlicek2 has been deprecated since .*")
def test_models_evaluate_with_units(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
m = model["class"](**model["parameters"])
for args in model["evaluation"]:
assert_quantity_allclose(m(*args[:-1]), args[-1])
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.filterwarnings(r"ignore:humlicek2 has been deprecated since .*")
def test_models_evaluate_with_units_x_array(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
m = model["class"](**model["parameters"])
for args in model["evaluation"]:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x], subok=True)
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y], subok=True))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.filterwarnings(r"ignore:humlicek2 has been deprecated since .*")
def test_models_evaluate_with_units_param_array(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
params = {}
for key, value in model["parameters"].items():
if value is None or key in ("degree", "method"):
params[key] = value
else:
params[key] = np.repeat(value, 2)
params["n_models"] = 2
m = model["class"](**params)
for args in model["evaluation"]:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x], subok=True)
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y], subok=True))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
if model["class"] == Drude1D:
params["x_0"][-1] = 0 * u.AA
MESSAGE = r"0 is not an allowed value for x_0"
with pytest.raises(InputParameterError, match=MESSAGE):
model["class"](**params)
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.filterwarnings(r"ignore:humlicek2 has been deprecated since .*")
def test_models_bounding_box(model):
# In some cases, having units in parameters caused bounding_box to break,
# so this is to ensure that it works correctly.
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
m = model["class"](**model["parameters"])
# In the following we need to explicitly test that the value is False
    # since Quantities no longer evaluate as True
if model["bounding_box"] is False:
        # Check that NotImplementedError is raised, so that if bounding_box is
        # implemented later we remember to add the expected bounding box to the
        # list of models above
MESSAGE = r"No bounding box is defined for this model"
with pytest.raises(NotImplementedError, match=MESSAGE):
m.bounding_box
else:
# A bounding box may have inhomogeneous units so we need to check the
# values one by one.
for i in range(len(model["bounding_box"])):
bbox = m.bounding_box
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
assert_quantity_allclose(bbox[i], model["bounding_box"][i])
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.filterwarnings(r"ignore:humlicek2 has been deprecated since .*")
def test_compound_model_input_units_equivalencies_defaults(model):
m = model["class"](**model["parameters"])
assert m.input_units_equivalencies is None
compound_model = m + m
assert compound_model.inputs_map()["x"][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {"x": 1})
assert fixed_input_model.input_units_equivalencies is None
compound_model = m - m
assert compound_model.inputs_map()["x"][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {"x": 1})
assert fixed_input_model.input_units_equivalencies is None
compound_model = m & m
assert compound_model.inputs_map()["x1"][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {"x0": 1})
assert fixed_input_model.inputs_map()["x1"][0].input_units_equivalencies is None
assert fixed_input_model.input_units_equivalencies is None
if m.n_outputs == m.n_inputs:
compound_model = m | m
assert compound_model.inputs_map()["x"][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {"x": 1})
assert fixed_input_model.input_units_equivalencies is None
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:.*:RuntimeWarning")
@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters.*")
@pytest.mark.filterwarnings(r"ignore:The fit may be unsuccessful.*")
@pytest.mark.filterwarnings(r"ignore:humlicek2 has been deprecated since .*")
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("fitter", fitters)
def test_models_fitting(model, fitter):
fitter = fitter()
bad_voigt = model["class"] == Voigt1D and ("method" not in model["parameters"])
if (
(
isinstance(fitter, LevMarLSQFitter)
and model["class"] in NON_FINITE_LevMar_MODELS
)
or (
isinstance(fitter, TRFLSQFitter)
and (model["class"] in NON_FINITE_TRF_MODELS or bad_voigt)
)
or (
isinstance(fitter, LMLSQFitter)
and (model["class"] in NON_FINITE_LM_MODELS or bad_voigt)
)
or (
isinstance(fitter, DogBoxLSQFitter)
and model["class"] in NON_FINITE_DogBox_MODELS
)
):
return
m = model["class"](**model["parameters"])
if len(model["evaluation"][0]) == 2:
x = np.linspace(1, 3, 100) * model["evaluation"][0][0].unit
y = np.exp(-x.value**2) * model["evaluation"][0][1].unit
args = [x, y]
else:
x = np.linspace(1, 3, 100) * model["evaluation"][0][0].unit
y = np.linspace(1, 3, 100) * model["evaluation"][0][1].unit
z = np.exp(-x.value**2 - y.value**2) * model["evaluation"][0][2].unit
args = [x, y, z]
# Test that the model fits even if it has units on parameters
m_new = fitter(m, *args)
# Check that units have been put back correctly
for param_name in m.param_names:
par_bef = getattr(m, param_name)
par_aft = getattr(m_new, param_name)
if par_bef.unit is None:
            # If the parameter did not have a unit before the fit but has one
            # now (e.g. a radian unit for an angle), then we should allow that
assert par_aft.unit is None or par_aft.unit is u.rad
else:
assert par_aft.unit.is_equivalent(par_bef.unit)
unit_mismatch_models = [
{
"class": Gaussian2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_mean": 2 * u.m,
"y_mean": 1 * u.m,
"x_stddev": 3 * u.m,
"y_stddev": 2 * u.m,
"theta": 45 * u.deg,
},
"evaluation": [
(412.1320343 * u.cm, 3.121320343 * u.K, 3 * u.Jy * np.exp(-0.5)),
(412.1320343 * u.K, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5)),
],
"bounding_box": [[-14.18257445, 16.18257445], [-10.75693665, 14.75693665]]
* u.m,
},
{
"class": Ellipse2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"a": 300 * u.cm,
"b": 200 * u.cm,
"theta": 45 * u.deg,
},
"evaluation": [(4 * u.m, 300 * u.K, 3 * u.Jy), (4 * u.K, 300 * u.cm, 3 * u.Jy)],
"bounding_box": [[-0.76046808, 4.76046808], [0.68055697, 5.31944302]] * u.m,
},
{
"class": Disk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"R_0": 300 * u.cm,
},
"evaluation": [
(5.8 * u.m, 201 * u.K, 3 * u.Jy),
(5.8 * u.K, 201 * u.cm, 3 * u.Jy),
],
"bounding_box": [[-1, 5], [0, 6]] * u.m,
},
{
"class": Ring2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"r_in": 2 * u.cm,
"r_out": 2.1 * u.cm,
},
"evaluation": [
(302.05 * u.cm, 2 * u.K + 10 * u.K, 3 * u.Jy),
(302.05 * u.K, 2 * u.m + 10 * u.um, 3 * u.Jy),
],
"bounding_box": [[1.979, 2.021], [2.979, 3.021]] * u.m,
},
{
"class": TrapezoidDisk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 1 * u.m,
"y_0": 2 * u.m,
"R_0": 100 * u.cm,
"slope": 1 * u.Jy / u.m,
},
"evaluation": [
(3.5 * u.m, 2 * u.K, 1.5 * u.Jy),
(3.5 * u.K, 2 * u.m, 1.5 * u.Jy),
],
"bounding_box": [[-2, 6], [-3, 5]] * u.m,
},
{
"class": RickerWavelet2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"sigma": 1 * u.m,
},
"evaluation": [
(4 * u.m, 2.5 * u.K, 0.602169107 * u.Jy),
(4 * u.K, 2.5 * u.m, 0.602169107 * u.Jy),
],
"bounding_box": False,
},
{
"class": AiryDisk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"radius": 1 * u.m,
},
"evaluation": [
(4 * u.m, 2.1 * u.K, 4.76998480e-05 * u.Jy),
(4 * u.K, 2.1 * u.m, 4.76998480e-05 * u.Jy),
],
"bounding_box": False,
},
{
"class": Moffat2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 4.4 * u.um,
"y_0": 3.5 * u.um,
"gamma": 1e-3 * u.mm,
"alpha": 1,
},
"evaluation": [
(1000 * u.nm, 2 * u.K, 0.202565833 * u.Jy),
(1000 * u.K, 2 * u.um, 0.202565833 * u.Jy),
],
"bounding_box": False,
},
{
"class": Sersic2D,
"parameters": {
"amplitude": 3 * u.MJy / u.sr,
"x_0": 1 * u.arcsec,
"y_0": 2 * u.arcsec,
"r_eff": 2 * u.arcsec,
"n": 4,
"ellip": 0,
"theta": 0,
},
"evaluation": [
(3 * u.arcsec, 2.5 * u.m, 2.829990489 * u.MJy / u.sr),
(3 * u.m, 2.5 * u.arcsec, 2.829990489 * u.MJy / u.sr),
],
"bounding_box": False,
},
]
@pytest.mark.parametrize("model", unit_mismatch_models)
def test_input_unit_mismatch_error(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
MESSAGE = "Units of 'x' and 'y' inputs should match"
m = model["class"](**model["parameters"])
for args in model["evaluation"]:
if len(args) == 2:
kwargs = dict(zip(("x", "y"), args))
else:
kwargs = dict(zip(("x", "y", "z"), args))
if kwargs["x"].unit.is_equivalent(kwargs["y"].unit):
kwargs["x"] = kwargs["x"].to(kwargs["y"].unit)
with pytest.raises(u.UnitsError, match=MESSAGE):
m.without_units_for_data(**kwargs)
mag_models = [
{
"class": Const1D,
"parameters": {"amplitude": 3 * u.ABmag},
"evaluation": [(0.6 * u.ABmag, 3 * u.ABmag)],
},
{
"class": Const1D,
"parameters": {"amplitude": 3 * u.ABmag},
"evaluation": [(0.6 * u.mag, 3 * u.ABmag)],
},
{
"class": Const1D,
"parameters": {"amplitude": 3 * u.mag},
"evaluation": [(0.6 * u.ABmag, 3 * u.mag)],
},
{
"class": Const1D,
"parameters": {"amplitude": 3 * u.mag},
"evaluation": [(0.6 * u.mag, 3 * u.mag)],
},
{
"class": Const2D,
"parameters": {"amplitude": 3 * u.ABmag},
"evaluation": [(0.6 * u.micron, 0.2 * u.m, 3 * u.ABmag)],
},
{
"class": Ellipse2D,
"parameters": {
"amplitude": 3 * u.ABmag,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"a": 300 * u.cm,
"b": 200 * u.cm,
"theta": 45 * u.deg,
},
"evaluation": [(4 * u.m, 300 * u.cm, 3 * u.ABmag)],
},
{
"class": Disk2D,
"parameters": {
"amplitude": 3 * u.ABmag,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"R_0": 300 * u.cm,
},
"evaluation": [(5.8 * u.m, 201 * u.cm, 3 * u.ABmag)],
},
{
"class": Ring2D,
"parameters": {
"amplitude": 3 * u.ABmag,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"r_in": 2 * u.cm,
"r_out": 2.1 * u.cm,
},
"evaluation": [(302.05 * u.cm, 2 * u.m + 10 * u.um, 3 * u.ABmag)],
},
{
"class": Box2D,
"parameters": {
"amplitude": 3 * u.ABmag,
"x_0": 3 * u.m,
"y_0": 2 * u.s,
"x_width": 4 * u.cm,
"y_width": 3 * u.s,
},
"evaluation": [(301 * u.cm, 3 * u.s, 3 * u.ABmag)],
},
{
"class": SmoothlyBrokenPowerLaw1D,
"parameters": {
"amplitude": 5 * u.ABmag,
"x_break": 10 * u.cm,
"alpha_1": 1,
"alpha_2": -1,
"delta": 1,
},
"evaluation": [(1 * u.cm, 15.125 * u.ABmag), (1 * u.m, 15.125 * u.ABmag)],
},
{
"class": Box1D,
"parameters": {"amplitude": 3 * u.ABmag, "x_0": 4.4 * u.um, "width": 1 * u.um},
"evaluation": [(4200 * u.nm, 3 * u.ABmag), (1 * u.m, 0 * u.ABmag)],
"bounding_box": [3.9, 4.9] * u.um,
},
{
"class": Schechter1D,
"parameters": {
"phi_star": 1.0e-4 * (u.Mpc**-3),
"m_star": -20.0 * u.ABmag,
"alpha": -1.9,
},
"evaluation": [(-23 * u.ABmag, 1.002702276867279e-12 * (u.Mpc**-3))],
},
{
"class": Schechter1D,
"parameters": {
"phi_star": 1.0e-4 * (u.Mpc**-3),
"m_star": -20.0 * u.mag,
"alpha": -1.9,
},
"evaluation": [(-23 * u.mag, 1.002702276867279e-12 * (u.Mpc**-3))],
},
]
@pytest.mark.parametrize("model", mag_models)
def test_models_evaluate_magunits(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
m = model["class"](**model["parameters"])
for args in model["evaluation"]:
assert_quantity_allclose(m(*args[:-1]), args[-1])
def test_Schechter1D_errors():
# Non magnitude units are bad
model = Schechter1D(
phi_star=1.0e-4 * (u.Mpc**-3), m_star=-20.0 * u.km, alpha=-1.9
)
MESSAGE = r"The units of magnitude and m_star must be a magnitude"
with pytest.raises(u.UnitsError, match=MESSAGE):
model(-23 * u.km)
# Differing magnitude systems are bad
model = Schechter1D(
phi_star=1.0e-4 * (u.Mpc**-3), m_star=-20.0 * u.ABmag, alpha=-1.9
)
MESSAGE = (
r".*: Units of input 'x', .*, could not be converted to required input units"
r" of .*"
)
with pytest.raises(u.UnitsError, match=MESSAGE):
model(-23 * u.STmag)
# Differing magnitude systems are bad
model = Schechter1D(
phi_star=1.0e-4 * (u.Mpc**-3), m_star=-20.0 * u.ABmag, alpha=-1.9
)
with pytest.raises(u.UnitsError, match=MESSAGE):
model(-23 * u.mag)
|
736741340f6b72746c04621825fbffa81deec80143bd562a5cccccba9a073499 | import threading
import time
import xmlrpc.client as xmlrpc
from astropy.samp.client import SAMPClient
from astropy.samp.errors import SAMPClientError, SAMPHubError
from astropy.samp.hub import WebProfileDialog
from astropy.samp.hub_proxy import SAMPHubProxy
from astropy.samp.integrated_client import SAMPIntegratedClient
from astropy.samp.utils import ServerProxyPool
class AlwaysApproveWebProfileDialog(WebProfileDialog):
def __init__(self):
self.polling = True
WebProfileDialog.__init__(self)
def show_dialog(self, *args):
self.consent()
def poll(self):
while self.polling:
self.handle_queue()
time.sleep(0.1)
def stop(self):
self.polling = False
class SAMPWebHubProxy(SAMPHubProxy):
"""
Proxy class to simplify the client interaction with a SAMP hub (via the web
profile).
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
"""
def connect(self, pool_size=20, web_port=21012):
"""
Connect to the current SAMP Hub on localhost:web_port.
Parameters
----------
        pool_size : int, optional
            The number of socket connections opened to communicate with the
            Hub.
        web_port : int, optional
            The port on localhost where the hub's web profile server is
            listening (default 21012).
"""
self._connected = False
try:
self.proxy = ServerProxyPool(
pool_size,
xmlrpc.ServerProxy,
f"http://127.0.0.1:{web_port}",
allow_none=1,
)
self.ping()
self._connected = True
except xmlrpc.ProtocolError as p:
raise SAMPHubError(f"Protocol Error {p.errcode}: {p.errmsg}")
@property
def _samp_hub(self):
"""
Property to abstract away the path to the hub, which allows this class
to be used for both the standard and the web profile.
"""
return self.proxy.samp.webhub
def set_xmlrpc_callback(self, private_key, xmlrpc_addr):
raise NotImplementedError(
"set_xmlrpc_callback is not defined for the web profile"
)
def register(self, identity_info):
"""
Proxy to ``register`` SAMP Hub method.
"""
return self._samp_hub.register(identity_info)
def allow_reverse_callbacks(self, private_key, allow):
"""
Proxy to ``allowReverseCallbacks`` SAMP Hub method.
"""
return self._samp_hub.allowReverseCallbacks(private_key, allow)
def pull_callbacks(self, private_key, timeout):
"""
Proxy to ``pullCallbacks`` SAMP Hub method.
"""
return self._samp_hub.pullCallbacks(private_key, timeout)
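# Minimal usage sketch (not an official recipe; it assumes a SAMP hub with the
# web profile enabled is listening on the default port 21012, and that
# ``disconnect`` is inherited from SAMPHubProxy):
#
#     proxy = SAMPWebHubProxy()
#     proxy.connect()
#     info = proxy.register("My Web Client")
#     proxy.allow_reverse_callbacks(info["samp.private-key"], True)
#     callbacks = proxy.pull_callbacks(info["samp.private-key"], 0)
#     proxy.disconnect()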
class SAMPWebClient(SAMPClient):
"""
Utility class which provides facilities to create and manage a SAMP
    compliant XML-RPC server that acts as a SAMP callable web client application.
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
Parameters
----------
hub : :class:`~astropy.samp.hub_proxy.SAMPWebHubProxy`
An instance of :class:`~astropy.samp.hub_proxy.SAMPWebHubProxy` to
be used for messaging with the SAMP Hub.
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
def __init__(self, hub, name=None, description=None, metadata=None, callable=True):
# GENERAL
self._is_running = False
self._is_registered = False
if metadata is None:
metadata = {}
if name is not None:
metadata["samp.name"] = name
if description is not None:
metadata["samp.description.text"] = description
self._metadata = metadata
self._callable = callable
# HUB INTERACTION
self.client = None
self._public_id = None
self._private_key = None
self._hub_id = None
self._notification_bindings = {}
self._call_bindings = {
"samp.app.ping": [self._ping, {}],
"client.env.get": [self._client_env_get, {}],
}
self._response_bindings = {}
self.hub = hub
self._registration_lock = threading.Lock()
self._registered_event = threading.Event()
if self._callable:
self._thread = threading.Thread(target=self._serve_forever)
self._thread.daemon = True
def _serve_forever(self):
while self.is_running:
# Wait until we are actually registered before trying to do
# anything, to avoid busy looping
# Watch for callbacks here
self._registered_event.wait()
with self._registration_lock:
if not self._is_registered:
return
results = self.hub.pull_callbacks(self.get_private_key(), 0)
for result in results:
if result["samp.methodName"] == "receiveNotification":
self.receive_notification(
self._private_key, *result["samp.params"]
)
elif result["samp.methodName"] == "receiveCall":
self.receive_call(self._private_key, *result["samp.params"])
elif result["samp.methodName"] == "receiveResponse":
self.receive_response(self._private_key, *result["samp.params"])
self.hub.disconnect()
def register(self):
"""
Register the client to the SAMP Hub.
"""
if self.hub.is_connected:
if self._private_key is not None:
raise SAMPClientError("Client already registered")
result = self.hub.register("Astropy SAMP Web Client")
if result["samp.self-id"] == "":
raise SAMPClientError(
"Registration failed - samp.self-id was not set by the hub."
)
if result["samp.private-key"] == "":
raise SAMPClientError(
"Registration failed - samp.private-key was not set by the hub."
)
self._public_id = result["samp.self-id"]
self._private_key = result["samp.private-key"]
self._hub_id = result["samp.hub-id"]
if self._callable:
self._declare_subscriptions()
self.hub.allow_reverse_callbacks(self._private_key, True)
if self._metadata != {}:
self.declare_metadata()
self._is_registered = True
# Let the client thread proceed
self._registered_event.set()
else:
raise SAMPClientError(
"Unable to register to the SAMP Hub. Hub proxy not connected."
)
def unregister(self):
# We have to hold the registration lock if the client is callable
# to avoid a race condition where the client queries the hub for
        # pullCallbacks after it has already been unregistered from the hub
with self._registration_lock:
super().unregister()
class SAMPIntegratedWebClient(SAMPIntegratedClient):
"""
A Simple SAMP web client.
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
This class is meant to simplify the client usage providing a proxy class
that merges the :class:`~astropy.samp.client.SAMPWebClient` and
:class:`~astropy.samp.hub_proxy.SAMPWebHubProxy` functionalities in a
simplified API.
Parameters
----------
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
def __init__(self, name=None, description=None, metadata=None, callable=True):
self.hub = SAMPWebHubProxy()
self.client = SAMPWebClient(self.hub, name, description, metadata, callable)
def connect(self, pool_size=20, web_port=21012):
"""
Connect with the current or specified SAMP Hub, start and register the
client.
Parameters
----------
        pool_size : int, optional
            The number of socket connections opened to communicate with the
            Hub.
        web_port : int, optional
            The port on localhost where the hub's web profile server is
            listening (default 21012).
"""
self.hub.connect(pool_size, web_port=web_port)
self.client.start()
self.client.register()
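# Example usage (a sketch, with the same assumption as above that a hub with
# the web profile enabled is running on the default web port 21012;
# ``notify_all`` and ``disconnect`` are inherited from SAMPIntegratedClient):
#
#     client = SAMPIntegratedWebClient(name="web-test")
#     client.connect()
#     client.notify_all({"samp.mtype": "samp.app.ping", "samp.params": {}})
#     client.disconnect()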
|
e692d89b6cb836fc341000b4852d3ae71044f58de83c4b875949fe9878ab88d5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.utils.iers package provides access to the tables provided by
the International Earth Rotation and Reference Systems Service, in
particular allowing interpolation of published UT1-UTC values for given
times. These are used in `astropy.time` to provide UT1 values. The polar
motions are also used for determining earth orientation for
celestial-to-terrestrial coordinate transformations
(in `astropy.coordinates`).
"""
import re
from datetime import datetime
from urllib.parse import urlparse
from warnings import warn
import erfa
import numpy as np
from astropy import config as _config
from astropy import units as u
from astropy import utils
from astropy.table import MaskedColumn, QTable
from astropy.time import Time, TimeDelta
from astropy.utils.data import (
clear_download_cache,
get_pkg_data_filename,
get_readable_fileobj,
is_url_in_cache,
)
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.state import ScienceState
__all__ = [
"Conf",
"conf",
"earth_orientation_table",
"IERS",
"IERS_B",
"IERS_A",
"IERS_Auto",
"FROM_IERS_B",
"FROM_IERS_A",
"FROM_IERS_A_PREDICTION",
"TIME_BEFORE_IERS_RANGE",
"TIME_BEYOND_IERS_RANGE",
"IERS_A_FILE",
"IERS_A_URL",
"IERS_A_URL_MIRROR",
"IERS_A_README",
"IERS_B_FILE",
"IERS_B_URL",
"IERS_B_README",
"IERSRangeError",
"IERSStaleWarning",
"IERSWarning",
"IERSDegradedAccuracyWarning",
"LeapSeconds",
"IERS_LEAP_SECOND_FILE",
"IERS_LEAP_SECOND_URL",
"IETF_LEAP_SECOND_URL",
]
# IERS-A default file name, URL, and ReadMe with content description
IERS_A_FILE = "finals2000A.all"
IERS_A_URL = "https://datacenter.iers.org/data/9/finals2000A.all"
IERS_A_URL_MIRROR = "https://maia.usno.navy.mil/ser7/finals2000A.all"
IERS_A_README = get_pkg_data_filename("data/ReadMe.finals2000A")
# IERS-B default file name, URL, and ReadMe with content description
IERS_B_FILE = get_pkg_data_filename("data/eopc04.1962-now")
IERS_B_URL = "https://hpiers.obspm.fr/iers/eop/eopc04/eopc04.1962-now"
IERS_B_README = get_pkg_data_filename("data/ReadMe.eopc04")
# LEAP SECONDS default file name, URL, and alternative format/URL
IERS_LEAP_SECOND_FILE = get_pkg_data_filename("data/Leap_Second.dat")
IERS_LEAP_SECOND_URL = "https://hpiers.obspm.fr/iers/bul/bulc/Leap_Second.dat"
IETF_LEAP_SECOND_URL = "https://www.ietf.org/timezones/data/leap-seconds.list"
# Status/source values returned by IERS.ut1_utc
FROM_IERS_B = 0
FROM_IERS_A = 1
FROM_IERS_A_PREDICTION = 2
TIME_BEFORE_IERS_RANGE = -1
TIME_BEYOND_IERS_RANGE = -2
MJD_ZERO = 2400000.5
INTERPOLATE_ERROR = """\
interpolating from IERS_Auto using predictive values that are more
than {0} days old.
Normally you should not see this error because this class
automatically downloads the latest IERS-A table. Perhaps you are
offline? If you understand what you are doing then this error can be
suppressed by setting the auto_max_age configuration variable to
``None``:
from astropy.utils.iers import conf
conf.auto_max_age = None
"""
MONTH_ABBR = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
class IERSWarning(AstropyWarning):
"""
Generic warning class for IERS.
"""
class IERSDegradedAccuracyWarning(AstropyWarning):
"""
IERS time conversion has degraded accuracy normally due to setting
``conf.auto_download = False`` and ``conf.iers_degraded_accuracy = 'warn'``.
"""
class IERSStaleWarning(IERSWarning):
"""
Downloaded IERS table may be stale.
"""
def download_file(*args, **kwargs):
"""
    Overload astropy.utils.data.download_file within the iers module to use a
custom (longer) wait time. This just passes through ``*args`` and
``**kwargs`` after temporarily setting the download_file remote timeout to
the local ``iers.conf.remote_timeout`` value.
"""
kwargs.setdefault(
"http_headers",
{
"User-Agent": "astropy/iers",
"Accept": "*/*",
},
)
with utils.data.conf.set_temp("remote_timeout", conf.remote_timeout):
return utils.data.download_file(*args, **kwargs)
def _none_to_float(value):
"""
    Convert None to a valid floating point value, used in particular for
    auto_max_age = None.
"""
return value if value is not None else np.finfo(float).max
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.iers`.
"""
auto_download = _config.ConfigItem(
True,
"Enable auto-downloading of the latest IERS data. If set to False "
"then the local IERS-B file will be used by default (even if the "
"full IERS file with predictions was already downloaded and cached). "
"This parameter also controls whether internet resources will be "
"queried to update the leap second table if the installed version is "
"out of date. Default is True.",
)
auto_max_age = _config.ConfigItem(
30.0,
"Maximum age (days) of predictive data before auto-downloading. "
'See "Auto refresh behavior" in astropy.utils.iers documentation for details. '
"Default is 30.",
)
iers_auto_url = _config.ConfigItem(
IERS_A_URL, "URL for auto-downloading IERS file data."
)
iers_auto_url_mirror = _config.ConfigItem(
IERS_A_URL_MIRROR, "Mirror URL for auto-downloading IERS file data."
)
remote_timeout = _config.ConfigItem(
10.0, "Remote timeout downloading IERS file data (seconds)."
)
iers_degraded_accuracy = _config.ConfigItem(
["error", "warn", "ignore"],
"IERS behavior if the range of available IERS data does not "
"cover the times when converting time scales, potentially leading "
"to degraded accuracy.",
)
system_leap_second_file = _config.ConfigItem("", "System file with leap seconds.")
iers_leap_second_auto_url = _config.ConfigItem(
IERS_LEAP_SECOND_URL, "URL for auto-downloading leap seconds."
)
ietf_leap_second_auto_url = _config.ConfigItem(
IETF_LEAP_SECOND_URL, "Alternate URL for auto-downloading leap seconds."
)
conf = Conf()
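# Example (a sketch): run fully offline by disabling auto-downloading and
# downgrading out-of-range IERS handling from an error to a warning:
#
#     from astropy.utils.iers import conf
#     conf.auto_download = False
#     conf.iers_degraded_accuracy = "warn"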
class IERSRangeError(IndexError):
"""
Any error for when dates are outside of the valid range for IERS.
"""
class IERS(QTable):
"""Generic IERS table class, defining interpolation functions.
Sub-classed from `astropy.table.QTable`. The table should hold columns
'MJD', 'UT1_UTC', 'dX_2000A'/'dY_2000A', and 'PM_x'/'PM_y'.
"""
iers_table = None
"""Cached table, returned if ``open`` is called without arguments."""
@classmethod
def open(cls, file=None, cache=False, **kwargs):
"""Open an IERS table, reading it from a file if not loaded before.
Parameters
----------
file : str or None
full local or network path to the ascii file holding IERS data,
for passing on to the ``read`` class methods (further optional
arguments that are available for some IERS subclasses can be added).
If None, use the default location from the ``read`` class method.
cache : bool
Whether to use cache. Defaults to False, since IERS files
are regularly updated.
Returns
-------
IERS
An IERS table class instance
Notes
-----
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table if ``file=None`` (the default).
If a table needs to be re-read from disk, pass on an explicit file
location or use the (sub-class) close method and re-open.
If the location is a network location it is first downloaded via
download_file.
For the IERS class itself, an IERS_B sub-class instance is opened.
"""
if file is not None or cls.iers_table is None:
if file is not None:
if urlparse(file).netloc:
kwargs.update(file=download_file(file, cache=cache))
else:
kwargs.update(file=file)
# TODO: the below is really ugly and probably a bad idea. Instead,
# there should probably be an IERSBase class, which provides
# useful methods but cannot really be used on its own, and then
# *perhaps* an IERS class which provides best defaults. But for
# backwards compatibility, we use the IERS_B reader for IERS here.
if cls is IERS:
cls.iers_table = IERS_B.read(**kwargs)
else:
cls.iers_table = cls.read(**kwargs)
return cls.iers_table
@classmethod
def close(cls):
"""Remove the IERS table from the class.
This allows the table to be re-read from disk during one's session
(e.g., if one finds it is out of date and has updated the file).
"""
cls.iers_table = None
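    # A minimal usage sketch: open the bundled IERS-B table and interpolate
    # UT1-UTC for a given time; ``open`` memoizes the table in the
    # ``iers_table`` class attribute, and ``close`` clears it again.
    #
    #     iers_b = IERS_B.open()
    #     dut1 = iers_b.ut1_utc(Time("2015-06-30"))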
def mjd_utc(self, jd1, jd2=0.0):
"""Turn a time to MJD, returning integer and fractional parts.
Parameters
----------
jd1 : float, array, or `~astropy.time.Time`
first part of two-part JD, or Time object
jd2 : float or array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
Returns
-------
mjd : float or array
integer part of MJD
utc : float or array
fractional part of MJD
"""
try: # see if this is a Time object
jd1, jd2 = jd1.utc.jd1, jd1.utc.jd2
except Exception:
pass
mjd = np.floor(jd1 - MJD_ZERO + jd2)
utc = jd1 - (MJD_ZERO + mjd) + jd2
return mjd, utc
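    # Worked example for the split above: with jd1 = 2450000.5 and jd2 = 0.25,
    # mjd = floor(2450000.5 - 2400000.5 + 0.25) = 50000.0 and
    # utc = 2450000.5 - (2400000.5 + 50000.0) + 0.25 = 0.25.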
def ut1_utc(self, jd1, jd2=0.0, return_status=False):
"""Interpolate UT1-UTC corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
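Examples
--------
A brief sketch, assuming a table opened with `IERS_B.open`; no output is
shown, since the value depends on the table used::

    >>> from astropy.time import Time
    >>> from astropy.utils.iers import IERS_B
    >>> iers_b = IERS_B.open()  # doctest: +SKIP
    >>> iers_b.ut1_utc(Time('2010-01-01'))  # doctest: +SKIP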
"""
return self._interpolate(
jd1, jd2, ["UT1_UTC"], self.ut1_utc_source if return_status else None
)
def dcip_xy(self, jd1, jd2=0.0, return_status=False):
"""Interpolate CIP corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD (default 0., ignored if jd1 is Time)
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
D_x : `~astropy.units.Quantity` ['angle']
x component of CIP correction for the requested times.
D_y : `~astropy.units.Quantity` ['angle']
y component of CIP correction for the requested times.
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(
jd1,
jd2,
["dX_2000A", "dY_2000A"],
self.dcip_source if return_status else None,
)
def pm_xy(self, jd1, jd2=0.0, return_status=False):
"""Interpolate polar motions from IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
PM_x : `~astropy.units.Quantity` ['angle']
x component of polar motion for the requested times.
PM_y : `~astropy.units.Quantity` ['angle']
y component of polar motion for the requested times.
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(
jd1, jd2, ["PM_x", "PM_y"], self.pm_source if return_status else None
)
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""
Check that the indices from interpolation match those after clipping
to the valid table range. This method gets overridden in the IERS_Auto
class because it has different requirements.
"""
if np.any(indices_orig != indices_clipped):
if conf.iers_degraded_accuracy == "error":
msg = (
"(some) times are outside of range covered by IERS table. Cannot"
" convert with full accuracy. To allow conversion with degraded"
" accuracy set astropy.utils.iers.conf.iers_degraded_accuracy to"
' "warn" or "silent". For more information about setting this'
" configuration parameter or controlling its value globally, see"
" the Astropy configuration system documentation"
" https://docs.astropy.org/en/stable/config/index.html."
)
raise IERSRangeError(msg)
elif conf.iers_degraded_accuracy == "warn":
# No IERS data covering the time(s) and user requested a warning.
msg = (
"(some) times are outside of range covered by IERS table, "
"accuracy is degraded."
)
warn(msg, IERSDegradedAccuracyWarning)
# No IERS data covering the time(s) and user is OK with no warning.
def _interpolate(self, jd1, jd2, columns, source=None):
mjd, utc = self.mjd_utc(jd1, jd2)
# enforce array
is_scalar = not hasattr(mjd, "__array__") or mjd.ndim == 0
if is_scalar:
mjd = np.array([mjd])
utc = np.array([utc])
elif mjd.size == 0:
# Short-cut empty input.
return np.array([])
self._refresh_table_as_needed(mjd)
# For typical format, will always find a match (since MJD are integer)
# hence, it is important to define which side we will be on; this ensures
# self['MJD'][i-1]<=mjd<self['MJD'][i]
i = np.searchsorted(self["MJD"].value, mjd, side="right")
# Get index to MJD at or just below given mjd, clipping to ensure we
# stay in range of table (status will be set below for those outside)
i1 = np.clip(i, 1, len(self) - 1)
i0 = i1 - 1
mjd_0, mjd_1 = self["MJD"][i0].value, self["MJD"][i1].value
results = []
for column in columns:
val_0, val_1 = self[column][i0], self[column][i1]
d_val = val_1 - val_0
if column == "UT1_UTC":
# Check & correct for possible leap second (correcting diff.,
# not 1st point, since jump can only happen right at 2nd point)
d_val -= d_val.round()
# Linearly interpolate (which is what TEMPO does for UT1-UTC, but
# may want to follow IERS gazette #13 for more precise
# interpolation and correction for tidal effects;
# https://maia.usno.navy.mil/iers-gaz13)
val = val_0 + (mjd - mjd_0 + utc) / (mjd_1 - mjd_0) * d_val
# Do not extrapolate outside range, instead just propagate last values.
val[i == 0] = self[column][0]
val[i == len(self)] = self[column][-1]
if is_scalar:
val = val[0]
results.append(val)
if source:
# Set status to source, using the routine passed in.
status = source(i1)
# Check for out of range
status[i == 0] = TIME_BEFORE_IERS_RANGE
status[i == len(self)] = TIME_BEYOND_IERS_RANGE
if is_scalar:
status = status[0]
results.append(status)
return results
else:
self._check_interpolate_indices(i1, i, np.max(mjd))
return results[0] if len(results) == 1 else results
def _refresh_table_as_needed(self, mjd):
"""
Potentially update the IERS table in place depending on the requested
time values in ``mjd`` and the time span of the table. The base behavior
is not to update the table. ``IERS_Auto`` overrides this method.
"""
pass
def ut1_utc_source(self, i):
"""Source for UT1-UTC. To be overridden by subclass."""
return np.zeros_like(i)
def dcip_source(self, i):
"""Source for CIP correction. To be overridden by subclass."""
return np.zeros_like(i)
def pm_source(self, i):
"""Source for polar motion. To be overridden by subclass."""
return np.zeros_like(i)
@property
def time_now(self):
"""
Property to provide the current time, but also allow for explicitly setting
the _time_now attribute for testing purposes.
"""
try:
return self._time_now
except Exception:
return Time.now()
def _convert_col_for_table(self, col):
# Fill masked columns with units to avoid dropped-mask warnings
# when converting to Quantity.
# TODO: Once we support masked quantities, we can drop this and
# in the code below replace b_bad with table['UT1_UTC_B'].mask, etc.
if getattr(col, "unit", None) is not None and isinstance(col, MaskedColumn):
col = col.filled(np.nan)
return super()._convert_col_for_table(col)
class IERS_A(IERS):
"""IERS Table class targeted to IERS A, provided by USNO.
These include rapid turnaround and predicted times.
See https://datacenter.iers.org/eop.php
Notes
-----
The IERS A file is not part of astropy. It can be downloaded from
``iers.IERS_A_URL`` or ``iers.IERS_A_URL_MIRROR``. See ``iers.__doc__``
for instructions on use in ``Time``, etc.
"""
iers_table = None
@classmethod
def _combine_a_b_columns(cls, iers_a):
"""
Return a new table with appropriate combination of IERS_A and B columns.
"""
# IERS A has some rows at the end that hold nothing but dates & MJD
# presumably to be filled later. Exclude those a priori -- there
# should at least be a predicted UT1-UTC and PM!
table = iers_a[np.isfinite(iers_a["UT1_UTC_A"]) & (iers_a["PolPMFlag_A"] != "")]
# This does nothing for IERS_A, but allows IERS_Auto to ensure the
# IERS B values in the table are consistent with the true ones.
table = cls._substitute_iers_b(table)
# Combine A and B columns, using B where possible.
b_bad = np.isnan(table["UT1_UTC_B"])
table["UT1_UTC"] = np.where(b_bad, table["UT1_UTC_A"], table["UT1_UTC_B"])
table["UT1Flag"] = np.where(b_bad, table["UT1Flag_A"], "B")
# Repeat for polar motions.
b_bad = np.isnan(table["PM_X_B"]) | np.isnan(table["PM_Y_B"])
table["PM_x"] = np.where(b_bad, table["PM_x_A"], table["PM_X_B"])
table["PM_y"] = np.where(b_bad, table["PM_y_A"], table["PM_Y_B"])
table["PolPMFlag"] = np.where(b_bad, table["PolPMFlag_A"], "B")
b_bad = np.isnan(table["dX_2000A_B"]) | np.isnan(table["dY_2000A_B"])
table["dX_2000A"] = np.where(b_bad, table["dX_2000A_A"], table["dX_2000A_B"])
table["dY_2000A"] = np.where(b_bad, table["dY_2000A_A"], table["dY_2000A_B"])
table["NutFlag"] = np.where(b_bad, table["NutFlag_A"], "B")
# Get the table index for the first row that has predictive values
# PolPMFlag_A IERS (I) or Prediction (P) flag for
# Bull. A polar motion values
# UT1Flag_A IERS (I) or Prediction (P) flag for
# Bull. A UT1-UTC values
# Since only 'P' and 'I' are possible and 'P' is guaranteed to come
# after 'I', we can use searchsorted for 100 times speed up over
# finding the first index where the flag equals 'P'.
p_index = min(
np.searchsorted(table["UT1Flag_A"], "P"),
np.searchsorted(table["PolPMFlag_A"], "P"),
)
table.meta["predictive_index"] = p_index
table.meta["predictive_mjd"] = table["MJD"][p_index].value
return table
@classmethod
def _substitute_iers_b(cls, table):
# See documentation in IERS_Auto.
return table
@classmethod
def read(cls, file=None, readme=None):
"""Read IERS-A table from a finals2000a.* file provided by USNO.
Parameters
----------
file : str
full path to ascii file holding IERS-A data.
Defaults to ``iers.IERS_A_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_A_README``.
Returns
-------
``IERS_A`` class instance
"""
if file is None:
file = IERS_A_FILE
if readme is None:
readme = IERS_A_README
iers_a = super().read(file, format="cds", readme=readme)
# Combine the A and B data for UT1-UTC and PM columns
table = cls._combine_a_b_columns(iers_a)
table.meta["data_path"] = file
table.meta["readme_path"] = readme
return table
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table."""
ut1flag = self["UT1Flag"][i]
source = np.ones_like(i) * FROM_IERS_B
source[ut1flag == "I"] = FROM_IERS_A
source[ut1flag == "P"] = FROM_IERS_A_PREDICTION
return source
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table."""
nutflag = self["NutFlag"][i]
source = np.ones_like(i) * FROM_IERS_B
source[nutflag == "I"] = FROM_IERS_A
source[nutflag == "P"] = FROM_IERS_A_PREDICTION
return source
def pm_source(self, i):
"""Set polar motion source flag for entries in IERS table."""
pmflag = self["PolPMFlag"][i]
source = np.ones_like(i) * FROM_IERS_B
source[pmflag == "I"] = FROM_IERS_A
source[pmflag == "P"] = FROM_IERS_A_PREDICTION
return source
class IERS_B(IERS):
"""IERS Table class targeted to IERS B, provided by IERS itself.
These are final values; see https://www.iers.org/IERS/EN/Home/home_node.html
Notes
-----
If the package IERS B file (``iers.IERS_B_FILE``) is out of date, a new
version can be downloaded from ``iers.IERS_B_URL``.
See `~astropy.utils.iers.IERS_B.read` for instructions on how to read
a pre-2023 style IERS B file (usually named ``eopc04_IAU2000.62-now``).
"""
iers_table = None
@classmethod
def read(cls, file=None, readme=None, data_start=6):
"""Read IERS-B table from a eopc04.* file provided by IERS.
Parameters
----------
file : str
full path to ascii file holding IERS-B data.
Defaults to package version, ``iers.IERS_B_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_B_README``.
data_start : int
Starting row. Default is 6, appropriate for standard IERS files.
Returns
-------
``IERS_B`` class instance
Notes
-----
To read a pre-2023 style IERS B file (usually named something like
``eopc04_IAU2000.62-now``), do something like this example with an
excerpt that is used for testing::
>>> from astropy.utils.iers import IERS_B
>>> from astropy.utils.data import get_pkg_data_filename
>>> old_style_file = get_pkg_data_filename(
... "tests/data/iers_b_old_style_excerpt",
... package="astropy.utils.iers")
>>> iers_b = IERS_B.read(
... old_style_file,
... readme=get_pkg_data_filename("data/ReadMe.eopc04_IAU2000",
... package="astropy.utils.iers"),
... data_start=14)
"""
if file is None:
file = IERS_B_FILE
if readme is None:
readme = IERS_B_README
table = super().read(file, format="cds", readme=readme, data_start=data_start)
table.meta["data_path"] = file
table.meta["readme_path"] = readme
return table
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table."""
return np.ones_like(i) * FROM_IERS_B
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table."""
return np.ones_like(i) * FROM_IERS_B
def pm_source(self, i):
"""Set PM source flag for entries in IERS table."""
return np.ones_like(i) * FROM_IERS_B
class IERS_Auto(IERS_A):
"""
Provide most-recent IERS data and automatically handle downloading
of updated values as necessary.
"""
iers_table = None
@classmethod
def open(cls):
"""If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to True (default), then open a recent version of the IERS-A
table with predictions for UT1-UTC and polar motion out to
approximately one year from now. If the available version of this file
is older than ``astropy.utils.iers.conf.auto_max_age`` days old
(or non-existent) then it will be downloaded over the network and cached.
If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to False then ``astropy.utils.iers.IERS()`` is returned. This
is normally the IERS-B table that is supplied with astropy.
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table.
Returns
-------
`~astropy.table.QTable` instance
With IERS (Earth rotation) data columns
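Examples
--------
A minimal sketch; whether anything is downloaded depends on
``astropy.utils.iers.conf.auto_download`` and on what is already cached::

    >>> from astropy.utils import iers
    >>> iers_tab = iers.IERS_Auto.open()  # doctest: +SKIP
    >>> iers_tab.meta.get('data_url')  # doctest: +SKIP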
"""
if not conf.auto_download:
cls.iers_table = IERS_B.open()
return cls.iers_table
all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
if cls.iers_table is not None:
# If the URL has changed, we need to redownload the file, so we
# should ignore the internally cached version.
if cls.iers_table.meta.get("data_url") in all_urls:
return cls.iers_table
for url in all_urls:
try:
filename = download_file(url, cache=True)
except Exception as err:
warn(f"failed to download {url}: {err}", IERSWarning)
continue
try:
cls.iers_table = cls.read(file=filename)
except Exception as err:
warn(f"malformed IERS table from {url}: {err}", IERSWarning)
continue
cls.iers_table.meta["data_url"] = url
break
else:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream if actually trying to interpolate
# predictive values.
warn("unable to download valid IERS file, using local IERS-B", IERSWarning)
cls.iers_table = IERS_B.open()
return cls.iers_table
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""Check that the indices from interpolation match those after clipping to the
valid table range. The IERS_Auto class is exempted as long as it has
sufficiently recent available data so the clipped interpolation is
always within the confidence bounds of current Earth rotation
knowledge.
"""
predictive_mjd = self.meta["predictive_mjd"]
# See explanation in _refresh_table_as_needed for these conditions
auto_max_age = _none_to_float(conf.auto_max_age)
if (
max_input_mjd > predictive_mjd
and self.time_now.mjd - predictive_mjd > auto_max_age
):
raise ValueError(INTERPOLATE_ERROR.format(auto_max_age))
def _refresh_table_as_needed(self, mjd):
"""Potentially update the IERS table in place depending on the requested
time values in ``mjd`` and the time span of the table.
For IERS_Auto the behavior is that the table is refreshed from the IERS
server if both of the following apply:
- Any of the requested IERS values are predictive. The IERS-A table
contains predictive data out for a year after the available
definitive values.
- The first predictive values are at least ``conf.auto_max_age`` days old.
In other words the IERS-A table was created by IERS long enough
ago that it can be considered stale for predictions.
"""
max_input_mjd = np.max(mjd)
now_mjd = self.time_now.mjd
# IERS-A table contains predictive data out for a year after
# the available definitive values.
fpi = self.meta["predictive_index"]
predictive_mjd = self.meta["predictive_mjd"]
# Update table in place if necessary
auto_max_age = _none_to_float(conf.auto_max_age)
# If auto_max_age is smaller than IERS update time then repeated downloads may
# occur without getting updated values (giving an IERSStaleWarning).
if auto_max_age < 10:
raise ValueError(
"IERS auto_max_age configuration value must be larger than 10 days"
)
if max_input_mjd > predictive_mjd and (now_mjd - predictive_mjd) > auto_max_age:
all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
# Get the latest version
try:
filename = download_file(all_urls[0], sources=all_urls, cache="update")
except Exception as err:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream when actually trying to interpolate
# predictive values.
warn(
AstropyWarning(
f'failed to download {" and ".join(all_urls)}: {err}.\nA'
" coordinate or time-related calculation might be compromised"
" or fail because the dates are not covered by the available"
' IERS file. See the "IERS data access" section of the'
" astropy documentation for additional information on working"
" offline."
)
)
return
new_table = self.__class__.read(file=filename)
new_table.meta["data_url"] = str(all_urls[0])
# New table has new values?
if new_table["MJD"][-1] > self["MJD"][-1]:
# Replace current values from the first predictive index through
# the end of the current table. This replacement is much faster than just
# deleting all rows and then using add_row for the whole duration.
new_fpi = np.searchsorted(
new_table["MJD"].value, predictive_mjd, side="right"
)
n_replace = len(self) - fpi
self[fpi:] = new_table[new_fpi : new_fpi + n_replace]
# Sanity check for continuity
if new_table["MJD"][new_fpi + n_replace] - self["MJD"][-1] != 1.0 * u.d:
raise ValueError("unexpected gap in MJD when refreshing IERS table")
# Now add new rows in place
for row in new_table[new_fpi + n_replace :]:
self.add_row(row)
self.meta.update(new_table.meta)
else:
warn(
IERSStaleWarning(
"IERS_Auto predictive values are older than"
f" {conf.auto_max_age} days but downloading the latest table"
" did not find newer values"
)
)
@classmethod
def _substitute_iers_b(cls, table):
"""Substitute IERS B values with those from a real IERS B table.
IERS-A has IERS-B values included, but for reasons unknown these
do not match the latest IERS-B values (see comments in #4436).
Here, we use the bundled astropy IERS-B table to overwrite the values
in the downloaded IERS-A table.
"""
iers_b = IERS_B.open()
# Substitute IERS-B values for existing B values in IERS-A table
mjd_b = table["MJD"][np.isfinite(table["UT1_UTC_B"])]
i0 = np.searchsorted(iers_b["MJD"], mjd_b[0], side="left")
i1 = np.searchsorted(iers_b["MJD"], mjd_b[-1], side="right")
iers_b = iers_b[i0:i1]
n_iers_b = len(iers_b)
# If there is overlap then replace IERS-A values from available IERS-B
if n_iers_b > 0:
# Sanity check that we are overwriting the correct values
if not u.allclose(table["MJD"][:n_iers_b], iers_b["MJD"]):
raise ValueError(
"unexpected mismatch when copying IERS-B values into IERS-A table."
)
# Finally do the overwrite
table["UT1_UTC_B"][:n_iers_b] = iers_b["UT1_UTC"]
table["PM_X_B"][:n_iers_b] = iers_b["PM_x"]
table["PM_Y_B"][:n_iers_b] = iers_b["PM_y"]
table["dX_2000A_B"][:n_iers_b] = iers_b["dX_2000A"]
table["dY_2000A_B"][:n_iers_b] = iers_b["dY_2000A"]
return table
class earth_orientation_table(ScienceState):
"""Default IERS table for Earth rotation and reference systems service.
These tables are used to calculate the offsets between ``UT1`` and ``UTC``
and for conversion to Earth-based coordinate systems.
The state itself is an IERS table, as an instance of one of the
`~astropy.utils.iers.IERS` classes. The default, the auto-updating
`~astropy.utils.iers.IERS_Auto` class, should suffice for most
purposes.
Examples
--------
To temporarily use the IERS-B file packaged with astropy::
>>> from astropy.utils import iers
>>> from astropy.time import Time
>>> iers_b = iers.IERS_B.open(iers.IERS_B_FILE)
>>> with iers.earth_orientation_table.set(iers_b):
... print(Time('2000-01-01').ut1.isot)
2000-01-01T00:00:00.355
To use the most recent IERS-A file for the whole session::
>>> iers_a = iers.IERS_A.open(iers.IERS_A_URL) # doctest: +SKIP
>>> iers.earth_orientation_table.set(iers_a) # doctest: +SKIP
<ScienceState earth_orientation_table: <IERS_A length=17463>...>
To go back to the default (of `~astropy.utils.iers.IERS_Auto`)::
>>> iers.earth_orientation_table.set(None) # doctest: +SKIP
<ScienceState earth_orientation_table: <IERS_Auto length=17428>...>
"""
_value = None
@classmethod
def validate(cls, value):
if value is None:
value = IERS_Auto.open()
if not isinstance(value, IERS):
raise ValueError("earth_orientation_table requires an IERS Table.")
return value
class LeapSeconds(QTable):
"""Leap seconds class, holding TAI-UTC differences.
The table should hold columns 'year', 'month', 'tai_utc'.
Methods are provided to initialize the table from IERS ``Leap_Second.dat``,
IETF/ntp ``leap-seconds.list``, or built-in ERFA/SOFA, and to update the
list used by ERFA.
Notes
-----
Astropy has a built-in ``iers.IERS_LEAP_SECONDS_FILE``. Up to date versions
can be downloaded from ``iers.IERS_LEAP_SECONDS_URL`` or
``iers.LEAP_SECONDS_LIST_URL``. Many systems also store a version
of ``leap-seconds.list`` for use with ``ntp`` (e.g., on Debian/Ubuntu
systems, ``/usr/share/zoneinfo/leap-seconds.list``).
To prevent querying internet resources if the available local leap second
file(s) are out of date, set ``iers.conf.auto_download = False``. This
must be done prior to performing any ``Time`` scale transformations related
to UTC (e.g. converting from UTC to TAI).
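Examples
--------
A minimal sketch; which file ends up being used depends on local files,
the download cache, and the ``iers.conf`` settings::

    >>> from astropy.utils.iers import LeapSeconds
    >>> leap_seconds = LeapSeconds.auto_open()  # doctest: +SKIP
    >>> leap_seconds.colnames  # doctest: +SKIP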
"""
# Note: Time instances in this class should use scale='tai' to avoid
# needing leap seconds in their creation or interpretation.
_re_expires = re.compile(r"^#.*File expires on[:\s]+(\d+\s\w+\s\d+)\s*$")
_expires = None
_auto_open_files = [
"erfa",
IERS_LEAP_SECOND_FILE,
"system_leap_second_file",
"iers_leap_second_auto_url",
"ietf_leap_second_auto_url",
]
"""Files or conf attributes to try in auto_open."""
@classmethod
def open(cls, file=None, cache=False):
"""Open a leap-second list.
Parameters
----------
file : path-like or None
Full local or network path to the file holding leap-second data,
for passing on to the various ``from_`` class methods.
If 'erfa', return the data used by the ERFA library.
If `None`, use default locations from file and configuration to
find a table that is not expired.
cache : bool
Whether to use cache. Defaults to False, since leap-second files
are regularly updated.
Returns
-------
leap_seconds : `~astropy.utils.iers.LeapSeconds`
Table with 'year', 'month', and 'tai_utc' columns, plus possibly
others.
Notes
-----
Bulletin C is released about 10 days after a possible leap second is
introduced, i.e., mid-January or mid-July. Expiration dates are thus
generally at least 150 days after the present. For the auto-loading,
a list comprised of the table shipped with astropy, and files and
URLs in `~astropy.utils.iers.Conf` are tried, returning the first
that is sufficiently new, or the newest among them all.
"""
if file is None:
return cls.auto_open()
if file.lower() == "erfa":
return cls.from_erfa()
if urlparse(file).netloc:
file = download_file(file, cache=cache)
# Just try both reading methods.
try:
return cls.from_iers_leap_seconds(file)
except Exception:
return cls.from_leap_seconds_list(file)
@staticmethod
def _today():
# Get current day in scale='tai' without going through a scale change
# (so we do not need leap seconds).
s = "{0.year:04d}-{0.month:02d}-{0.day:02d}".format(datetime.utcnow())
return Time(s, scale="tai", format="iso", out_subfmt="date")
@classmethod
def auto_open(cls, files=None):
"""Attempt to get an up-to-date leap-second list.
The routine will try the files in sequence until it finds one
whose expiration date is "good enough" (see below). If none
are good enough, it returns the one with the most recent expiration
date, warning if that file is expired.
For remote files that are cached already, the cached file is tried
first before attempting to retrieve it again.
Parameters
----------
files : list of path-like, optional
List of files/URLs to attempt to open. By default, uses
``cls._auto_open_files``.
Returns
-------
leap_seconds : `~astropy.utils.iers.LeapSeconds`
Up to date leap-second table
Notes
-----
Bulletin C is released about 10 days after a possible leap second is
introduced, i.e., mid-January or mid-July. Expiration dates are thus
generally at least 150 days after the present. We look for a file
that expires more than 180 - `~astropy.utils.iers.Conf.auto_max_age` days
after the present.
"""
offset = 180 - (30 if conf.auto_max_age is None else conf.auto_max_age)
good_enough = cls._today() + TimeDelta(offset, format="jd")
if files is None:
# Basic files to go over (entries in _auto_open_files can be
# configuration items, which we want to be sure are up to date).
files = [getattr(conf, f, f) for f in cls._auto_open_files]
# Remove empty entries.
files = [f for f in files if f]
# Our trials start with normal files and remote ones that are
# already in cache. The bools here indicate that the cache
# should be used.
trials = [
(f, True) for f in files if not urlparse(f).netloc or is_url_in_cache(f)
]
# If we are allowed to download, we try downloading new versions
# if none of the above worked.
if conf.auto_download:
trials += [(f, False) for f in files if urlparse(f).netloc]
self = None
err_list = []
# Go through all entries, and return the first one that
# is not expired, or the most up to date one.
for f, allow_cache in trials:
if not allow_cache:
clear_download_cache(f)
try:
trial = cls.open(f, cache=True)
except Exception as exc:
err_list.append(exc)
continue
if self is None or trial.expires > self.expires:
self = trial
self.meta["data_url"] = str(f)
if self.expires > good_enough:
break
if self is None:
raise ValueError(
"none of the files could be read. The "
f"following errors were raised:\n {err_list}"
)
if self.expires < self._today() and conf.auto_max_age is not None:
warn("leap-second file is expired.", IERSStaleWarning)
return self
@property
def expires(self):
"""The limit of validity of the table."""
return self._expires
@classmethod
def _read_leap_seconds(cls, file, **kwargs):
"""Read a file, identifying expiration by matching 'File expires'."""
expires = None
# Find expiration date.
with get_readable_fileobj(file) as fh:
lines = fh.readlines()
for line in lines:
match = cls._re_expires.match(line)
if match:
day, month, year = match.groups()[0].split()
month_nb = MONTH_ABBR.index(month[:3]) + 1
expires = Time(
f"{year}-{month_nb:02d}-{day}", scale="tai", out_subfmt="date"
)
break
else:
raise ValueError(f"did not find expiration date in {file}")
self = cls.read(lines, format="ascii.no_header", **kwargs)
self._expires = expires
return self
@classmethod
def from_iers_leap_seconds(cls, file=IERS_LEAP_SECOND_FILE):
"""Create a table from a file like the IERS ``Leap_Second.dat``.
Parameters
----------
file : path-like, optional
Full local or network path to the file holding leap-second data
in a format consistent with that used by IERS. By default, uses
``iers.IERS_LEAP_SECOND_FILE``.
Notes
-----
The file *must* contain the expiration date in a comment line, like
'# File expires on 28 June 2020'
"""
return cls._read_leap_seconds(
file, names=["mjd", "day", "month", "year", "tai_utc"]
)
@classmethod
def from_leap_seconds_list(cls, file):
"""Create a table from a file like the IETF ``leap-seconds.list``.
Parameters
----------
file : path-like, optional
Full local or network path to the file holding leap-second data
in a format consistent with that used by IETF. Up to date versions
can be retrieved from ``iers.IETF_LEAP_SECOND_URL``.
Notes
-----
The file *must* contain the expiration date in a comment line, like
'# File expires on: 28 June 2020'
"""
from astropy.io.ascii import convert_numpy # Here to avoid circular import
names = ["ntp_seconds", "tai_utc", "comment", "day", "month", "year"]
# Note: ntp_seconds does not fit in 32 bit, so causes problems on
# 32-bit systems without the np.int64 converter.
self = cls._read_leap_seconds(
file,
names=names,
include_names=names[:2],
converters={"ntp_seconds": [convert_numpy(np.int64)]},
)
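# NTP seconds count from 1900-01-01, which corresponds to MJD 15020
# (86400 seconds per day).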
self["mjd"] = (self["ntp_seconds"] / 86400 + 15020).round()
# Note: cannot use Time.ymdhms, since that might require leap seconds.
isot = Time(self["mjd"], format="mjd", scale="tai").isot
ymd = np.array(
[[int(part) for part in t.partition("T")[0].split("-")] for t in isot]
)
self["year"], self["month"], self["day"] = ymd.T
return self
@classmethod
def from_erfa(cls, built_in=False):
"""Create table from the leap-second list in ERFA.
Parameters
----------
built_in : bool
If `False` (default), retrieve the list currently used by ERFA,
which may have been updated. If `True`, retrieve the list shipped
with erfa.
"""
current = cls(erfa.leap_seconds.get())
current._expires = Time(
"{0.year:04d}-{0.month:02d}-{0.day:02d}".format(erfa.leap_seconds.expires),
scale="tai",
)
if not built_in:
return current
try:
erfa.leap_seconds.set(None) # reset to defaults
return cls.from_erfa(built_in=False)
finally:
erfa.leap_seconds.set(current)
def update_erfa_leap_seconds(self, initialize_erfa=False):
"""Add any leap seconds not already present to the ERFA table.
This method matches leap seconds with those present in the ERFA table,
and extends the latter as necessary.
Parameters
----------
initialize_erfa : bool, or 'only', or 'empty'
Initialize the ERFA leap second table to its built-in value before
trying to expand it. This is generally not needed but can help
in case it somehow got corrupted. If equal to 'only', the ERFA
table is reinitialized and no attempt is made to update it.
If 'empty', the leap second table is emptied before updating, i.e.,
it is overwritten altogether (note that this may break things in
surprising ways, as most leap second tables do not include pre-1970
pseudo leap-seconds; you were warned).
Returns
-------
n_update : int
Number of items updated.
Raises
------
ValueError
If the leap seconds in the table are not on 1st of January or July,
or if the matches are inconsistent. This would normally suggest
a corrupted leap second table, but might also indicate that the
ERFA table was corrupted. If needed, the ERFA table can be reset
by calling this method with an appropriate value for
``initialize_erfa``.
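Examples
--------
A minimal sketch; the number of added entries depends on the leap-second
list already present in ERFA::

    >>> from astropy.utils.iers import LeapSeconds
    >>> table = LeapSeconds.auto_open()  # doctest: +SKIP
    >>> table.update_erfa_leap_seconds()  # doctest: +SKIP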
"""
if initialize_erfa == "empty":
# Initialize to empty and update is the same as overwrite.
erfa.leap_seconds.set(self)
return len(self)
if initialize_erfa:
erfa.leap_seconds.set()
if initialize_erfa == "only":
return 0
return erfa.leap_seconds.update(self)
|
1d47016a5092abad058dc89109a2afcb649a181d45cb05dad76878f4a9ef3465 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains a class that makes it simple to stream out well-formed and
nicely-indented XML.
"""
# STDLIB
import contextlib
import textwrap
try:
from . import _iterparser
except ImportError:
def xml_escape_cdata(s):
"""
Escapes &, < and > in an XML CDATA string.
"""
s = s.replace("&", "&")
s = s.replace("<", "<")
s = s.replace(">", ">")
return s
def xml_escape(s):
"""
Escapes &, ', ", < and > in an XML attribute value.
"""
s = s.replace("&", "&")
s = s.replace("'", "'")
s = s.replace('"', """)
s = s.replace("<", "<")
s = s.replace(">", ">")
return s
else:
xml_escape_cdata = _iterparser.escape_xml_cdata
xml_escape = _iterparser.escape_xml
class XMLWriter:
"""
A class to write well-formed and nicely indented XML.
Use like this::
w = XMLWriter(fh)
with w.tag('html'):
with w.tag('body'):
w.data('This is the content')
Which produces::
<html>
<body>
This is the content
</body>
</html>
"""
def __init__(self, file):
"""
Parameters
----------
file : writable file-like
"""
self.write = file.write
if hasattr(file, "flush"):
self.flush = file.flush
self._open = 0 # true if start tag is open
self._tags = []
self._data = []
self._indentation = " " * 64
self.xml_escape_cdata = xml_escape_cdata
self.xml_escape = xml_escape
def _flush(self, indent=True, wrap=False):
"""
Flush internal buffers.
"""
if self._open:
if indent:
self.write(">\n")
else:
self.write(">")
self._open = 0
if self._data:
data = "".join(self._data)
if wrap:
indent = self.get_indentation_spaces(1)
data = textwrap.fill(
data, initial_indent=indent, subsequent_indent=indent
)
self.write("\n")
self.write(self.xml_escape_cdata(data))
self.write("\n")
self.write(self.get_indentation_spaces())
else:
self.write(self.xml_escape_cdata(data))
self._data = []
def start(self, tag, attrib={}, **extra):
"""
Opens a new element. Attributes can be given as keyword
arguments, or as a string/string dictionary. The method
returns an opaque identifier that can be passed to the
:meth:`close` method, to close all open elements up to and
including this one.
Parameters
----------
tag : str
The element name
attrib : dict of str -> str
Attribute dictionary. Alternatively, attributes can
be given as keyword arguments.
Returns
-------
id : int
Returns an element identifier.
"""
self._flush()
# This is just busy work -- we know our tag names are clean
# tag = xml_escape_cdata(tag)
self._data = []
self._tags.append(tag)
self.write(self.get_indentation_spaces(-1))
self.write(f"<{tag}")
if attrib or extra:
attrib = attrib.copy()
attrib.update(extra)
attrib = list(attrib.items())
attrib.sort()
for k, v in attrib:
if v is not None:
# This is just busy work -- we know our keys are clean
# k = xml_escape_cdata(k)
v = self.xml_escape(v)
self.write(f' {k}="{v}"')
self._open = 1
return len(self._tags)
@contextlib.contextmanager
def xml_cleaning_method(self, method="escape_xml", **clean_kwargs):
"""Context manager to control how XML data tags are cleaned (escaped) to
remove potentially unsafe characters or constructs.
The default (``method='escape_xml'``) applies brute-force escaping of
certain key XML characters like ``<``, ``>``, and ``&`` to ensure that
the escaped data cannot be interpreted as XML markup.
In order to explicitly allow certain XML tags (e.g. link reference or
emphasis tags), use ``method='bleach_clean'``. This sanitizes the data
string using the ``clean`` function of the
`bleach <https://bleach.readthedocs.io/en/latest/clean.html>`_ package.
Any additional keyword arguments will be passed directly to the
``clean`` function.
Finally, use ``method='none'`` to disable any sanitization. This should
be used sparingly.
Example::
w = writer.XMLWriter(ListWriter(lines))
with w.xml_cleaning_method('bleach_clean'):
w.start('td')
w.data('<a href="https://google.com">google.com</a>')
w.end()
Parameters
----------
method : str
Cleaning method. Allowed values are "escape_xml",
"bleach_clean", and "none".
**clean_kwargs : keyword args
Additional keyword args that are passed to the
bleach.clean() function.
"""
current_xml_escape_cdata = self.xml_escape_cdata
if method == "bleach_clean":
# NOTE: bleach is imported locally to avoid importing it when
# it is not necessary
try:
import bleach
except ImportError:
raise ValueError(
"bleach package is required when HTML escaping is disabled.\n"
'Use "pip install bleach".'
)
if clean_kwargs is None:
clean_kwargs = {}
self.xml_escape_cdata = lambda x: bleach.clean(x, **clean_kwargs)
elif method == "none":
self.xml_escape_cdata = lambda x: x
elif method != "escape_xml":
raise ValueError(
'allowed values of method are "escape_xml", "bleach_clean", and "none"'
)
yield
self.xml_escape_cdata = current_xml_escape_cdata
@contextlib.contextmanager
def tag(self, tag, attrib={}, **extra):
"""
A convenience method for creating wrapper elements using the
``with`` statement.
Examples
--------
>>> with writer.tag('foo'): # doctest: +SKIP
... writer.element('bar')
... # </foo> is implicitly closed here
...
Parameters are the same as to `start`.
"""
self.start(tag, attrib, **extra)
yield
self.end(tag)
def comment(self, comment):
"""
Adds a comment to the output stream.
Parameters
----------
comment : str
Comment text, as a Unicode string.
"""
self._flush()
self.write(self.get_indentation_spaces())
self.write(f"<!-- {self.xml_escape_cdata(comment)} -->\n")
def data(self, text):
"""
Adds character data to the output stream.
Parameters
----------
text : str
Character data, as a Unicode string.
"""
self._data.append(text)
def end(self, tag=None, indent=True, wrap=False):
"""
Closes the current element (opened by the most recent call to
`start`).
Parameters
----------
tag : str
Element name. If given, the tag must match the start tag.
If omitted, the current element is closed.
"""
if tag:
if not self._tags:
raise ValueError(f"unbalanced end({tag})")
if tag != self._tags[-1]:
raise ValueError(f"expected end({self._tags[-1]}), got {tag}")
else:
if not self._tags:
raise ValueError("unbalanced end()")
tag = self._tags.pop()
if self._data:
self._flush(indent, wrap)
elif self._open:
self._open = 0
self.write("/>\n")
return
if indent:
self.write(self.get_indentation_spaces())
self.write(f"</{tag}>\n")
def close(self, id):
"""
Closes open elements, up to (and including) the element identified
by the given identifier.
Parameters
----------
id : int
Element identifier, as returned by the `start` method.
"""
while len(self._tags) > id:
self.end()
def element(self, tag, text=None, wrap=False, attrib={}, **extra):
"""
Adds an entire element. This is the same as calling `start`,
`data`, and `end` in sequence. The ``text`` argument
can be omitted.
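Example (tag name, text and attribute here are illustrative)::

    w.element('td', 'cell text', align='right')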
"""
self.start(tag, attrib, **extra)
if text:
self.data(text)
self.end(indent=False, wrap=wrap)
def flush(self):
pass # replaced by the constructor
def get_indentation(self):
"""
Returns the number of indentation levels the file is currently
in.
"""
return len(self._tags)
def get_indentation_spaces(self, offset=0):
"""
Returns a string of spaces that matches the current
indentation level.
"""
return self._indentation[: len(self._tags) + offset]
@staticmethod
def object_attrs(obj, attrs):
"""
Converts a set of attributes on an object into a dictionary for
use by the `XMLWriter`.
Parameters
----------
obj : object
Any Python object
attrs : sequence of str
Attribute names to pull from the object
Returns
-------
attrs : dict
Maps attribute names to the values retrieved from
``obj.attr``. If any of the attributes is `None`, it will
not appear in the output dictionary.
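Example (the class and attribute names here are made up)::

    class Source:
        ref_id = 'abc'
        ucd = None

    XMLWriter.object_attrs(Source(), ['ref_id', 'ucd'])
    # -> {'ref-id': 'abc'}   ('ucd' is None, so it is dropped)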
"""
d = {}
for attr in attrs:
if getattr(obj, attr) is not None:
d[attr.replace("_", "-")] = str(getattr(obj, attr))
return d
|
d61009b904a4d8e0b459004de053603f953ad16efc1450fe791c6b3e0835b967 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from hypothesis import given
from hypothesis.extra.numpy import basic_indices
from numpy.testing import assert_equal
from astropy.utils.shapes import check_broadcast, simplify_basic_index, unbroadcast
def test_check_broadcast():
assert check_broadcast((10, 1), (3,)) == (10, 3)
assert check_broadcast((10, 1), (3,), (4, 1, 1, 3)) == (4, 1, 10, 3)
with pytest.raises(ValueError):
check_broadcast((10, 2), (3,))
with pytest.raises(ValueError):
check_broadcast((10, 1), (3,), (4, 1, 2, 3))
def test_unbroadcast():
x = np.array([1, 2, 3])
y = np.broadcast_to(x, (2, 4, 3))
z = unbroadcast(y)
assert z.shape == (3,)
np.testing.assert_equal(z, x)
x = np.ones((3, 5))
y = np.broadcast_to(x, (5, 3, 5))
z = unbroadcast(y)
assert z.shape == (3, 5)
TEST_SHAPE = (13, 16, 4, 90)
class TestSimplifyBasicIndex:
# We use a class here so that we can allocate the data once and for all to
# speed up the testing.
def setup_class(self):
self.shape = TEST_SHAPE
self.data = np.random.random(TEST_SHAPE)
@given(basic_indices(TEST_SHAPE))
def test_indexing(self, index):
new_index = simplify_basic_index(index, shape=self.shape)
assert_equal(self.data[index], self.data[new_index])
assert isinstance(new_index, tuple)
assert len(new_index) == len(self.shape)
for idim, idx in enumerate(new_index):
assert isinstance(idx, (slice, int))
if isinstance(idx, int):
assert idx >= 0
else:
assert isinstance(idx.start, int)
assert idx.start >= 0
assert idx.start < TEST_SHAPE[idim]
if idx.stop is not None:
assert isinstance(idx.stop, int)
assert idx.stop >= 0
assert idx.stop <= TEST_SHAPE[idim]
assert isinstance(idx.step, int)
|
31a8fd965eba156068d6cf7258255d512417a703cf71ae9f4d983e99a45db381 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import base64
import contextlib
import errno
import hashlib
import io
import itertools
import os
import pathlib
import platform
import random
import shutil
import stat
import sys
import tempfile
import urllib.error
import urllib.parse
import urllib.request
import warnings
from concurrent.futures import ThreadPoolExecutor
from itertools import islice
from tempfile import NamedTemporaryFile, TemporaryDirectory
import py.path
import pytest
import astropy.utils.data
from astropy import units as _u # u is taken
from astropy.config import paths
from astropy.utils.data import (
CacheDamaged,
CacheMissingWarning,
_deltemps,
_get_download_cache_loc,
_tempfilestodel,
cache_contents,
cache_total_size,
check_download_cache,
check_free_space_in_dir,
clear_download_cache,
compute_hash,
conf,
download_file,
download_files_in_parallel,
export_download_cache,
get_cached_urls,
get_file_contents,
get_free_space_in_dir,
get_pkg_data_contents,
get_pkg_data_filename,
get_pkg_data_fileobj,
get_pkg_data_path,
get_readable_fileobj,
import_download_cache,
import_file_to_cache,
is_url,
is_url_in_cache,
)
from astropy.utils.exceptions import AstropyWarning
CI = os.environ.get("CI", "false") == "true"
TESTURL = "http://www.astropy.org"
TESTURL2 = "http://www.astropy.org/about.html"
TESTURL_SSL = "https://www.astropy.org"
TESTLOCAL = get_pkg_data_filename(os.path.join("data", "local.dat"))
# NOTE: Python can be built without bz2 or lzma.
from astropy.utils.compat.optional_deps import HAS_BZ2, HAS_LZMA
# For when we need "some" test URLs
FEW = 5
# For stress testing the locking system using multiprocessing
N_PARALLEL_HAMMER = 5 # as high as 500 to replicate a bug
# For stress testing the locking system using threads
# (cheaper, works with coverage)
N_THREAD_HAMMER = 10 # as high as 1000 to replicate a bug
def can_rename_directory_in_use():
with TemporaryDirectory() as d:
d1 = os.path.join(d, "a")
d2 = os.path.join(d, "b")
f1 = os.path.join(d1, "file")
os.mkdir(d1)
with open(f1, "w") as f:
f.write("some contents\n")
try:
with open(f1):
os.rename(d1, d2)
except PermissionError:
return False
else:
return True
CAN_RENAME_DIRECTORY_IN_USE = can_rename_directory_in_use()
def url_to(path):
return pathlib.Path(path).resolve().as_uri()
@pytest.fixture
def valid_urls(tmp_path):
def _valid_urls(tmp_path):
for i in itertools.count():
c = os.urandom(16).hex()
fn = tmp_path / f"valid_{str(i)}"
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
yield u, c
return _valid_urls(tmp_path)
@pytest.fixture
def invalid_urls(tmp_path):
def _invalid_urls(tmp_path):
for i in itertools.count():
fn = tmp_path / f"invalid_{str(i)}"
if not os.path.exists(fn):
yield url_to(fn)
return _invalid_urls(tmp_path)
@pytest.fixture
def temp_cache(tmp_path):
with paths.set_temp_cache(tmp_path):
yield None
check_download_cache()
def change_tree_permission(d, writable=False):
if writable:
dirperm = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR | stat.S_IWUSR
else:
dirperm = stat.S_IRUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR
for dirpath, dirnames, filenames in os.walk(d):
os.chmod(dirpath, dirperm)
for f in filenames:
os.chmod(os.path.join(dirpath, f), fileperm)
def is_dir_readonly(d):
try:
with NamedTemporaryFile(dir=d):
return False
except PermissionError:
return True
@contextlib.contextmanager
def readonly_dir(d):
try:
change_tree_permission(d, writable=False)
yield
finally:
change_tree_permission(d, writable=True)
@pytest.fixture
def readonly_cache(tmp_path, valid_urls):
with TemporaryDirectory(dir=tmp_path) as d:
# other fixtures use the same tmp_path so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = {u for u, c in islice(valid_urls, FEW)}
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
with readonly_dir(d):
if not is_dir_readonly(d):
pytest.skip("Unable to make directory readonly")
yield urls
assert set(d.iterdir()) == files
check_download_cache()
@pytest.fixture
def fake_readonly_cache(tmp_path, valid_urls, monkeypatch):
def no_mkdir(path, mode=None):
raise OSError(errno.EPERM, "os.mkdir monkeypatched out")
def no_mkdtemp(*args, **kwargs):
"""On Windows, mkdtemp uses mkdir in a loop and therefore hangs
with it monkeypatched out.
"""
raise OSError(errno.EPERM, "os.mkdtemp monkeypatched out")
def no_TemporaryDirectory(*args, **kwargs):
raise OSError(errno.EPERM, "_SafeTemporaryDirectory monkeypatched out")
with TemporaryDirectory(dir=tmp_path) as d:
# other fixtures use the same tmp_path so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = {u for u, c in islice(valid_urls, FEW)}
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
monkeypatch.setattr(os, "mkdir", no_mkdir)
monkeypatch.setattr(tempfile, "mkdtemp", no_mkdtemp)
monkeypatch.setattr(
astropy.utils.data, "_SafeTemporaryDirectory", no_TemporaryDirectory
)
yield urls
assert set(d.iterdir()) == files
check_download_cache()
def test_download_file_basic(valid_urls, temp_cache):
u, c = next(valid_urls)
assert get_file_contents(download_file(u, cache=False)) == c
assert not is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache miss
assert is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache hit
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_download_file_absolute_path(valid_urls, temp_cache):
def is_abs(p):
return p == os.path.abspath(p)
u, c = next(valid_urls)
assert is_abs(download_file(u, cache=False)) # no cache
assert is_abs(download_file(u, cache=True)) # not in cache
assert is_abs(download_file(u, cache=True)) # in cache
for k, v in cache_contents().items():
assert is_abs(v)
def test_unicode_url(valid_urls, temp_cache):
u, c = next(valid_urls)
unicode_url = "http://é—☃—è.com"
download_file(unicode_url, cache=False, sources=[u])
download_file(unicode_url, cache=True, sources=[u])
download_file(unicode_url, cache=True, sources=[])
assert is_url_in_cache(unicode_url)
assert unicode_url in cache_contents()
def test_too_long_url(valid_urls, temp_cache):
u, c = next(valid_urls)
long_url = "http://" + "a" * 256 + ".com"
download_file(long_url, cache=False, sources=[u])
download_file(long_url, cache=True, sources=[u])
download_file(long_url, cache=True, sources=[])
def test_case_collision(valid_urls, temp_cache):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f1 = download_file("http://example.com/thing", cache=True, sources=[u])
f2 = download_file("http://example.com/THING", cache=True, sources=[u2])
assert f1 != f2
assert get_file_contents(f1) != get_file_contents(f2)
def test_domain_name_case(valid_urls, temp_cache):
u, c = next(valid_urls)
download_file("http://Example.com/thing", cache=True, sources=[u])
assert is_url_in_cache("http://EXAMPLE.com/thing")
download_file("http://EXAMPLE.com/thing", cache=True, sources=[])
assert is_url_in_cache("Http://example.com/thing")
download_file("Http://example.com/thing", cache=True, sources=[])
@pytest.mark.remote_data(source="astropy")
def test_download_nocache_from_internet():
fnout = download_file(TESTURL, cache=False)
assert os.path.isfile(fnout)
@pytest.fixture
def a_binary_file(tmp_path):
fn = tmp_path / "file"
b_contents = b"\xde\xad\xbe\xef"
with open(fn, "wb") as f:
f.write(b_contents)
yield fn, b_contents
@pytest.fixture
def a_file(tmp_path):
fn = tmp_path / "file.txt"
contents = "contents\n"
with open(fn, "w") as f:
f.write(contents)
yield fn, contents
def test_temp_cache(tmp_path):
dldir0 = _get_download_cache_loc()
check_download_cache()
with paths.set_temp_cache(tmp_path):
dldir1 = _get_download_cache_loc()
check_download_cache()
assert dldir1 != dldir0
dldir2 = _get_download_cache_loc()
check_download_cache()
assert dldir2 != dldir1
assert dldir2 == dldir0
# Check that things are okay even if we exit via an exception
class Special(Exception):
pass
try:
with paths.set_temp_cache(tmp_path):
dldir3 = _get_download_cache_loc()
check_download_cache()
assert dldir3 == dldir1
raise Special
except Special:
pass
dldir4 = _get_download_cache_loc()
check_download_cache()
assert dldir4 != dldir3
assert dldir4 == dldir0
@pytest.mark.parametrize("parallel", [False, True])
def test_download_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache, parallel
):
# This is a combined test because the parallel version triggered a nasty
# bug and I was trying to track it down by comparing with the non-parallel
# version. I think the bug was that the parallel downloader didn't respect
# temporary cache settings.
# Make a big list of test URLs
u, c = next(valid_urls)
# as tuples (URL, right_content, wrong_content)
urls = [(u, c, None)]
# where to download the contents
sources = {}
# Set up some URLs to download where the "true" URL is not in the sources
# list; make the true URL valid with different contents so we can tell if
# it was loaded by mistake.
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
# For many of them the sources list starts with invalid URLs
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
# Now fetch them all
if parallel:
rs = download_files_in_parallel(
[u for (u, c, c_bad) in urls], cache=True, sources=sources
)
else:
rs = [
download_file(u, cache=True, sources=sources.get(u, None))
for (u, c, c_bad) in urls
]
assert len(rs) == len(urls)
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
assert is_url_in_cache(u)
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_download_file_threaded_many(temp_cache, valid_urls):
"""Hammer download_file with multiple threaded requests.
The goal is to stress-test the locking system. Normal parallel downloading
also does this but coverage tools lose track of which paths are explored.
"""
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(lambda u: download_file(u, cache=True), [u for (u, c) in urls]))
check_download_cache()
assert len(r) == len(urls)
for r, (u, c) in zip(r, urls):
assert get_file_contents(r) == c
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_threaded_segfault(valid_urls):
"""Demonstrate urllib's segfault."""
def slurp_url(u):
with urllib.request.urlopen(u) as remote:
block = True
while block:
block = remote.read(1024)
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
list(P.map(lambda u: slurp_url(u), [u for (u, c) in urls]))
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_download_file_threaded_many_partial_success(
temp_cache, valid_urls, invalid_urls
):
"""Hammer download_file with multiple threaded requests.
Because some of these requests fail, the locking context manager is
exercised with exceptions as well as success returns. I do not expect many
surprises from the threaded version, but the process version gave trouble
here.
"""
urls = []
contents = {}
for (u, c), i in islice(zip(valid_urls, invalid_urls), N_THREAD_HAMMER):
urls.append(u)
contents[u] = c
urls.append(i)
def get(u):
try:
return download_file(u, cache=True)
except OSError:
return None
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(get, urls))
check_download_cache()
assert len(r) == len(urls)
for r, u in zip(r, urls):
if u in contents:
assert get_file_contents(r) == contents[u]
else:
assert r is None
def test_clear_download_cache(valid_urls):
u1, c1 = next(valid_urls)
download_file(u1, cache=True)
u2, c2 = next(valid_urls)
download_file(u2, cache=True)
assert is_url_in_cache(u2)
clear_download_cache(u2)
assert not is_url_in_cache(u2)
assert is_url_in_cache(u1)
u3, c3 = next(valid_urls)
f3 = download_file(u3, cache=True)
assert is_url_in_cache(u3)
clear_download_cache(f3)
assert not is_url_in_cache(u3)
assert is_url_in_cache(u1)
u4, c4 = next(valid_urls)
f4 = download_file(u4, cache=True)
assert is_url_in_cache(u4)
clear_download_cache(compute_hash(f4))
assert not is_url_in_cache(u4)
assert is_url_in_cache(u1)
def test_clear_download_multiple_references_doesnt_corrupt_storage(
temp_cache, tmp_path
):
"""Check that files with the same hash don't confuse the storage."""
content = "Test data; doesn't matter much.\n"
def make_url():
with NamedTemporaryFile("w", dir=tmp_path, delete=False) as f:
f.write(content)
url = url_to(f.name)
clear_download_cache(url)
filename = download_file(url, cache=True)
return url, filename
a_url, a_filename = make_url()
clear_download_cache(a_filename)
assert not is_url_in_cache(a_url)
f_url, f_filename = make_url()
g_url, g_filename = make_url()
assert f_url != g_url
assert is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
clear_download_cache(f_url)
assert not is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
assert os.path.exists(
g_filename
), "Contents should not be deleted while a reference exists"
clear_download_cache(g_url)
assert not os.path.exists(
g_filename
), "No reference exists any more, file should be deleted"
@pytest.mark.parametrize("use_cache", [False, True])
def test_download_file_local_cache_survives(tmp_path, temp_cache, use_cache):
"""Confirm that downloading a local file does not delete it.
When implemented with urlretrieve (rather than urlopen) local files are
not copied to create temporaries, so importing them to the cache deleted
the original from wherever it was in the filesystem. I lost some built-in
astropy data.
"""
fn = tmp_path / "file"
contents = "some text"
with open(fn, "w") as f:
f.write(contents)
u = url_to(fn)
f = download_file(u, cache=use_cache)
assert fn not in _tempfilestodel, "File should not be deleted!"
assert os.path.isfile(fn), "File should not be deleted!"
assert get_file_contents(f) == contents
def test_sources_normal(temp_cache, valid_urls, invalid_urls):
primary, contents = next(valid_urls)
fallback1 = next(invalid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_fallback(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_ignore_primary(temp_cache, valid_urls, invalid_urls):
primary, bogus = next(valid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_multiple(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_sources_multiple_missing(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2 = next(invalid_urls)
with pytest.raises(urllib.error.URLError):
download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert not is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_update_url(tmp_path, temp_cache):
with TemporaryDirectory(dir=tmp_path) as d:
f_name = os.path.join(d, "f")
with open(f_name, "w") as f:
f.write("old")
f_url = url_to(f.name)
assert get_file_contents(download_file(f_url, cache=True)) == "old"
with open(f_name, "w") as f:
f.write("new")
assert get_file_contents(download_file(f_url, cache=True)) == "old"
assert get_file_contents(download_file(f_url, cache="update")) == "new"
# Now the URL doesn't exist any more.
assert not os.path.exists(f_name)
with pytest.raises(urllib.error.URLError):
# Direct download should fail
download_file(f_url, cache=False)
assert (
get_file_contents(download_file(f_url, cache=True)) == "new"
), "Cached version should still exist"
with pytest.raises(urllib.error.URLError):
# cannot download new version to check for updates
download_file(f_url, cache="update")
assert (
get_file_contents(download_file(f_url, cache=True)) == "new"
), "Failed update should not remove the current version"
@pytest.mark.remote_data(source="astropy")
def test_download_noprogress():
fnout = download_file(TESTURL, cache=False, show_progress=False)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_cache():
download_dir = _get_download_cache_loc()
# Download the test URL and make sure it exists, then clear just that
# URL and make sure it got deleted.
fnout = download_file(TESTURL, cache=True)
assert os.path.isdir(download_dir)
assert os.path.isfile(fnout)
clear_download_cache(TESTURL)
assert not os.path.exists(fnout)
# Clearing download cache succeeds even if the URL does not exist.
clear_download_cache("http://this_was_never_downloaded_before.com")
# Make sure lockdir was released
lockdir = os.path.join(download_dir, "lock")
assert not os.path.isdir(lockdir), "Cache dir lock was not released!"
@pytest.mark.remote_data(source="astropy")
def test_download_certificate_verification_failed():
"""Tests for https://github.com/astropy/astropy/pull/10434"""
# First test the expected exception when download fails due to a
# certificate verification error; we simulate this by passing a bogus
# CA directory to the ssl_context argument
ssl_context = {"cafile": None, "capath": "/does/not/exist"}
msg = f"Verification of TLS/SSL certificate at {TESTURL_SSL} failed"
with pytest.raises(urllib.error.URLError, match=msg):
download_file(TESTURL_SSL, cache=False, ssl_context=ssl_context)
with pytest.warns(AstropyWarning, match=msg) as warning_lines:
fnout = download_file(
TESTURL_SSL, cache=False, ssl_context=ssl_context, allow_insecure=True
)
assert len(warning_lines) == 1
assert os.path.isfile(fnout)
def test_download_cache_after_clear(tmp_path, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
# Test issues raised in #4427 with clear_download_cache() without a URL,
# followed by subsequent download.
download_dir = _get_download_cache_loc()
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
clear_download_cache()
assert not os.path.exists(fnout)
assert not os.path.exists(download_dir)
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_parallel_from_internet_works(temp_cache):
main_url = conf.dataurl
mirror_url = conf.dataurl_mirror
fileloc = "intersphinx/README"
urls = []
sources = {}
for s in ["", fileloc]:
urls.append(main_url + s)
sources[urls[-1]] = [urls[-1], mirror_url + s]
fnout = download_files_in_parallel(urls, sources=sources)
assert all(os.path.isfile(f) for f in fnout), fnout
@pytest.mark.parametrize("method", [None, "spawn"])
def test_download_parallel_fills_cache(tmp_path, valid_urls, method):
urls = []
# tmp_path is shared between many tests, and that can cause weird
# interactions if we set the temporary cache too directly
with paths.set_temp_cache(tmp_path):
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel(
[u for (u, c) in urls], multiprocessing_start_method=method
)
assert len(rs) == len(urls)
url_set = {u for (u, c) in urls}
assert url_set <= set(get_cached_urls())
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
check_download_cache()
assert not url_set.intersection(get_cached_urls())
check_download_cache()
def test_download_parallel_with_empty_sources(valid_urls, temp_cache):
urls = []
sources = {}
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel([u for (u, c) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c) in urls)
# assert u <= set(get_cached_urls())
check_download_cache()
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
def test_download_parallel_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache
):
u, c = next(valid_urls)
urls = [(u, c, None)]
sources = {}
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
rs = download_files_in_parallel([u for (u, c, c_bad) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c, c_bad) in urls)
# assert u <= set(get_cached_urls())
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
def test_download_parallel_many(temp_cache, valid_urls):
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
r = download_files_in_parallel([u for (u, c) in td])
assert len(r) == len(td)
for r, (u, c) in zip(r, td):
assert get_file_contents(r) == c
def test_download_parallel_partial_success(temp_cache, valid_urls, invalid_urls):
"""Check that a partially successful download works.
Even in the presence of many requested URLs, presumably hitting all the
parallelism this system can manage, a download failure leads to a tidy
shutdown.
"""
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
u_bad = next(invalid_urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel([u_bad] + [u for (u, c) in td])
# Actually some files may get downloaded, others not.
# Is this good? Should we stubbornly keep trying?
# assert not any([is_url_in_cache(u) for (u, c) in td])
@pytest.mark.slow
def test_download_parallel_partial_success_lock_safe(
temp_cache, valid_urls, invalid_urls
):
"""Check that a partially successful parallel download leaves the cache unlocked.
This needs to be repeated many times because race conditions are what cause
this sort of thing, especially situations where a process might be forcibly
shut down while it holds the lock.
"""
s = random.getstate()
try:
random.seed(0)
for _ in range(N_PARALLEL_HAMMER):
td = list(islice(valid_urls, FEW))
u_bad = next(invalid_urls)
urls = [u_bad] + [u for (u, c) in td]
random.shuffle(urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel(urls)
finally:
random.setstate(s)
def test_download_parallel_update(temp_cache, tmp_path):
td = []
for i in range(N_PARALLEL_HAMMER):
c = f"{i:04d}"
fn = tmp_path / c
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
clear_download_cache(u)
td.append((fn, u, c))
r1 = download_files_in_parallel([u for (fn, u, c) in td])
assert len(r1) == len(td)
for r_1, (fn, u, c) in zip(r1, td):
assert get_file_contents(r_1) == c
td2 = []
for fn, u, c in td:
c_plus = f"{c} updated"
fn = tmp_path / c
with open(fn, "w") as f:
f.write(c_plus)
td2.append((fn, u, c, c_plus))
r2 = download_files_in_parallel([u for (fn, u, c) in td], cache=True)
assert len(r2) == len(td)
for r_2, (fn, u, c, c_plus) in zip(r2, td2):
assert get_file_contents(r_2) == c
assert c != c_plus
r3 = download_files_in_parallel([u for (fn, u, c) in td], cache="update")
assert len(r3) == len(td)
for r_3, (fn, u, c, c_plus) in zip(r3, td2):
assert get_file_contents(r_3) != c
assert get_file_contents(r_3) == c_plus
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_update_parallel(temp_cache, valid_urls):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(i):
return download_file(u, cache="update", sources=[u2])
with ThreadPoolExecutor(max_workers=N_THREAD_HAMMER) as P:
r = set(P.map(update, range(N_THREAD_HAMMER)))
check_download_cache()
for f in r:
assert get_file_contents(f) == c2
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_update_parallel_multi(temp_cache, valid_urls):
u, c = next(valid_urls)
iucs = list(islice(valid_urls, N_THREAD_HAMMER))
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(uc):
u2, c2 = uc
return download_file(u, cache="update", sources=[u2]), c2
with ThreadPoolExecutor(max_workers=len(iucs)) as P:
r = list(P.map(update, iucs))
check_download_cache()
assert any(get_file_contents(f) == c for (f, c) in r)
@pytest.mark.remote_data(source="astropy")
def test_url_nocache():
with get_readable_fileobj(TESTURL, cache=False, encoding="utf-8") as page:
assert page.read().find("Astropy") > -1
def test_find_by_hash(valid_urls, temp_cache):
testurl, contents = next(valid_urls)
p = download_file(testurl, cache=True)
hash = compute_hash(p)
hashstr = "hash/" + hash
fnout = get_pkg_data_filename(hashstr)
assert os.path.isfile(fnout)
clear_download_cache(fnout)
assert not os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_find_invalid():
# this is of course not a real data file and not on any remote server, but
# it should *try* to go to the remote server
with pytest.raises(urllib.error.URLError):
get_pkg_data_filename(
"kjfrhgjklahgiulrhgiuraehgiurhgiuhreglhurieghruelighiuerahiulruli"
)
@pytest.mark.parametrize("package", [None, "astropy", "numpy"])
def test_get_invalid(package):
"""Test can create a file path to an invalid file."""
path = get_pkg_data_path("kjfrhgjkla", "hgiulrhgiu", package=package)
assert not os.path.isfile(path)
assert not os.path.isdir(path)
# Package data functions
@pytest.mark.parametrize(
"filename", ["local.dat", "local.dat.gz", "local.dat.bz2", "local.dat.xz"]
)
def test_local_data_obj(filename):
if (not HAS_BZ2 and "bz2" in filename) or (not HAS_LZMA and "xz" in filename):
with pytest.raises(ValueError, match=r" format files are not supported"):
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
# assert f.read().rstrip() == b'CONTENT'
else:
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.fixture(params=["invalid.dat.bz2", "invalid.dat.gz"])
def bad_compressed(request, tmp_path):
# These contents have valid headers for their respective file formats, but
# are otherwise malformed and invalid.
bz_content = b"BZhinvalid"
gz_content = b"\x1f\x8b\x08invalid"
datafile = tmp_path / request.param
filename = str(datafile)
if filename.endswith(".bz2"):
contents = bz_content
elif filename.endswith(".gz"):
contents = gz_content
else:
contents = "invalid"
datafile.write_bytes(contents)
return filename
def test_local_data_obj_invalid(bad_compressed):
is_bz2 = bad_compressed.endswith(".bz2")
is_xz = bad_compressed.endswith(".xz")
# Note, since these invalid files are created on the fly in order to avoid
# problems with detection by antivirus software
# (see https://github.com/astropy/astropy/issues/6520), it is no longer
# possible to use ``get_pkg_data_fileobj`` to read the files. Technically,
# they're not local anymore: they just live in a temporary directory
# created by pytest. However, we can still use get_readable_fileobj for the
# test.
if (not HAS_BZ2 and is_bz2) or (not HAS_LZMA and is_xz):
with pytest.raises(
ModuleNotFoundError, match=r"does not provide the [lb]z[2m]a? module\."
):
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
f.read()
else:
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
assert f.read().rstrip().endswith(b"invalid")
def test_local_data_name():
assert os.path.isfile(TESTLOCAL) and TESTLOCAL.endswith("local.dat")
# TODO: if in the future, the root data/ directory is added in, the below
# test should be uncommented and the README.rst should be replaced with
# whatever file is there
# get something in the astropy root
# fnout2 = get_pkg_data_filename('../../data/README.rst')
# assert os.path.isfile(fnout2) and fnout2.endswith('README.rst')
def test_data_name_third_party_package():
"""Regression test for issue #1256
Tests that `get_pkg_data_filename` works in a third-party package that
doesn't make any relative imports from the module it's used from.
Uses a test package under ``data/test_package``.
"""
# Get the actual data dir:
data_dir = os.path.join(os.path.dirname(__file__), "data")
sys.path.insert(0, data_dir)
try:
import test_package
filename = test_package.get_data_filename()
assert os.path.normcase(filename) == (
os.path.normcase(os.path.join(data_dir, "test_package", "data", "foo.txt"))
)
finally:
sys.path.pop(0)
def test_local_data_nonlocalfail():
# this would go *outside* the astropy tree
with pytest.raises(RuntimeError):
get_pkg_data_filename("../../../data/README.rst")
def test_compute_hash(tmp_path):
rands = b"1234567890abcdefghijklmnopqrstuvwxyz"
filename = tmp_path / "tmp.dat"
with open(filename, "wb") as ntf:
ntf.write(rands)
ntf.flush()
chhash = compute_hash(filename)
shash = hashlib.md5(rands).hexdigest()
assert chhash == shash
def test_get_pkg_data_contents():
with get_pkg_data_fileobj("data/local.dat") as f:
contents1 = f.read()
contents2 = get_pkg_data_contents("data/local.dat")
assert contents1 == contents2
@pytest.mark.remote_data(source="astropy")
def test_data_noastropy_fallback(monkeypatch):
"""
Tests to make sure the default behavior when the cache directory can't
be located is correct
"""
# better yet, set the configuration to make sure the temp files are deleted
conf.delete_temporary_downloads_at_exit = True
# make sure the config and cache directories are not searched
monkeypatch.setenv("XDG_CONFIG_HOME", "foo")
monkeypatch.delenv("XDG_CONFIG_HOME")
monkeypatch.setenv("XDG_CACHE_HOME", "bar")
monkeypatch.delenv("XDG_CACHE_HOME")
monkeypatch.setattr(paths.set_temp_config, "_temp_path", None)
monkeypatch.setattr(paths.set_temp_cache, "_temp_path", None)
    # make sure the _find_or_create_root_dir function fails as though the
    # astropy dir could not be accessed
def osraiser(dirnm, linkto, pkgname=None):
raise OSError()
monkeypatch.setattr(paths, "_find_or_create_root_dir", osraiser)
with pytest.raises(OSError):
# make sure the config dir search fails
paths.get_cache_dir(rootname="astropy")
with pytest.warns(CacheMissingWarning) as warning_lines:
fnout = download_file(TESTURL, cache=True)
n_warns = len(warning_lines)
partial_warn_msgs = ["remote data cache could not be accessed", "temporary file"]
if n_warns == 4:
partial_warn_msgs.extend(["socket", "socket"])
for wl in warning_lines:
cur_w = str(wl).lower()
for i, partial_msg in enumerate(partial_warn_msgs):
if partial_msg in cur_w:
del partial_warn_msgs[i]
break
assert (
len(partial_warn_msgs) == 0
), f"Got some unexpected warnings: {partial_warn_msgs}"
assert n_warns in (2, 4), f"Expected 2 or 4 warnings, got {n_warns}"
assert os.path.isfile(fnout)
    # clearing the cache should be a no-op that doesn't affect fnout
with pytest.warns(
CacheMissingWarning, match=r".*Not clearing data cache - cache inaccessible.*"
):
clear_download_cache(TESTURL)
assert os.path.isfile(fnout)
# now remove it so tests don't clutter up the temp dir this should get
# called at exit, anyway, but we do it here just to make sure it's working
# correctly
_deltemps()
assert not os.path.isfile(fnout)
# now try with no cache
fnnocache = download_file(TESTURL, cache=False)
with open(fnnocache, "rb") as page:
assert page.read().decode("utf-8").find("Astropy") > -1
    # no warnings should be raised in fileobj because the cache is unnecessary
@pytest.mark.parametrize(
"filename",
[
"unicode.txt",
"unicode.txt.gz",
pytest.param(
"unicode.txt.bz2",
marks=pytest.mark.xfail(not HAS_BZ2, reason="no bz2 support"),
),
pytest.param(
"unicode.txt.xz",
marks=pytest.mark.xfail(not HAS_LZMA, reason="no lzma support"),
),
],
)
def test_read_unicode(filename):
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="utf-8")
assert isinstance(contents, str)
contents = contents.splitlines()[1]
assert contents == "האסטרונומי פייתון"
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="binary")
assert isinstance(contents, bytes)
x = contents.splitlines()[1]
# fmt: off
assert x == (
b"\xff\xd7\x94\xd7\x90\xd7\xa1\xd7\x98\xd7\xa8\xd7\x95\xd7\xa0\xd7\x95"
b"\xd7\x9e\xd7\x99 \xd7\xa4\xd7\x99\xd7\x99\xd7\xaa\xd7\x95\xd7\x9f"[1:]
)
# fmt: on
def test_compressed_stream():
gzipped_data = (
b"H4sICIxwG1AAA2xvY2FsLmRhdAALycgsVkjLzElVANKlxakpCpl5CiUZqQ"
b"olqcUl8Tn5yYk58SmJJYnxWmCRzLx0hbTSvOSSzPy8Yi5nf78QV78QLgAlLytnRQAAAA=="
)
gzipped_data = base64.b64decode(gzipped_data)
assert isinstance(gzipped_data, bytes)
class FakeStream:
"""
A fake stream that has `read`, but no `seek`.
"""
def __init__(self, data):
self.data = data
def read(self, nbytes=None):
if nbytes is None:
result = self.data
self.data = b""
else:
result = self.data[:nbytes]
self.data = self.data[nbytes:]
return result
stream = FakeStream(gzipped_data)
with get_readable_fileobj(stream, encoding="binary") as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.mark.remote_data(source="astropy")
def test_invalid_location_download_raises_urlerror():
"""
checks that download_file gives a URLError and not an AttributeError,
as its code pathway involves some fiddling with the exception.
"""
with pytest.raises(urllib.error.URLError):
download_file("http://www.astropy.org/nonexistentfile")
def test_invalid_location_download_noconnect():
"""
checks that download_file gives an OSError if the socket is blocked
"""
# This should invoke socket's monkeypatched failure
with pytest.raises(OSError):
download_file("http://astropy.org/nonexistentfile")
@pytest.mark.remote_data(source="astropy")
def test_is_url_in_cache_remote():
assert not is_url_in_cache("http://astropy.org/nonexistentfile")
download_file(TESTURL, cache=True, show_progress=False)
assert is_url_in_cache(TESTURL)
def test_is_url_in_cache_local(temp_cache, valid_urls, invalid_urls):
testurl, contents = next(valid_urls)
nonexistent = next(invalid_urls)
assert not is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
download_file(testurl, cache=True, show_progress=False)
assert is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
# If non-deterministic failure happens see
# https://github.com/astropy/astropy/issues/9765
def test_check_download_cache(tmp_path, temp_cache, valid_urls, invalid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = tmp_path / "the.zip"
clear_download_cache()
assert not check_download_cache()
download_file(testurl, cache=True)
check_download_cache()
download_file(testurl2, cache=True)
check_download_cache()
export_download_cache(zip_file_name, [testurl, testurl2])
check_download_cache()
clear_download_cache(testurl2)
check_download_cache()
import_download_cache(zip_file_name, [testurl])
check_download_cache()
def test_export_import_roundtrip_one(tmp_path, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
f = download_file(testurl, cache=True, show_progress=False)
assert get_file_contents(f) == contents
initial_urls_in_cache = set(get_cached_urls())
zip_file_name = tmp_path / "the.zip"
export_download_cache(zip_file_name, [testurl])
clear_download_cache(testurl)
import_download_cache(zip_file_name)
assert is_url_in_cache(testurl)
assert set(get_cached_urls()) == initial_urls_in_cache
assert (
get_file_contents(download_file(testurl, cache=True, show_progress=False))
== contents
)
def test_export_url_not_present(temp_cache, valid_urls):
testurl, contents = next(valid_urls)
with NamedTemporaryFile("wb") as zip_file:
assert not is_url_in_cache(testurl)
with pytest.raises(KeyError):
export_download_cache(zip_file, [testurl])
def test_import_one(tmp_path, temp_cache, valid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = tmp_path / "the.zip"
download_file(testurl, cache=True)
download_file(testurl2, cache=True)
assert is_url_in_cache(testurl2)
export_download_cache(zip_file_name, [testurl, testurl2])
clear_download_cache(testurl)
clear_download_cache(testurl2)
import_download_cache(zip_file_name, [testurl])
assert is_url_in_cache(testurl)
assert not is_url_in_cache(testurl2)
def test_export_import_roundtrip(tmp_path, temp_cache, valid_urls):
zip_file_name = tmp_path / "the.zip"
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
export_download_cache(zip_file_name)
clear_download_cache()
import_download_cache(zip_file_name)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_import_roundtrip_stream(temp_cache, valid_urls):
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
clear_download_cache()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_overwrite_flag_works(temp_cache, valid_urls, tmp_path):
fn = tmp_path / "f.zip"
c = b"Some contents\nto check later"
with open(fn, "wb") as f:
f.write(c)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with pytest.raises(FileExistsError):
export_download_cache(fn)
assert get_file_contents(fn, encoding="binary") == c
export_download_cache(fn, overwrite=True)
assert get_file_contents(fn, encoding="binary") != c
def test_export_import_roundtrip_different_location(tmp_path, valid_urls):
original_cache = tmp_path / "original"
original_cache.mkdir()
zip_file_name = tmp_path / "the.zip"
urls = list(islice(valid_urls, FEW))
initial_urls_in_cache = {u for (u, c) in urls}
with paths.set_temp_cache(original_cache):
for u, c in urls:
download_file(u, cache=True)
assert set(get_cached_urls()) == initial_urls_in_cache
export_download_cache(zip_file_name)
new_cache = tmp_path / "new"
new_cache.mkdir()
with paths.set_temp_cache(new_cache):
import_download_cache(zip_file_name)
check_download_cache()
assert set(get_cached_urls()) == initial_urls_in_cache
for u, c in urls:
assert get_file_contents(download_file(u, cache=True)) == c
def test_cache_size_is_zero_when_empty(temp_cache):
assert not get_cached_urls()
assert cache_total_size() == 0
def test_cache_size_changes_correctly_when_files_are_added_and_removed(
temp_cache, valid_urls
):
u, c = next(valid_urls)
clear_download_cache(u)
s_i = cache_total_size()
download_file(u, cache=True)
assert cache_total_size() == s_i + len(c) + len(u.encode("utf-8"))
clear_download_cache(u)
assert cache_total_size() == s_i
def test_cache_contents_agrees_with_get_urls(temp_cache, valid_urls):
r = []
for a, a_c in islice(valid_urls, FEW):
a_f = download_file(a, cache=True)
r.append((a, a_c, a_f))
assert set(cache_contents().keys()) == set(get_cached_urls())
for u, c, h in r:
assert cache_contents()[u] == h
@pytest.mark.parametrize("desired_size", [1_000_000_000_000_000_000, 1 * _u.Ebyte])
def test_free_space_checker_huge(tmp_path, desired_size):
with pytest.raises(OSError):
check_free_space_in_dir(tmp_path, desired_size)
def test_get_free_space_file_directory(tmp_path):
fn = tmp_path / "file"
with open(fn, "w"):
pass
with pytest.raises(OSError):
get_free_space_in_dir(fn)
free_space = get_free_space_in_dir(tmp_path)
assert free_space > 0 and not hasattr(free_space, "unit")
# TODO: If unit=True starts to auto-guess prefix, this needs updating.
free_space = get_free_space_in_dir(tmp_path, unit=True)
assert free_space > 0 and free_space.unit == _u.byte
free_space = get_free_space_in_dir(tmp_path, unit=_u.Mbit)
assert free_space > 0 and free_space.unit == _u.Mbit
def test_download_file_bogus_settings(invalid_urls, temp_cache):
u = next(invalid_urls)
with pytest.raises(KeyError):
download_file(u, sources=[])
def test_download_file_local_directory(tmp_path):
"""Make sure we get a URLError rather than OSError even if it's a
local directory."""
with pytest.raises(urllib.request.URLError):
download_file(url_to(tmp_path))
def test_download_file_schedules_deletion(valid_urls):
u, c = next(valid_urls)
f = download_file(u)
assert f in _tempfilestodel
# how to test deletion actually occurs?
def test_clear_download_cache_refuses_to_delete_outside_the_cache(tmp_path):
fn = str(tmp_path / "file")
with open(fn, "w") as f:
f.write("content")
assert os.path.exists(fn)
with pytest.raises(RuntimeError):
clear_download_cache(fn)
assert os.path.exists(fn)
def test_check_download_cache_finds_bogus_entries(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf = os.path.abspath(os.path.join(dldir, "bogus"))
with open(bf, "w") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_finds_bogus_subentries(temp_cache, valid_urls):
u, c = next(valid_urls)
f = download_file(u, cache=True)
bf = os.path.abspath(os.path.join(os.path.dirname(f), "bogus"))
with open(bf, "w") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_cleanup(temp_cache, valid_urls):
u, c = next(valid_urls)
fn = download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf1 = os.path.abspath(os.path.join(dldir, "bogus1"))
with open(bf1, "w") as f:
f.write("bogus file that exists")
bf2 = os.path.abspath(os.path.join(os.path.dirname(fn), "bogus2"))
with open(bf2, "w") as f:
f.write("other bogus file that exists")
bf3 = os.path.abspath(os.path.join(dldir, "contents"))
with open(bf3, "w") as f:
f.write("awkwardly-named bogus file that exists")
u2, c2 = next(valid_urls)
    f2 = download_file(u2, cache=True)
os.unlink(f2)
bf4 = os.path.dirname(f2)
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert set(e.value.bad_files) == {bf1, bf2, bf3, bf4}
for bf in e.value.bad_files:
clear_download_cache(bf)
# download cache will be checked on exit
def test_download_cache_update_doesnt_damage_cache(temp_cache, valid_urls):
u, _ = next(valid_urls)
download_file(u, cache=True)
download_file(u, cache="update")
def test_cache_dir_is_actually_a_file(tmp_path, valid_urls):
"""Ensure that bogus cache settings are handled sensibly.
Because the user can specify the cache location in a config file, and
because they might try to deduce the location by looking around at what's
    in their directory tree, and because the cache directory is actually several
tree levels down from the directory set in the config file, it's important
to check what happens if each of the steps in the path is wrong somehow.
"""
def check_quietly_ignores_bogus_cache():
"""We want a broken cache to produce a warning but then astropy should
act like there isn't a cache.
"""
with pytest.warns(CacheMissingWarning):
assert not get_cached_urls()
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache("http://www.example.com/")
with pytest.warns(CacheMissingWarning):
assert not cache_contents()
with pytest.warns(CacheMissingWarning):
u, c = next(valid_urls)
r = download_file(u, cache=True)
assert get_file_contents(r) == c
# check the filename r appears in a warning message?
# check r is added to the delete_at_exit list?
# in fact should there be testing of the delete_at_exit mechanism,
# as far as that is possible?
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache(u)
with pytest.warns(CacheMissingWarning):
with pytest.raises(OSError):
check_download_cache()
dldir = _get_download_cache_loc()
# set_temp_cache acts weird if it is pointed at a file (see below)
# but we want to see what happens when the cache is pointed
# at a file instead of a directory, so make a directory we can
# replace later.
fn = tmp_path / "file"
ct = "contents\n"
os.mkdir(fn)
with paths.set_temp_cache(fn):
shutil.rmtree(fn)
with open(fn, "w") as f:
f.write(ct)
with pytest.raises(OSError):
paths.get_cache_dir()
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(fn) == ct, "File should not be harmed."
# See what happens when set_temp_cache is pointed at a file
with pytest.raises(OSError):
with paths.set_temp_cache(fn):
pass
assert dldir == _get_download_cache_loc()
assert get_file_contents(str(fn)) == ct
# Now the cache directory is normal but the subdirectory it wants
# to make is a file
cd = tmp_path / "astropy"
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmp_path):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto one level deeper
os.makedirs(cd)
cd = tmp_path / "astropy" / "download"
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmp_path):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto another level deeper
os.makedirs(cd)
cd = tmp_path / "astropy" / "download" / "url"
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmp_path):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
def test_get_fileobj_str(a_file):
fn, c = a_file
with get_readable_fileobj(str(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_localpath(a_file):
fn, c = a_file
with get_readable_fileobj(py.path.local(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_pathlib(a_file):
fn, c = a_file
with get_readable_fileobj(pathlib.Path(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_binary(a_binary_file):
fn, c = a_binary_file
with get_readable_fileobj(fn, encoding="binary") as rf:
assert rf.read() == c
def test_get_fileobj_already_open_text(a_file):
fn, c = a_file
with open(fn) as f:
with get_readable_fileobj(f) as rf:
with pytest.raises(TypeError):
rf.read()
def test_get_fileobj_already_open_binary(a_file):
fn, c = a_file
with open(fn, "rb") as f:
with get_readable_fileobj(f) as rf:
assert rf.read() == c
def test_get_fileobj_binary_already_open_binary(a_binary_file):
fn, c = a_binary_file
with open(fn, "rb") as f:
with get_readable_fileobj(f, encoding="binary") as rf:
assert rf.read() == c
def test_cache_contents_not_writable(temp_cache, valid_urls):
c = cache_contents()
with pytest.raises(TypeError):
c["foo"] = 7
u, _ = next(valid_urls)
download_file(u, cache=True)
c = cache_contents()
assert u in c
with pytest.raises(TypeError):
c["foo"] = 7
def test_cache_relocatable(tmp_path, valid_urls):
u, c = next(valid_urls)
d1 = tmp_path / "1"
d2 = tmp_path / "2"
os.mkdir(d1)
with paths.set_temp_cache(d1):
p1 = download_file(u, cache=True)
assert is_url_in_cache(u)
assert get_file_contents(p1) == c
shutil.copytree(d1, d2)
clear_download_cache()
with paths.set_temp_cache(d2):
assert is_url_in_cache(u)
p2 = download_file(u, cache=True)
assert p1 != p2
assert os.path.exists(p2)
clear_download_cache(p2)
check_download_cache()
def test_get_readable_fileobj_cleans_up_temporary_files(tmp_path, monkeypatch):
"""checks that get_readable_fileobj leaves no temporary files behind"""
# Create a 'file://' URL pointing to a path on the local filesystem
url = url_to(TESTLOCAL)
# Save temporary files to a known location
monkeypatch.setattr(tempfile, "tempdir", str(tmp_path))
# Call get_readable_fileobj() as a context manager
with get_readable_fileobj(url) as f:
f.read()
# Get listing of files in temporary directory
tempdir_listing = list(tmp_path.iterdir())
# Assert that the temporary file was empty after get_readable_fileobj()
# context manager finished running
assert len(tempdir_listing) == 0
def test_path_objects_get_readable_fileobj():
fpath = pathlib.Path(TESTLOCAL)
with get_readable_fileobj(fpath) as f:
assert (
f.read().rstrip()
== "This file is used in the test_local_data_* testing functions\nCONTENT"
)
def test_nested_get_readable_fileobj():
"""Ensure fileobj state is as expected when get_readable_fileobj()
is called inside another get_readable_fileobj().
"""
with get_readable_fileobj(TESTLOCAL, encoding="binary") as fileobj:
with get_readable_fileobj(fileobj, encoding="UTF-8") as fileobj2:
fileobj2.seek(1)
fileobj.seek(1)
# Theoretically, fileobj2 should be closed already here but it is not.
# See https://github.com/astropy/astropy/pull/8675.
# UNCOMMENT THIS WHEN PYTHON FINALLY LETS IT HAPPEN.
# assert fileobj2.closed
assert fileobj.closed and fileobj2.closed
def test_download_file_wrong_size(monkeypatch):
@contextlib.contextmanager
def mockurl(remote_url, timeout=None):
yield MockURL()
def mockurl_builder(*args, tlscontext=None, **kwargs):
mock_opener = type("MockOpener", (object,), {})()
mock_opener.open = mockurl
return mock_opener
class MockURL:
def __init__(self):
self.reader = io.BytesIO(b"a" * real_length)
def info(self):
return {"Content-Length": str(report_length)}
def read(self, length=None):
return self.reader.read(length)
monkeypatch.setattr(astropy.utils.data, "_build_urlopener", mockurl_builder)
with pytest.raises(urllib.error.ContentTooShortError):
report_length = 1024
real_length = 1023
download_file(TESTURL, cache=False)
with pytest.raises(urllib.error.URLError):
report_length = 1023
real_length = 1024
download_file(TESTURL, cache=False)
report_length = 1023
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
report_length = None
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
def test_can_make_directories_readonly(tmp_path):
try:
with readonly_dir(tmp_path):
assert is_dir_readonly(tmp_path)
except AssertionError:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip(
"We are root, we can't make a directory un-writable with chmod."
)
elif platform.system() == "Windows":
pytest.skip(
"It seems we can't make a driectory un-writable under Windows "
"with chmod, in spite of the documentation."
)
else:
raise
def test_can_make_files_readonly(tmp_path):
fn = tmp_path / "test"
c = "contents\n"
with open(fn, "w") as f:
f.write(c)
with readonly_dir(tmp_path):
try:
with open(fn, "w+") as f:
f.write("more contents\n")
except PermissionError:
pass
else:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip("We are root, we can't make a file un-writable with chmod.")
assert get_file_contents(fn) == c
def test_read_cache_readonly(readonly_cache):
assert cache_contents() == readonly_cache
def test_download_file_cache_readonly(readonly_cache):
for u in readonly_cache:
f = download_file(u, cache=True)
assert f == readonly_cache[u]
def test_import_file_cache_readonly(readonly_cache, tmp_path):
filename = tmp_path / "test-file"
content = "Some text or other"
url = "http://example.com/"
with open(filename, "w") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_import_file_cache_invalid_cross_device_link(tmp_path, monkeypatch):
def no_rename(path, mode=None):
if os.path.exists(path):
raise OSError(errno.EXDEV, "os.rename monkeypatched out")
else:
raise FileNotFoundError(f"File {path} does not exist.")
monkeypatch.setattr(os, "rename", no_rename)
filename = tmp_path / "test-file"
content = "Some text or other"
url = "http://example.com/"
with open(filename, "w") as f:
f.write(content)
with pytest.warns(AstropyWarning, match="os.rename monkeypatched out"):
import_file_to_cache(url, filename, remove_original=True, replace=True)
assert is_url_in_cache(url)
def test_download_file_cache_readonly_cache_miss(readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert get_file_contents(f) == c
assert not is_url_in_cache(u)
def test_download_file_cache_readonly_update(readonly_cache):
for u in readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != readonly_cache[u]
assert compute_hash(f) == compute_hash(readonly_cache[u])
def test_check_download_cache_works_if_readonly(readonly_cache):
check_download_cache()
# On Windows I can't make directories readonly. On CircleCI I can't make
# anything readonly because the test suite runs as root. So on those platforms
# none of the "real" tests above can be run. I can use monkeypatch to trigger
# the readonly code paths, see the "fake" versions of the tests below, but I
# don't totally trust those to completely explore what happens either, so we
# have both. I couldn't see an easy way to parameterize over fixtures and share
# tests.
def test_read_cache_fake_readonly(fake_readonly_cache):
assert cache_contents() == fake_readonly_cache
def test_download_file_cache_fake_readonly(fake_readonly_cache):
for u in fake_readonly_cache:
f = download_file(u, cache=True)
assert f == fake_readonly_cache[u]
def test_mkdtemp_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
tempfile.mkdtemp()
def test_TD_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
with TemporaryDirectory():
pass
def test_import_file_cache_fake_readonly(fake_readonly_cache, tmp_path):
filename = tmp_path / "test-file"
content = "Some text or other"
url = "http://example.com/"
with open(filename, "w") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_download_file_cache_fake_readonly_cache_miss(fake_readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert not is_url_in_cache(u)
assert get_file_contents(f) == c
def test_download_file_cache_fake_readonly_update(fake_readonly_cache):
for u in fake_readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != fake_readonly_cache[u]
assert compute_hash(f) == compute_hash(fake_readonly_cache[u])
def test_check_download_cache_works_if_fake_readonly(fake_readonly_cache):
check_download_cache()
def test_pkgname_isolation(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True, pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() < cache_total_size(pkgname=a)
for u, _ in islice(valid_urls, FEW + 1):
download_file(u, cache=True)
assert len(get_cached_urls()) == FEW + 1
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() > cache_total_size(pkgname=a)
assert set(get_cached_urls()) == set(cache_contents().keys())
assert set(get_cached_urls(pkgname=a)) == set(cache_contents(pkgname=a).keys())
for i in get_cached_urls():
assert is_url_in_cache(i)
assert not is_url_in_cache(i, pkgname=a)
for i in get_cached_urls(pkgname=a):
assert not is_url_in_cache(i)
assert is_url_in_cache(i, pkgname=a)
# FIXME: need to break a cache to test whether we check the right one
check_download_cache()
check_download_cache(pkgname=a)
# FIXME: check that cache='update' works
u = get_cached_urls()[0]
with pytest.raises(KeyError):
download_file(u, cache=True, sources=[], pkgname=a)
clear_download_cache(u, pkgname=a)
assert len(get_cached_urls()) == FEW + 1, "wrong pkgname should do nothing"
assert len(get_cached_urls(pkgname=a)) == FEW, "wrong pkgname should do nothing"
f = download_file(u, sources=[], cache=True)
with pytest.raises(RuntimeError):
clear_download_cache(f, pkgname=a)
ua = get_cached_urls(pkgname=a)[0]
with pytest.raises(KeyError):
download_file(ua, cache=True, sources=[])
fa = download_file(ua, sources=[], cache=True, pkgname=a)
with pytest.raises(RuntimeError):
clear_download_cache(fa)
clear_download_cache(ua, pkgname=a)
assert len(get_cached_urls()) == FEW + 1
assert len(get_cached_urls(pkgname=a)) == FEW - 1
clear_download_cache(u)
assert len(get_cached_urls()) == FEW
assert len(get_cached_urls(pkgname=a)) == FEW - 1
clear_download_cache(pkgname=a)
assert len(get_cached_urls()) == FEW
assert not get_cached_urls(pkgname=a)
clear_download_cache()
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
def test_transport_cache_via_zip(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f, pkgname=a)
check_download_cache()
check_download_cache(pkgname=a)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
cca = cache_contents(pkgname=a)
for k, v in cache_contents().items():
assert v != cca[k]
assert get_file_contents(v) == get_file_contents(cca[k])
clear_download_cache()
with io.BytesIO() as f:
export_download_cache(f, pkgname=a)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
def test_download_parallel_respects_pkgname(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
download_files_in_parallel([u for (u, c) in islice(valid_urls, FEW)], pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
@pytest.mark.skipif(
not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.",
)
def test_removal_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
clear_download_cache(u)
assert not is_url_in_cache(u)
check_download_cache()
@pytest.mark.skipif(
not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.",
)
def test_update_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
f = download_file(u, cache="update", sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert is_url_in_cache(u)
def test_removal_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
        # This platform can remove files that are in use, so simulate the
        # Windows-style failure by monkeypatching _rmtree.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
clear_download_cache(u)
def test_update_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
        # This platform can remove files that are in use, so simulate the
        # Windows-style failure by monkeypatching _rmtree.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
f = download_file(u, cache="update", sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_no_allow_internet(temp_cache, valid_urls):
u, c = next(valid_urls)
with conf.set_temp("allow_internet", False):
with pytest.raises(urllib.error.URLError):
download_file(u)
assert not is_url_in_cache(u)
with pytest.raises(urllib.error.URLError):
# This will trigger the remote data error if it's allowed to touch the internet
download_file(TESTURL)
def test_clear_download_cache_not_too_aggressive(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bad_filename = os.path.join(dldir, "contents")
assert is_url_in_cache(u)
clear_download_cache(bad_filename)
assert is_url_in_cache(u)
def test_clear_download_cache_variants(temp_cache, valid_urls):
# deletion by contents filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(f)
assert not is_url_in_cache(u)
# deletion by url filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.join(os.path.dirname(f), "url"))
assert not is_url_in_cache(u)
# deletion by hash directory name
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f))
assert not is_url_in_cache(u)
# deletion by directory name with trailing slash
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f) + "/")
assert not is_url_in_cache(u)
# deletion by hash of file contents
u, c = next(valid_urls)
f = download_file(u, cache=True)
h = compute_hash(f)
clear_download_cache(h)
assert not is_url_in_cache(u)
def test_clear_download_cache_invalid_cross_device_link(
temp_cache, valid_urls, monkeypatch
):
def no_rename(path, mode=None):
raise OSError(errno.EXDEV, "os.rename monkeypatched out")
u, c = next(valid_urls)
download_file(u, cache=True)
monkeypatch.setattr(os, "rename", no_rename)
assert is_url_in_cache(u)
with pytest.warns(AstropyWarning, match="os.rename monkeypatched out"):
clear_download_cache(u)
assert not is_url_in_cache(u)
def test_clear_download_cache_raises_os_error(temp_cache, valid_urls, monkeypatch):
def no_rename(path, mode=None):
raise OSError(errno.EBUSY, "os.rename monkeypatched out")
u, c = next(valid_urls)
download_file(u, cache=True)
monkeypatch.setattr(os, "rename", no_rename)
assert is_url_in_cache(u)
with pytest.warns(CacheMissingWarning, match="os.rename monkeypatched out"):
clear_download_cache(u)
@pytest.mark.skipif(
CI and os.environ.get("IS_CRON", "false") == "false",
reason="Flaky/too much external traffic for regular CI",
)
@pytest.mark.remote_data
def test_ftp_tls_auto(temp_cache):
"""Test that download automatically enables TLS/SSL when required"""
url = "ftp://anonymous:mail%[email protected]/pub/products/iers/finals2000A.daily"
download_file(url)
@pytest.mark.parametrize("base", ["http://example.com", "https://example.com"])
def test_url_trailing_slash(temp_cache, valid_urls, base):
slash = base + "/"
no_slash = base
u, c = next(valid_urls)
download_file(slash, cache=True, sources=[u])
assert is_url_in_cache(no_slash)
download_file(no_slash, cache=True, sources=[])
clear_download_cache(no_slash)
assert not is_url_in_cache(no_slash)
assert not is_url_in_cache(slash)
download_file(no_slash, cache=True, sources=[u])
# see if implicit check_download_cache squawks
def test_empty_url(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file("file://", cache=True, sources=[u])
assert not is_url_in_cache("file:///")
@pytest.mark.remote_data
def test_download_ftp_file_properly_handles_socket_error():
faulty_url = "ftp://anonymous:mail%40astropy.org@nonexisting/pub/products/iers/finals2000A.all"
with pytest.raises(urllib.error.URLError) as excinfo:
download_file(faulty_url)
errmsg = excinfo.exconly()
found_msg = False
possible_msgs = [
"Name or service not known",
"nodename nor servname provided, or not known",
"getaddrinfo failed",
"Temporary failure in name resolution",
"No address associated with hostname",
]
for cur_msg in possible_msgs:
if cur_msg in errmsg:
found_msg = True
break
assert found_msg, f'Got {errmsg}, expected one of these: {",".join(possible_msgs)}'
@pytest.mark.parametrize(
("s", "ans"),
[
("http://googlecom", True),
("https://google.com", True),
("ftp://google.com", True),
("sftp://google.com", True),
("ssh://google.com", True),
("file:///c:/path/to/the%20file.txt", True),
("google.com", False),
("C:\\\\path\\\\file.docx", False),
("data://file", False),
],
)
def test_string_is_url_check(s, ans):
assert is_url(s) is ans
dc4f420dbeeb3e356c7f1eece2c37a9e3e4996ddc7909743b377c321b15567b3
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Built-in mask mixin class.
The design uses `Masked` as a factory class which automatically
generates new subclasses for any data class that is itself a
subclass of a predefined masked class, with `MaskedNDArray`
providing such a predefined class for `~numpy.ndarray`.
Generally, any new predefined class should override the
``from_unmasked(data, mask, copy=False)`` class method that
creates an instance from unmasked data and a mask, as well as
the ``unmasked`` property that returns just the data.
The `Masked` class itself provides a base ``mask`` property,
which can also be overridden if needed.
"""
import builtins
import numpy as np
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.data_info import ParentDtypeInfo
from astropy.utils.shapes import NDArrayShapeMethods
from .function_helpers import (
APPLY_TO_BOTH_FUNCTIONS,
DISPATCHED_FUNCTIONS,
MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
__all__ = ["Masked", "MaskedNDArray"]
get__doc__ = """Masked version of {0.__name__}.
Except for the ability to pass in a ``mask``, parameters are
as for `{0.__module__}.{0.__name__}`.
""".format
class Masked(NDArrayShapeMethods):
"""A scalar value or array of values with associated mask.
The resulting instance will take its exact type from whatever the
contents are, with the type generated on the fly as needed.
Parameters
----------
data : array-like
        The data for which a mask is to be added. The result will be a
        subclass of the type of ``data``.
mask : array-like of bool, optional
The initial mask to assign. If not given, taken from the data.
copy : bool
Whether the data and mask should be copied. Default: `False`.
"""
_base_classes = {}
"""Explicitly defined masked classes keyed by their unmasked counterparts.
For subclasses of these unmasked classes, masked counterparts can be generated.
"""
_masked_classes = {}
"""Masked classes keyed by their unmasked data counterparts."""
def __new__(cls, *args, **kwargs):
if cls is Masked:
# Initializing with Masked itself means we're in "factory mode".
if not kwargs and len(args) == 1 and isinstance(args[0], type):
# Create a new masked class.
return cls._get_masked_cls(args[0])
else:
return cls._get_masked_instance(*args, **kwargs)
else:
# Otherwise we're a subclass and should just pass information on.
return super().__new__(cls, *args, **kwargs)
def __init_subclass__(cls, base_cls=None, data_cls=None, **kwargs):
"""Register a Masked subclass.
Parameters
----------
base_cls : type, optional
If given, it is taken to mean that ``cls`` can be used as
a base for masked versions of all subclasses of ``base_cls``,
so it is registered as such in ``_base_classes``.
data_cls : type, optional
            If given, ``cls`` will be registered as the masked version of
``data_cls``. Will set the private ``cls._data_cls`` attribute,
and auto-generate a docstring if not present already.
**kwargs
Passed on for possible further initialization by superclasses.
"""
if base_cls is not None:
Masked._base_classes[base_cls] = cls
if data_cls is not None:
cls._data_cls = data_cls
cls._masked_classes[data_cls] = cls
if cls.__doc__ is None:
cls.__doc__ = get__doc__(data_cls)
super().__init_subclass__(**kwargs)
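    # Hedged illustration of the registration hooks above: a predefined masked
    # class typically declares itself with these class-creation keywords, e.g.
    #
    #     class MaskedNDArray(Masked, np.ndarray,
    #                         base_cls=np.ndarray, data_cls=np.ndarray):
    #         ...
    #
    # which registers it both as the masked counterpart of ndarray and as the
    # base used to auto-generate masked versions of ndarray subclasses.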
# This base implementation just uses the class initializer.
# Subclasses can override this in case the class does not work
# with this signature, or to provide a faster implementation.
@classmethod
def from_unmasked(cls, data, mask=None, copy=False):
"""Create an instance from unmasked data and a mask."""
return cls(data, mask=mask, copy=copy)
@classmethod
def _get_masked_instance(cls, data, mask=None, copy=False):
data, data_mask = cls._get_data_and_mask(data)
if mask is None:
mask = False if data_mask is None else data_mask
masked_cls = cls._get_masked_cls(data.__class__)
return masked_cls.from_unmasked(data, mask, copy)
@classmethod
def _get_masked_cls(cls, data_cls):
"""Get the masked wrapper for a given data class.
If the data class does not exist yet but is a subclass of any of the
registered base data classes, it is automatically generated
(except we skip `~numpy.ma.MaskedArray` subclasses, since then the
masking mechanisms would interfere).
"""
if issubclass(data_cls, (Masked, np.ma.MaskedArray)):
return data_cls
masked_cls = cls._masked_classes.get(data_cls)
if masked_cls is None:
# Walk through MRO and find closest base data class.
# Note: right now, will basically always be ndarray, but
# one could imagine needing some special care for one subclass,
# which would then get its own entry. E.g., if MaskedAngle
# defined something special, then MaskedLongitude should depend
# on it.
for mro_item in data_cls.__mro__:
base_cls = cls._base_classes.get(mro_item)
if base_cls is not None:
break
else:
# Just hope that MaskedNDArray can handle it.
# TODO: this covers the case where a user puts in a list or so,
# but for those one could just explicitly do something like
# _masked_classes[list] = MaskedNDArray.
return MaskedNDArray
# Create (and therefore register) new Masked subclass for the
# given data_cls.
masked_cls = type(
"Masked" + data_cls.__name__,
(data_cls, base_cls),
{},
data_cls=data_cls,
)
return masked_cls
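    # Hedged sketch of the auto-generation above: asking for the masked version
    # of an ndarray subclass that has no explicit masked counterpart builds one
    # on the fly and registers it, roughly
    #
    #     class MyArray(np.ndarray):  # hypothetical user subclass
    #         pass
    #
    #     Masked(MyArray)  # -> new class "MaskedMyArray" with bases
    #                      #    (MyArray, MaskedNDArray), cached in
    #                      #    Masked._masked_classes[MyArray]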
@classmethod
def _get_data_and_mask(cls, data, allow_ma_masked=False):
"""Split data into unmasked and mask, if present.
Parameters
----------
data : array-like
Possibly masked item, judged by whether it has a ``mask`` attribute.
If so, checks for being an instance of `~astropy.utils.masked.Masked`
or `~numpy.ma.MaskedArray`, and gets unmasked data appropriately.
allow_ma_masked : bool, optional
Whether or not to process `~numpy.ma.masked`, i.e., an item that
implies no data but the presence of a mask.
Returns
-------
unmasked, mask : array-like
Unmasked will be `None` for `~numpy.ma.masked`.
Raises
------
ValueError
If `~numpy.ma.masked` is passed in and ``allow_ma_masked`` is not set.
"""
mask = getattr(data, "mask", None)
if mask is not None:
try:
data = data.unmasked
except AttributeError:
if not isinstance(data, np.ma.MaskedArray):
raise
if data is np.ma.masked:
if allow_ma_masked:
data = None
else:
raise ValueError("cannot handle np.ma.masked here.") from None
else:
data = data.data
return data, mask
@classmethod
def _get_data_and_masks(cls, *args):
data_masks = [cls._get_data_and_mask(arg) for arg in args]
return (
tuple(data for data, _ in data_masks),
tuple(mask for _, mask in data_masks),
)
def _get_mask(self):
"""The mask.
        When set, the original mask is replaced by whatever it is set to,
        using a view if no broadcasting or type conversion is required.
"""
return self._mask
def _set_mask(self, mask, copy=False):
self_dtype = getattr(self, "dtype", None)
mask_dtype = (
np.ma.make_mask_descr(self_dtype)
if self_dtype and self_dtype.names
else np.dtype("?")
)
ma = np.asanyarray(mask, dtype=mask_dtype)
if ma.shape != self.shape:
# This will fail (correctly) if not broadcastable.
self._mask = np.empty(self.shape, dtype=mask_dtype)
self._mask[...] = ma
elif ma is mask:
# Even if not copying use a view so that shape setting
# does not propagate.
self._mask = mask.copy() if copy else mask.view()
else:
self._mask = ma
mask = property(_get_mask, _set_mask)
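    # Hedged sketch of the setter semantics above (``ma`` is a hypothetical,
    # length-3 masked array):
    #
    #     ma.mask = True                 # broadcast into a freshly allocated bool array
    #     ma.mask = [True, False, True]  # converted to a bool array and stored
    #     ma.mask = some_bool_array      # matching shape and dtype: stored as a view
    #
    # where ``some_bool_array`` is a hypothetical boolean ndarray of the right shape.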
# Note: subclass should generally override the unmasked property.
# This one assumes the unmasked data is stored in a private attribute.
@property
def unmasked(self):
"""The unmasked values.
See Also
--------
astropy.utils.masked.Masked.filled
"""
return self._unmasked
def filled(self, fill_value):
"""Get a copy of the underlying data, with masked values filled in.
Parameters
----------
fill_value : object
Value to replace masked values with.
See Also
--------
astropy.utils.masked.Masked.unmasked
"""
unmasked = self.unmasked.copy()
if self.mask.dtype.names:
np.ma.core._recursive_filled(unmasked, self.mask, fill_value)
else:
unmasked[self.mask] = fill_value
return unmasked
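    # For example (hedged; the exact repr depends on the numpy version):
    #
    #     >>> Masked(np.array([1., 2., 3.]), mask=[False, True, False]).filled(np.nan)
    #     array([ 1., nan,  3.])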
def _apply(self, method, *args, **kwargs):
# Required method for NDArrayShapeMethods, to help provide __getitem__
# and shape-changing methods.
if callable(method):
data = method(self.unmasked, *args, **kwargs)
mask = method(self.mask, *args, **kwargs)
else:
data = getattr(self.unmasked, method)(*args, **kwargs)
mask = getattr(self.mask, method)(*args, **kwargs)
result = self.from_unmasked(data, mask, copy=False)
if "info" in self.__dict__:
result.info = self.info
return result
def __setitem__(self, item, value):
value, mask = self._get_data_and_mask(value, allow_ma_masked=True)
if value is not None:
self.unmasked[item] = value
self.mask[item] = mask
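    # Hedged sketch of the assignment semantics above: assigning ``np.ma.masked``
    # only marks the element as masked, while assigning an ordinary value updates
    # the data and unmasks that element, e.g.
    #
    #     ma[1] = np.ma.masked  # mask[1] becomes True, data untouched
    #     ma[0] = 42            # data[0] becomes 42, mask[0] becomes False
    #
    # (``ma`` is a hypothetical MaskedNDArray instance.)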
class MaskedInfoBase:
mask_val = np.ma.masked
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
self.serialize_method = {
"fits": "null_value",
"ecsv": "null_value",
"hdf5": "data_mask",
"parquet": "data_mask",
None: "null_value",
}
class MaskedNDArrayInfo(MaskedInfoBase, ParentDtypeInfo):
"""
Container for meta information like name, description, format.
"""
# Add `serialize_method` attribute to the attrs that MaskedNDArrayInfo knows
# about. This allows customization of the way that MaskedColumn objects
# get written to file depending on format. The default is to use whatever
# the writer would normally do, which in the case of FITS or ECSV is to use
# a NULL value within the data itself. If serialize_method is 'data_mask'
# then the mask is explicitly written out as a separate column if there
# are any masked values. This is the same as for MaskedColumn.
attr_names = ParentDtypeInfo.attr_names | {"serialize_method"}
# When `serialize_method` is 'data_mask', and data and mask are being written
# as separate columns, use column names <name> and <name>.mask (instead
# of default encoding as <name>.data and <name>.mask).
_represent_as_dict_primary_data = "data"
def _represent_as_dict(self):
out = super()._represent_as_dict()
masked_array = self._parent
# If the serialize method for this context (e.g. 'fits' or 'ecsv') is
# 'data_mask', that means to serialize using an explicit mask column.
method = self.serialize_method[self._serialize_context]
if method == "data_mask":
out["data"] = masked_array.unmasked
if np.any(masked_array.mask):
# Only if there are actually masked elements do we add the ``mask`` column
out["mask"] = masked_array.mask
elif method == "null_value":
out["data"] = np.ma.MaskedArray(
masked_array.unmasked, mask=masked_array.mask
)
else:
raise ValueError(
'serialize method must be either "data_mask" or "null_value"'
)
return out
def _construct_from_dict(self, map):
# Override usual handling, since MaskedNDArray takes shape and buffer
# as input, which is less useful here.
# The map can contain either a MaskedColumn or a Column and a mask.
# Extract the mask for the former case.
map.setdefault("mask", getattr(map["data"], "mask", False))
return self._parent_cls.from_unmasked(**map)
class MaskedArraySubclassInfo(MaskedInfoBase):
"""Mixin class to create a subclasses such as MaskedQuantityInfo."""
# This is used below in __init_subclass__, which also inserts a
# 'serialize_method' attribute in attr_names.
def _represent_as_dict(self):
# Use the data_cls as the class name for serialization,
# so that we do not have to store all possible masked classes
# in astropy.table.serialize.__construct_mixin_classes.
out = super()._represent_as_dict()
data_cls = self._parent._data_cls
out.setdefault("__class__", data_cls.__module__ + "." + data_cls.__name__)
return out
def _comparison_method(op):
"""
Create a comparison operator for MaskedNDArray.
Needed since for string dtypes the base operators bypass __array_ufunc__
and hence return unmasked results.
"""
def _compare(self, other):
other_data, other_mask = self._get_data_and_mask(other)
result = getattr(self.unmasked, op)(other_data)
if result is NotImplemented:
return NotImplemented
mask = self.mask | (other_mask if other_mask is not None else False)
return self._masked_result(result, mask, None)
return _compare
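# Editor's note: illustrative sketch of the mask propagation implemented
# above (assumes ``np`` and ``Masked`` as used elsewhere in this module).
# >>> ma = Masked(np.array([1, 2, 3]), mask=[False, True, False])
# >>> eq = ma == np.array([1, 5, 3])
# >>> eq.unmasked, eq.mask
# (array([ True, False,  True]), array([False,  True, False]))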
class MaskedIterator:
"""
Flat iterator object to iterate over Masked Arrays.
A `~astropy.utils.masked.MaskedIterator` iterator is returned by ``m.flat``
for any masked array ``m``. It allows iterating over the array as if it
were a 1-D array, either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
Notes
-----
The design of `~astropy.utils.masked.MaskedIterator` follows that of
`~numpy.ma.core.MaskedIterator`. It is not exported by the
`~astropy.utils.masked` module. Instead of instantiating directly,
use the ``flat`` method in the masked array instance.
"""
def __init__(self, m):
self._masked = m
self._dataiter = m.unmasked.flat
self._maskiter = m.mask.flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
mask = self._maskiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Masked array.
if not isinstance(out, np.ndarray):
out = out[...]
mask = mask[...]
return self._masked.from_unmasked(out, mask, copy=False)
def __setitem__(self, index, value):
data, mask = self._masked._get_data_and_mask(value, allow_ma_masked=True)
if data is not None:
self._dataiter[index] = data
self._maskiter[index] = mask
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)[...]
mask = next(self._maskiter)[...]
return self._masked.from_unmasked(out, mask, copy=False)
next = __next__
class MaskedNDArray(Masked, np.ndarray, base_cls=np.ndarray, data_cls=np.ndarray):
_mask = None
info = MaskedNDArrayInfo()
def __new__(cls, *args, mask=None, **kwargs):
"""Get data class instance from arguments and then set mask."""
self = super().__new__(cls, *args, **kwargs)
if mask is not None:
self.mask = mask
elif self._mask is None:
self.mask = False
return self
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# For all subclasses we should set a default __new__ that passes on
# arguments other than mask to the data class, and then sets the mask.
if "__new__" not in cls.__dict__:
def __new__(newcls, *args, mask=None, **kwargs):
"""Get data class instance from arguments and then set mask."""
# Need to explicitly mention classes outside of class definition.
self = super(cls, newcls).__new__(newcls, *args, **kwargs)
if mask is not None:
self.mask = mask
elif self._mask is None:
self.mask = False
return self
cls.__new__ = __new__
if "info" not in cls.__dict__ and hasattr(cls._data_cls, "info"):
data_info = cls._data_cls.info
attr_names = data_info.attr_names | {"serialize_method"}
new_info = type(
cls.__name__ + "Info",
(MaskedArraySubclassInfo, data_info.__class__),
dict(attr_names=attr_names),
)
cls.info = new_info()
# The two pieces typically overridden.
@classmethod
def from_unmasked(cls, data, mask=None, copy=False):
# Note: have to override since __new__ would use ndarray.__new__
# which expects the shape as its first argument, not an array.
data = np.array(data, subok=True, copy=copy)
self = data.view(cls)
self._set_mask(mask, copy=copy)
return self
@property
def unmasked(self):
return super().view(self._data_cls)
@classmethod
def _get_masked_cls(cls, data_cls):
# Short-cuts
if data_cls is np.ndarray:
return MaskedNDArray
elif data_cls is None: # for .view()
return cls
return super()._get_masked_cls(data_cls)
@property
def flat(self):
"""A 1-D iterator over the Masked array.
This returns a ``MaskedIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to Python's built-in iterator, except that it also
allows assignment.
"""
return MaskedIterator(self)
@property
def _baseclass(self):
"""Work-around for MaskedArray initialization.
Allows the base class to be inferred correctly when a masked instance
is used to initialize (or viewed as) a `~numpy.ma.MaskedArray`.
"""
return self._data_cls
def view(self, dtype=None, type=None):
"""New view of the masked array.
Like `numpy.ndarray.view`, but always returning a masked array subclass.
"""
if type is None and (
isinstance(dtype, builtins.type) and issubclass(dtype, np.ndarray)
):
return super().view(self._get_masked_cls(dtype))
if dtype is None:
return super().view(self._get_masked_cls(type))
dtype = np.dtype(dtype)
if not (
dtype.itemsize == self.dtype.itemsize
and (dtype.names is None or len(dtype.names) == len(self.dtype.names))
):
raise NotImplementedError(
f"{self.__class__} cannot be viewed with a dtype with a "
"with a different number of fields or size."
)
return super().view(dtype, self._get_masked_cls(type))
def __array_finalize__(self, obj):
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# Logically, this should come from ndarray and hence be None, but
# just in case someone creates a new mixin, we check.
super_array_finalize = super().__array_finalize__
if super_array_finalize: # pragma: no cover
super_array_finalize(obj)
if self._mask is None:
# Got here after, e.g., a view of another masked class.
# Get its mask, or initialize ours.
self._set_mask(getattr(obj, "_mask", False))
if "info" in obj.__dict__:
self.info = obj.info
@property
def shape(self):
"""The shape of the data and the mask.
Usually used to get the current shape of an array, but may also be
used to reshape the array in-place by assigning a tuple of array
dimensions to it. As with `numpy.reshape`, one of the new shape
dimensions can be -1, in which case its value is inferred from the
size of the array and the remaining dimensions.
Raises
------
AttributeError
If a copy is required, of either the data or the mask.
"""
# Redefinition to allow defining a setter and add a docstring.
return super().shape
@shape.setter
def shape(self, shape):
old_shape = self.shape
self._mask.shape = shape
# Reshape array proper in try/except just in case some broadcasting
# or so causes it to fail.
try:
super(MaskedNDArray, type(self)).shape.__set__(self, shape)
except Exception as exc:
self._mask.shape = old_shape
# Given that the mask reshaping succeeded, the only logical
# reason for an exception is something like a broadcast error in
# __array_finalize__, or a different memory ordering between
# mask and data. For those, give a more useful error message;
# otherwise just raise the error.
if "could not broadcast" in exc.args[0]:
raise AttributeError(
"Incompatible shape for in-place modification. "
"Use `.reshape()` to make a copy with the desired "
"shape."
) from None
else: # pragma: no cover
raise
_eq_simple = _comparison_method("__eq__")
_ne_simple = _comparison_method("__ne__")
__lt__ = _comparison_method("__lt__")
__le__ = _comparison_method("__le__")
__gt__ = _comparison_method("__gt__")
__ge__ = _comparison_method("__ge__")
def __eq__(self, other):
if not self.dtype.names:
return self._eq_simple(other)
# For structured arrays, we treat this as a reduction over the fields,
# where masked fields are skipped and thus do not influence the result.
other = np.asanyarray(other, dtype=self.dtype)
result = np.stack(
[self[field] == other[field] for field in self.dtype.names], axis=-1
)
return result.all(axis=-1)
def __ne__(self, other):
if not self.dtype.names:
return self._ne_simple(other)
# For structured arrays, we treat this as a reduction over the fields,
# where masked fields are skipped and thus do not influence the result.
other = np.asanyarray(other, dtype=self.dtype)
result = np.stack(
[self[field] != other[field] for field in self.dtype.names], axis=-1
)
return result.any(axis=-1)
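# Editor's note: illustrative sketch of the structured-dtype comparison
# above; a field that is masked does not influence the result.
# >>> x = Masked(np.array([(1, 2.0)], dtype="i8,f8"), mask=[(False, True)])
# >>> y = np.array([(1, 99.0)], dtype="i8,f8")
# >>> (x == y).unmasked  # masked field 'f1' is skipped in the reduction
# array([ True])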
def _combine_masks(self, masks, out=None, where=True, copy=True):
"""Combine masks, possibly storing it in some output.
Parameters
----------
masks : tuple of array of bool or None
Input masks. Any that are `None` or `False` are ignored.
Should broadcast to each other.
out : output mask array, optional
Possible output array to hold the result.
where : array of bool, optional
Which elements of the output array to fill.
copy : bool optional
Whether to ensure a copy is made. Only relevant if a single
input mask is not `None`, and ``out`` is not given.
"""
masks = [m for m in masks if m is not None and m is not False]
if not masks:
return False
if len(masks) == 1:
if out is None:
return masks[0].copy() if copy else masks[0]
else:
np.copyto(out, masks[0], where=where)
return out
out = np.logical_or(masks[0], masks[1], out=out, where=where)
for mask in masks[2:]:
np.logical_or(out, mask, out=out, where=where)
return out
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.pop("out", None)
out_unmasked = None
out_mask = None
if out is not None:
out_unmasked, out_masks = self._get_data_and_masks(*out)
for d, m in zip(out_unmasked, out_masks):
if m is None:
# TODO: allow writing to unmasked output if nothing is masked?
if d is not None:
raise TypeError("cannot write to unmasked output")
elif out_mask is None:
out_mask = m
# TODO: where is only needed for __call__ and reduce;
# this is very fast, but still worth separating out?
where = kwargs.pop("where", True)
if where is True:
where_unmasked = True
where_mask = None
else:
where_unmasked, where_mask = self._get_data_and_mask(where)
unmasked, masks = self._get_data_and_masks(*inputs)
if ufunc.signature:
# We're dealing with a gufunc. For now, only deal with
# np.matmul and gufuncs for which the mask of any output always
# depends on all core dimension values of all inputs.
# Also ignore axes keyword for now...
# TODO: in principle, it should be possible to generate the mask
# purely based on the signature.
if "axes" in kwargs:
raise NotImplementedError(
"Masked does not yet support gufunc calls with 'axes'."
)
if ufunc is np.matmul:
# np.matmul is tricky and its signature cannot be parsed by
# _parse_gufunc_signature.
unmasked = np.atleast_1d(*unmasked)
mask0, mask1 = masks
masks = []
is_mat1 = unmasked[1].ndim >= 2
if mask0 is not None:
masks.append(np.logical_or.reduce(mask0, axis=-1, keepdims=is_mat1))
if mask1 is not None:
masks.append(
np.logical_or.reduce(mask1, axis=-2, keepdims=True)
if is_mat1
else np.logical_or.reduce(mask1)
)
mask = self._combine_masks(masks, out=out_mask, copy=False)
else:
# Parse signature with private numpy function. Note it
# cannot handle spaces in tuples, so remove those.
in_sig, out_sig = np.lib.function_base._parse_gufunc_signature(
ufunc.signature.replace(" ", "")
)
axis = kwargs.get("axis", -1)
keepdims = kwargs.get("keepdims", False)
in_masks = []
for sig, mask in zip(in_sig, masks):
if mask is not None:
if sig:
# Input has core dimensions. Assume that if any
# value in those is masked, the output will be
# masked too (TODO: for multiple core dimensions
# this may be too strong).
mask = np.logical_or.reduce(
mask, axis=axis, keepdims=keepdims
)
in_masks.append(mask)
mask = self._combine_masks(in_masks)
result_masks = []
for os in out_sig:
if os:
# Output has core dimensions. Assume all those
# get the same mask.
result_mask = np.expand_dims(mask, axis)
else:
result_mask = mask
result_masks.append(result_mask)
mask = result_masks if len(result_masks) > 1 else result_masks[0]
elif method == "__call__":
# Regular ufunc call.
# Combine the masks from the input, possibly selecting elements.
mask = self._combine_masks(masks, out=out_mask, where=where_unmasked)
# If relevant, also mask output elements for which where was masked.
if where_mask is not None:
mask |= where_mask
elif method == "outer":
# Must have two arguments; adjust masks as will be done for data.
m0, m1 = masks
if m0 is not None and m0.ndim > 0:
m0 = m0[(...,) + (np.newaxis,) * np.ndim(unmasked[1])]
mask = self._combine_masks((m0, m1), out=out_mask)
elif method in {"reduce", "accumulate"}:
# Reductions like np.add.reduce (sum).
# Treat any masked where as if the input element was masked.
mask = self._combine_masks((masks[0], where_mask), copy=False)
if mask is not False:
# By default, we simply propagate masks, since for
# things like np.sum, it makes no sense to do otherwise.
# Individual methods need to override as needed.
if method == "reduce":
axis = kwargs.get("axis", None)
keepdims = kwargs.get("keepdims", False)
mask = np.logical_or.reduce(
mask,
where=where_unmasked,
axis=axis,
keepdims=keepdims,
out=out_mask,
)
if where_unmasked is not True:
# Mask also whole rows in which no elements were selected;
# those will have been left as unmasked above.
mask |= ~np.logical_or.reduce(
where_unmasked, axis=axis, keepdims=keepdims
)
else:
# Accumulate
axis = kwargs.get("axis", 0)
mask = np.logical_or.accumulate(mask, axis=axis, out=out_mask)
elif out is None:
# Can only get here if neither input nor output was masked, but
# perhaps where was masked (possible in "not NUMPY_LT_1_25" and
# in NUMPY_LT_1_21; the latter also allowed axis).
# We don't support this.
return NotImplemented
elif method in {"reduceat", "at"}: # pragma: no cover
raise NotImplementedError(
"masked instances cannot yet deal with 'reduceat' or 'at'."
)
if out_unmasked is not None:
kwargs["out"] = out_unmasked
if where_unmasked is not True:
kwargs["where"] = where_unmasked
result = getattr(ufunc, method)(*unmasked, **kwargs)
if result is None: # pragma: no cover
# This happens for the "at" method.
return result
if out is not None and len(out) == 1:
out = out[0]
return self._masked_result(result, mask, out)
def __array_function__(self, function, types, args, kwargs):
# TODO: go through functions systematically to see which ones
# work and/or can be supported.
if function in MASKED_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in APPLY_TO_BOTH_FUNCTIONS:
helper = APPLY_TO_BOTH_FUNCTIONS[function]
try:
helper_result = helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
data_args, mask_args, kwargs, out = helper_result
if out is not None:
if not isinstance(out, Masked):
return self._not_implemented_or_raise(function, types)
function(*mask_args, out=out.mask, **kwargs)
function(*data_args, out=out.unmasked, **kwargs)
return out
mask = function(*mask_args, **kwargs)
result = function(*data_args, **kwargs)
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
dispatched_result = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
if not isinstance(dispatched_result, tuple):
return dispatched_result
result, mask, out = dispatched_result
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else: # pragma: no cover
# By default, just pass it through for now.
return super().__array_function__(function, types, args, kwargs)
if mask is None:
return result
else:
return self._masked_result(result, mask, out)
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Masked. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Masked subclass of it) around,
# it quite likely coerces, so we should just break.
if any(issubclass(t, np.ndarray) and not issubclass(t, Masked) for t in types):
raise TypeError(
"the MaskedNDArray implementation cannot handle {} "
"with the given arguments.".format(function)
) from None
else:
return NotImplemented
def _masked_result(self, result, mask, out):
if isinstance(result, tuple):
if out is None:
out = (None,) * len(result)
if not isinstance(mask, (list, tuple)):
mask = (mask,) * len(result)
return tuple(
self._masked_result(result_, mask_, out_)
for (result_, mask_, out_) in zip(result, mask, out)
)
if out is None:
# Note that we cannot count on result being the same class as
# 'self' (e.g., comparison of quantity results in an ndarray, most
# operations on Longitude and Latitude result in Angle or
# Quantity), so use Masked to determine the appropriate class.
return Masked(result, mask)
# TODO: remove this sanity check once test cases are more complete.
assert isinstance(out, Masked)
# If we have an output, the result was written in-place, so we should
# also write the mask in-place (if not done already in the code).
if out._mask is not mask:
out._mask[...] = mask
return out
# Below are ndarray methods that need to be overridden as masked elements
# need to be skipped and/or an initial value needs to be set.
def _reduce_defaults(self, kwargs, initial_func=None):
"""Get default where and initial for masked reductions.
Generally, the default should be to skip all masked elements. For
reductions such as np.minimum.reduce, we also need an initial value,
which can be determined using ``initial_func``.
"""
if "where" not in kwargs:
kwargs["where"] = ~self.mask
if initial_func is not None and "initial" not in kwargs:
kwargs["initial"] = initial_func(self.unmasked)
return kwargs
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
# Unfortunately, cannot override the call to diagonal inside trace, so
# duplicate implementation in numpy/core/src/multiarray/calculation.c.
diagonal = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
return diagonal.sum(-1, dtype=dtype, out=out)
def min(self, axis=None, out=None, **kwargs):
return super().min(
axis=axis, out=out, **self._reduce_defaults(kwargs, np.nanmax)
)
def max(self, axis=None, out=None, **kwargs):
return super().max(
axis=axis, out=out, **self._reduce_defaults(kwargs, np.nanmin)
)
def nonzero(self):
unmasked_nonzero = self.unmasked.nonzero()
if self.ndim >= 1:
not_masked = ~self.mask[unmasked_nonzero]
return tuple(u[not_masked] for u in unmasked_nonzero)
else:
return unmasked_nonzero if not self.mask else np.nonzero(0)
def compress(self, condition, axis=None, out=None):
if out is not None:
raise NotImplementedError("cannot yet give output")
return self._apply("compress", condition, axis=axis)
def repeat(self, repeats, axis=None):
return self._apply("repeat", repeats, axis=axis)
def choose(self, choices, out=None, mode="raise"):
# Let __array_function__ take care since choices can be masked too.
return np.choose(self, choices, out=out, mode=mode)
if NUMPY_LT_1_22:
def argmin(self, axis=None, out=None):
# Todo: should this return a masked integer array, with masks
# if all elements were masked?
at_min = self == self.min(axis=axis, keepdims=True)
return at_min.filled(False).argmax(axis=axis, out=out)
def argmax(self, axis=None, out=None):
at_max = self == self.max(axis=axis, keepdims=True)
return at_max.filled(False).argmax(axis=axis, out=out)
else:
def argmin(self, axis=None, out=None, *, keepdims=False):
# Todo: should this return a masked integer array, with masks
# if all elements were masked?
at_min = self == self.min(axis=axis, keepdims=True)
return at_min.filled(False).argmax(axis=axis, out=out, keepdims=keepdims)
def argmax(self, axis=None, out=None, *, keepdims=False):
at_max = self == self.max(axis=axis, keepdims=True)
return at_max.filled(False).argmax(axis=axis, out=out, keepdims=keepdims)
def argsort(self, axis=-1, kind=None, order=None):
"""Returns the indices that would sort an array.
Perform an indirect sort along the given axis on both the array
and the mask, with masked items being sorted to the end.
Parameters
----------
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis).
If None, the flattened array is used.
kind : str or None, ignored.
The kind of sort. Present only to allow subclasses to work.
order : str or list of str.
For an array with fields defined, the fields to compare first,
second, etc. A single field can be specified as a string, and not
all fields need be specified, but unspecified fields will still be
used, in dtype order, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sorts along the specified ``axis``. Use
``np.take_along_axis(self, index_array, axis=axis)`` to obtain
the sorted array.
"""
if axis is None:
data = self.ravel()
axis = -1
else:
data = self
if self.dtype.names:
# As done inside the argsort implementation in multiarray/methods.c.
if order is None:
order = self.dtype.names
else:
order = np.core._internal._newnames(self.dtype, order)
keys = tuple(data[name] for name in order[::-1])
elif order is not None:
raise ValueError("Cannot specify order when the array has no fields.")
else:
keys = (data,)
return np.lexsort(keys, axis=axis)
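# Editor's note: illustrative sketch; masked entries sort to the end.
# >>> ma = Masked(np.array([3, 1, 2]), mask=[False, True, False])
# >>> ma.argsort()
# array([2, 0, 1])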
def sort(self, axis=-1, kind=None, order=None):
"""Sort an array in-place. Refer to `numpy.sort` for full documentation."""
# TODO: probably possible to do this faster than going through argsort!
indices = self.argsort(axis, kind=kind, order=order)
self[:] = np.take_along_axis(self, indices, axis=axis)
def argpartition(self, kth, axis=-1, kind="introselect", order=None):
# TODO: should be possible to do this faster than with a full argsort!
return self.argsort(axis=axis, order=order)
def partition(self, kth, axis=-1, kind="introselect", order=None):
# TODO: should be possible to do this faster than with a full argsort!
return self.sort(axis=axis, order=order)
def cumsum(self, axis=None, dtype=None, out=None):
if axis is None:
self = self.ravel()
axis = 0
return np.add.accumulate(self, axis=axis, dtype=dtype, out=out)
def cumprod(self, axis=None, dtype=None, out=None):
if axis is None:
self = self.ravel()
axis = 0
return np.multiply.accumulate(self, axis=axis, dtype=dtype, out=out)
def clip(self, min=None, max=None, out=None, **kwargs):
"""Return an array whose values are limited to ``[min, max]``.
Like `~numpy.clip`, but any masked values in ``min`` and ``max``
are ignored for clipping. The mask of the input array is propagated.
"""
# TODO: implement this at the ufunc level.
dmin, mmin = self._get_data_and_mask(min)
dmax, mmax = self._get_data_and_mask(max)
if mmin is None and mmax is None:
# Fast path for unmasked max, min.
return super().clip(min, max, out=out, **kwargs)
masked_out = np.positive(self, out=out)
out = masked_out.unmasked
if dmin is not None:
np.maximum(out, dmin, out=out, where=True if mmin is None else ~mmin)
if dmax is not None:
np.minimum(out, dmax, out=out, where=True if mmax is None else ~mmax)
return masked_out
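# Editor's note: illustrative sketch; a masked bound is ignored, so only
# the unmasked upper bound takes effect here, and the input mask is kept.
# >>> ma = Masked(np.array([1., 5., 9.]), mask=[False, True, False])
# >>> clipped = ma.clip(Masked(3., mask=True), 8.)
# >>> clipped.unmasked, clipped.mask
# (array([1., 5., 8.]), array([False,  True, False]))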
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
# Implementation based on that in numpy/core/_methods.py
# Cast bool, unsigned int, and int to float64 by default,
# and do float16 at higher precision.
is_float16_result = False
if dtype is None:
if issubclass(self.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype("f8")
elif issubclass(self.dtype.type, np.float16):
dtype = np.dtype("f4")
is_float16_result = out is None
where = ~self.mask & where
result = self.sum(
axis=axis, dtype=dtype, out=out, keepdims=keepdims, where=where
)
n = np.add.reduce(where, axis=axis, keepdims=keepdims)
# catch the case when an axis is fully masked to prevent div by zero:
neq0 = n == 0
n += neq0
result /= n
# correct fully-masked slice results to what is expected for 0/0 division
result.unmasked[neq0] = np.nan
if is_float16_result:
result = result.astype(self.dtype)
return result
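# Editor's note: illustrative sketch; masked elements are excluded, so the
# mean is taken over the unmasked values only.
# >>> ma = Masked(np.array([1., 2., 6.]), mask=[False, True, False])
# >>> ma.mean()  # only 1. and 6. contribute -> 3.5 (unmasked result)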
def var(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
where_final = ~self.mask & where
# Simplified implementation based on that in numpy/core/_methods.py
n = np.add.reduce(where_final, axis=axis, keepdims=keepdims)[...]
# Cast bool, unsigned int, and int to float64 by default.
if dtype is None and issubclass(self.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype("f8")
mean = self.mean(axis=axis, dtype=dtype, keepdims=True, where=where)
x = self - mean
x *= x.conjugate() # Conjugate just returns x if not complex.
result = x.sum(
axis=axis, dtype=dtype, out=out, keepdims=keepdims, where=where_final
)
n -= ddof
n = np.maximum(n, 0, out=n)
result /= n
result._mask |= n == 0
return result
def std(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
result = self.var(
axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims, where=where
)
return np.sqrt(result, out=result)
def __bool__(self):
# First get result from array itself; this will error if not a scalar.
result = super().__bool__()
return result and not self.mask
def any(self, axis=None, out=None, keepdims=False, *, where=True):
return np.logical_or.reduce(
self, axis=axis, out=out, keepdims=keepdims, where=~self.mask & where
)
def all(self, axis=None, out=None, keepdims=False, *, where=True):
return np.logical_and.reduce(
self, axis=axis, out=out, keepdims=keepdims, where=~self.mask & where
)
# Following overrides needed since somehow the ndarray implementation
# does not actually call these.
def __str__(self):
return np.array_str(self)
def __repr__(self):
return np.array_repr(self)
def __format__(self, format_spec):
string = super().__format__(format_spec)
if self.shape == () and self.mask:
n = min(3, max(1, len(string)))
return " " * (len(string) - n) + "\u2014" * n
else:
return string
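# Editor's note: illustrative sketch; when printed, a masked element is
# replaced by em-dashes of roughly the width of its formatted value,
# e.g. print(Masked(np.array([1., 2., 3.]), mask=[False, True, False]))
# gives something like "[1. —— 3.]".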
class MaskedRecarray(np.recarray, MaskedNDArray, data_cls=np.recarray):
# Explicit definition since we need to override some methods.
def __array_finalize__(self, obj):
# recarray.__array_finalize__ does not do super, so we do it
# explicitly.
super().__array_finalize__(obj)
super(np.recarray, self).__array_finalize__(obj)
# __getattribute__, __setattr__, and field use these somewhat
# obscure ndarray methods. TODO: override in MaskedNDArray?
def getfield(self, dtype, offset=0):
for field, info in self.dtype.fields.items():
if offset == info[1] and dtype == info[0]:
return self[field]
raise NotImplementedError("can only get existing field from structured dtype.")
def setfield(self, val, dtype, offset=0):
for field, info in self.dtype.fields.items():
if offset == info[1] and dtype == info[0]:
self[field] = val
return
raise NotImplementedError("can only set existing field from structured dtype.")
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Helpers for letting numpy functions interact with Masked arrays.
The module supplies helper routines for numpy functions that propagate
masks appropriately, for use in the ``__array_function__``
implementation of `~astropy.utils.masked.MaskedNDArray`. They are not
very useful on their own, but the ones with docstrings are included in
the documentation so that there is a place to find out how the mask is
interpreted.
"""
import numpy as np
from astropy.units.quantity_helper.function_helpers import FunctionAssigner
from astropy.utils.compat import NUMPY_LT_1_23, NUMPY_LT_1_24
# This module should not really be imported, but we define __all__
# such that sphinx can typeset the functions with docstrings.
# The latter are added to __all__ at the end.
__all__ = [
"MASKED_SAFE_FUNCTIONS",
"APPLY_TO_BOTH_FUNCTIONS",
"DISPATCHED_FUNCTIONS",
"UNSUPPORTED_FUNCTIONS",
]
MASKED_SAFE_FUNCTIONS = set()
"""Set of functions that work fine on Masked classes already.
Most of these internally use `numpy.ufunc` or other functions that
are already covered.
"""
APPLY_TO_BOTH_FUNCTIONS = {}
"""Dict of functions that should apply to both data and mask.
The `dict` is keyed by the numpy function and the values are functions
that take the input arguments of the numpy function and organize these
for passing the data and mask to the numpy function.
Returns
-------
data_args : tuple
Arguments to pass on to the numpy function for the unmasked data.
mask_args : tuple
Arguments to pass on to the numpy function for the masked data.
kwargs : dict
Keyword arguments to pass on for both unmasked data and mask.
out : `~astropy.utils.masked.Masked` instance or None
Optional instance in which to store the output.
Raises
------
NotImplementedError
When an argument is masked when it should not be, or vice versa.
"""
DISPATCHED_FUNCTIONS = {}
"""Dict of functions that provide the numpy function's functionality.
These are for more complicated versions where the numpy function itself
cannot easily be used. It should return either the result of the
function, or a tuple consisting of the unmasked result, the mask for the
result and a possible output instance.
It should raise `NotImplementedError` if one of the arguments is masked
when it should not be or vice versa.
"""
UNSUPPORTED_FUNCTIONS = set()
"""Set of numpy functions that are not supported for masked arrays.
For most, masked input simply makes no sense, but for others it may have
been lack of time. Issues or PRs for support for functions are welcome.
"""
# Almost all from np.core.fromnumeric defer to methods so are OK.
MASKED_SAFE_FUNCTIONS |= {
getattr(np, name)
for name in np.core.fromnumeric.__all__
if name not in {"choose", "put", "resize", "searchsorted", "where", "alen"}
}
MASKED_SAFE_FUNCTIONS |= {
# built-in from multiarray
np.may_share_memory, np.can_cast, np.min_scalar_type, np.result_type,
np.shares_memory,
# np.core.arrayprint
np.array_repr,
# np.core.function_base
np.linspace, np.logspace, np.geomspace,
# np.core.numeric
np.isclose, np.allclose, np.flatnonzero, np.argwhere,
# np.core.shape_base
np.atleast_1d, np.atleast_2d, np.atleast_3d, np.stack, np.hstack, np.vstack,
# np.lib.function_base
np.average, np.diff, np.extract, np.meshgrid, np.trapz, np.gradient,
# np.lib.index_tricks
np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
np.fill_diagonal,
# np.lib.shape_base
np.column_stack, np.row_stack, np.dstack,
np.array_split, np.split, np.hsplit, np.vsplit, np.dsplit,
np.expand_dims, np.apply_along_axis, np.kron, np.tile,
np.take_along_axis, np.put_along_axis,
# np.lib.type_check (all but asfarray, nan_to_num)
np.iscomplexobj, np.isrealobj, np.imag, np.isreal, np.real,
np.real_if_close, np.common_type,
# np.lib.ufunclike
np.fix, np.isneginf, np.isposinf,
# np.lib.function_base
np.angle, np.i0,
} # fmt: skip
IGNORED_FUNCTIONS = {
# I/O - useless for Masked, since no way to store the mask.
np.save, np.savez, np.savetxt, np.savez_compressed,
# Polynomials
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
IGNORED_FUNCTIONS |= {
np.pad, np.searchsorted, np.digitize,
np.is_busday, np.busday_count, np.busday_offset,
# numpy.lib.function_base
np.cov, np.corrcoef, np.trim_zeros,
# numpy.core.numeric
np.correlate, np.convolve,
# numpy.lib.histograms
np.histogram, np.histogram2d, np.histogramdd, np.histogram_bin_edges,
# TODO!!
np.dot, np.vdot, np.inner, np.tensordot, np.cross,
np.einsum, np.einsum_path,
} # fmt: skip
# Really should do these...
IGNORED_FUNCTIONS |= {
getattr(np, setopsname) for setopsname in np.lib.arraysetops.__all__
}
if NUMPY_LT_1_23:
IGNORED_FUNCTIONS |= {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
# Explicitly unsupported functions
UNSUPPORTED_FUNCTIONS |= {
np.unravel_index,
np.ravel_multi_index,
np.ix_,
}
# No support for the functions also not supported by Quantity
# (io, polynomial, etc.).
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
apply_to_both = FunctionAssigner(APPLY_TO_BOTH_FUNCTIONS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
def _get_data_and_masks(*args):
"""Separate out arguments into tuples of data and masks.
An all-False mask is created if an argument does not have a mask.
"""
from .core import Masked
data, masks = Masked._get_data_and_masks(*args)
masks = tuple(
m if m is not None else np.zeros(np.shape(d), bool) for d, m in zip(data, masks)
)
return data, masks
# Following are simple ufunc-like functions which should just copy the mask.
@dispatched_function
def datetime_as_string(arr, *args, **kwargs):
return (np.datetime_as_string(arr.unmasked, *args, **kwargs), arr.mask.copy(), None)
@dispatched_function
def sinc(x):
return np.sinc(x.unmasked), x.mask.copy(), None
@dispatched_function
def iscomplex(x):
return np.iscomplex(x.unmasked), x.mask.copy(), None
@dispatched_function
def unwrap(p, *args, **kwargs):
return np.unwrap(p.unmasked, *args, **kwargs), p.mask.copy(), None
@dispatched_function
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
data = np.nan_to_num(x.unmasked, copy=copy, nan=nan, posinf=posinf, neginf=neginf)
return (data, x.mask.copy(), None) if copy else x
# Following are simple functions related to shapes, where the same function
# should be applied to the data and the mask. They cannot all share the
# same helper, because the first arguments have different names.
@apply_to_both(
helps={np.copy, np.asfarray, np.resize, np.moveaxis, np.rollaxis, np.roll}
)
def masked_a_helper(a, *args, **kwargs):
data, mask = _get_data_and_masks(a)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.flip, np.flipud, np.fliplr, np.rot90, np.triu, np.tril})
def masked_m_helper(m, *args, **kwargs):
data, mask = _get_data_and_masks(m)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.diag, np.diagflat})
def masked_v_helper(v, *args, **kwargs):
data, mask = _get_data_and_masks(v)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.delete})
def masked_arr_helper(array, *args, **kwargs):
data, mask = _get_data_and_masks(array)
return data + args, mask + args, kwargs, None
@apply_to_both
def broadcast_to(array, shape, subok=False):
"""Broadcast array to the given shape.
Like `numpy.broadcast_to`, and applied to both unmasked data and mask.
Note that ``subok`` is taken to mean whether or not subclasses of
the unmasked data and mask are allowed, i.e., for ``subok=False``,
a `~astropy.utils.masked.MaskedNDArray` will be returned.
"""
data, mask = _get_data_and_masks(array)
return data, mask, dict(shape=shape, subok=subok), None
@dispatched_function
def outer(a, b, out=None):
return np.multiply.outer(np.ravel(a), np.ravel(b), out=out)
@dispatched_function
def empty_like(prototype, dtype=None, order="K", subok=True, shape=None):
"""Return a new array with the same shape and type as a given array.
Like `numpy.empty_like`, but will add an empty mask.
"""
unmasked = np.empty_like(
prototype.unmasked, dtype=dtype, order=order, subok=subok, shape=shape
)
if dtype is not None:
dtype = (
np.ma.make_mask_descr(unmasked.dtype)
if unmasked.dtype.names
else np.dtype("?")
)
mask = np.empty_like(
prototype.mask, dtype=dtype, order=order, subok=subok, shape=shape
)
return unmasked, mask, None
@dispatched_function
def zeros_like(a, dtype=None, order="K", subok=True, shape=None):
"""Return an array of zeros with the same shape and type as a given array.
Like `numpy.zeros_like`, but will add an all-false mask.
"""
unmasked = np.zeros_like(
a.unmasked, dtype=dtype, order=order, subok=subok, shape=shape
)
return unmasked, False, None
@dispatched_function
def ones_like(a, dtype=None, order="K", subok=True, shape=None):
"""Return an array of ones with the same shape and type as a given array.
Like `numpy.ones_like`, but will add an all-false mask.
"""
unmasked = np.ones_like(
a.unmasked, dtype=dtype, order=order, subok=subok, shape=shape
)
return unmasked, False, None
@dispatched_function
def full_like(a, fill_value, dtype=None, order="K", subok=True, shape=None):
"""Return a full array with the same shape and type as a given array.
Like `numpy.full_like`, but with a mask that is also set.
If ``fill_value`` is `numpy.ma.masked`, the data will be left unset
(i.e., as created by `numpy.empty_like`).
"""
result = np.empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
result[...] = fill_value
return result
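# Editor's note: illustrative sketch; filling with np.ma.masked masks every
# element while the underlying data stays uninitialized (as for empty_like).
# >>> a = Masked(np.arange(3.))
# >>> np.full_like(a, np.ma.masked).mask
# array([ True,  True,  True])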
@dispatched_function
def put(a, ind, v, mode="raise"):
"""Replaces specified elements of an array with given values.
Like `numpy.put`, but for masked array ``a`` and possibly masked
value ``v``. Masked indices ``ind`` are not supported.
"""
from astropy.utils.masked import Masked
if isinstance(ind, Masked) or not isinstance(a, Masked):
raise NotImplementedError
v_data, v_mask = a._get_data_and_mask(v)
if v_data is not None:
np.put(a.unmasked, ind, v_data, mode=mode)
# v_mask of None will be correctly interpreted as False.
np.put(a.mask, ind, v_mask, mode=mode)
@dispatched_function
def putmask(a, mask, values):
"""Changes elements of an array based on conditional and input values.
Like `numpy.putmask`, but for masked array ``a`` and possibly masked
``values``. Masked ``mask`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(mask, Masked) or not isinstance(a, Masked):
raise NotImplementedError
values_data, values_mask = a._get_data_and_mask(values)
if values_data is not None:
np.putmask(a.unmasked, mask, values_data)
np.putmask(a.mask, mask, values_mask)
@dispatched_function
def place(arr, mask, vals):
"""Change elements of an array based on conditional and input values.
Like `numpy.place`, but for masked array ``arr`` and possibly masked
``vals``. Masked ``mask`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(mask, Masked) or not isinstance(arr, Masked):
raise NotImplementedError
vals_data, vals_mask = arr._get_data_and_mask(vals)
if vals_data is not None:
np.place(arr.unmasked, mask, vals_data)
np.place(arr.mask, mask, vals_mask)
@dispatched_function
def copyto(dst, src, casting="same_kind", where=True):
"""Copies values from one array to another, broadcasting as necessary.
Like `numpy.copyto`, but for masked destination ``dst`` and possibly
masked source ``src``.
"""
from astropy.utils.masked import Masked
if not isinstance(dst, Masked) or isinstance(where, Masked):
raise NotImplementedError
src_data, src_mask = dst._get_data_and_mask(src)
if src_data is not None:
np.copyto(dst.unmasked, src_data, casting=casting, where=where)
if src_mask is not None:
np.copyto(dst.mask, src_mask, where=where)
@dispatched_function
def packbits(a, *args, **kwargs):
result = np.packbits(a.unmasked, *args, **kwargs)
mask = np.packbits(a.mask, *args, **kwargs).astype(bool)
return result, mask, None
@dispatched_function
def unpackbits(a, *args, **kwargs):
result = np.unpackbits(a.unmasked, *args, **kwargs)
mask = np.zeros(a.shape, dtype="u1")
mask[a.mask] = 255
mask = np.unpackbits(mask, *args, **kwargs).astype(bool)
return result, mask, None
@dispatched_function
def bincount(x, weights=None, minlength=0):
"""Count number of occurrences of each value in array of non-negative ints.
Like `numpy.bincount`, but masked entries in ``x`` will be skipped.
Any masked entries in ``weights`` will lead the corresponding bin to
be masked.
"""
from astropy.utils.masked import Masked
if weights is not None:
weights = np.asanyarray(weights)
if isinstance(x, Masked) and x.ndim <= 1:
# let other dimensions lead to errors.
if weights is not None and weights.ndim == x.ndim:
weights = weights[~x.mask]
x = x.unmasked[~x.mask]
mask = None
if weights is not None:
weights, w_mask = Masked._get_data_and_mask(weights)
if w_mask is not None:
mask = np.bincount(x, w_mask.astype(int), minlength=minlength).astype(bool)
result = np.bincount(x, weights, minlength=minlength)
return result, mask, None
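# Editor's note: illustrative, doctest-style sketch (assumes numpy as ``np``
# and ``Masked`` from astropy.utils.masked); masked entries are dropped.
# >>> x = Masked(np.array([0, 1, 1, 2]), mask=[False, False, True, False])
# >>> np.bincount(x)
# array([1, 1, 1])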
@dispatched_function
def msort(a):
result = a.copy()
result.sort(axis=0)
return result
@dispatched_function
def sort_complex(a):
# Just a copy of function_base.sort_complex, to avoid the asarray.
b = a.copy()
b.sort()
if not issubclass(b.dtype.type, np.complexfloating): # pragma: no cover
if b.dtype.char in "bhBH":
return b.astype("F")
elif b.dtype.char == "g":
return b.astype("G")
else:
return b.astype("D")
else:
return b
@dispatched_function
def concatenate(arrays, axis=0, out=None, dtype=None, casting="same_kind"):
data, masks = _get_data_and_masks(*arrays)
if out is None:
return (
np.concatenate(data, axis=axis, dtype=dtype, casting=casting),
np.concatenate(masks, axis=axis),
None,
)
else:
from astropy.utils.masked import Masked
if not isinstance(out, Masked):
raise NotImplementedError
np.concatenate(masks, out=out.mask, axis=axis)
np.concatenate(data, out=out.unmasked, axis=axis, dtype=dtype, casting=casting)
return out
@apply_to_both
def append(arr, values, axis=None):
data, masks = _get_data_and_masks(arr, values)
return data, masks, dict(axis=axis), None
@dispatched_function
def block(arrays):
# We need to override block since the numpy implementation can take two
# different paths, one for concatenation, one for creating a large empty
# result array in which parts are set. Each assumes array input and
# cannot be used directly. Since it would be very costly to inspect all
# arrays and then turn them back into a nested list, we just copy here the
# second implementation, np.core.shape_base._block_slicing, since it is
# shortest and easiest.
from astropy.utils.masked import Masked
arrays, list_ndim, result_ndim, final_size = np.core.shape_base._block_setup(arrays)
shape, slices, arrays = np.core.shape_base._block_info_recursion(
arrays, list_ndim, result_ndim
)
dtype = np.result_type(*[arr.dtype for arr in arrays])
F_order = all(arr.flags["F_CONTIGUOUS"] for arr in arrays)
C_order = all(arr.flags["C_CONTIGUOUS"] for arr in arrays)
order = "F" if F_order and not C_order else "C"
result = Masked(np.empty(shape=shape, dtype=dtype, order=order))
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result
@dispatched_function
def broadcast_arrays(*args, subok=True):
"""Broadcast arrays to a common shape.
Like `numpy.broadcast_arrays`, applied to both unmasked data and masks.
Note that ``subok`` is taken to mean whether or not subclasses of
the unmasked data and masks are allowed, i.e., for ``subok=False``,
`~astropy.utils.masked.MaskedNDArray` instances will be returned.
"""
from .core import Masked
are_masked = [isinstance(arg, Masked) for arg in args]
data = [
(arg.unmasked if is_masked else arg) for arg, is_masked in zip(args, are_masked)
]
results = np.broadcast_arrays(*data, subok=subok)
shape = results[0].shape if isinstance(results, list) else results.shape
masks = [
(np.broadcast_to(arg.mask, shape, subok=subok) if is_masked else None)
for arg, is_masked in zip(args, are_masked)
]
results = [
(Masked(result, mask) if mask is not None else result)
for (result, mask) in zip(results, masks)
]
return results if len(results) > 1 else results[0]
@apply_to_both
def insert(arr, obj, values, axis=None):
"""Insert values along the given axis before the given indices.
Like `numpy.insert` but for possibly masked ``arr`` and ``values``.
Masked ``obj`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(obj, Masked) or not isinstance(arr, Masked):
raise NotImplementedError
(arr_data, val_data), (arr_mask, val_mask) = _get_data_and_masks(arr, values)
return ((arr_data, obj, val_data, axis), (arr_mask, obj, val_mask, axis), {}, None)
@dispatched_function
def count_nonzero(a, axis=None, *, keepdims=False):
"""Counts the number of non-zero values in the array ``a``.
Like `numpy.count_nonzero`, with masked values counted as 0 or `False`.
"""
filled = a.filled(np.zeros((), a.dtype))
return np.count_nonzero(filled, axis, keepdims=keepdims)
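# Editor's note: illustrative sketch; masked elements count as zero.
# >>> a = Masked(np.array([0, 1, 2, 3]), mask=[False, False, True, False])
# >>> np.count_nonzero(a)
# 2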
def _masked_median_1d(a, overwrite_input):
# TODO: need an in-place mask-sorting option.
unmasked = a.unmasked[~a.mask]
if unmasked.size:
return a.from_unmasked(np.median(unmasked, overwrite_input=overwrite_input))
else:
return a.from_unmasked(np.zeros_like(a.unmasked, shape=(1,))[0], mask=True)
def _masked_median(a, axis=None, out=None, overwrite_input=False):
# As for np.nanmedian, but without a fast option as yet.
if axis is None or a.ndim == 1:
part = a.ravel()
result = _masked_median_1d(part, overwrite_input)
else:
result = np.apply_along_axis(_masked_median_1d, axis, a, overwrite_input)
if out is not None:
out[...] = result
return result
@dispatched_function
def median(a, axis=None, out=None, **kwargs):
from astropy.utils.masked import Masked
if out is not None and not isinstance(out, Masked):
raise NotImplementedError
a = Masked(a)
if NUMPY_LT_1_24:
keepdims = kwargs.pop("keepdims", False)
r, k = np.lib.function_base._ureduce(
a, func=_masked_median, axis=axis, out=out, **kwargs
)
return (r.reshape(k) if keepdims else r) if out is None else out
else:
return np.lib.function_base._ureduce(
a, func=_masked_median, axis=axis, out=out, **kwargs
)
def _masked_quantile_1d(a, q, **kwargs):
"""
Private function for rank 1 arrays. Compute quantile ignoring NaNs.
See nanpercentile for parameter usage.
"""
unmasked = a.unmasked[~a.mask]
if unmasked.size:
result = np.lib.function_base._quantile_unchecked(unmasked, q, **kwargs)
return a.from_unmasked(result)
else:
return a.from_unmasked(np.zeros_like(a.unmasked, shape=q.shape), True)
def _masked_quantile(a, q, axis=None, out=None, **kwargs):
# As for np.nanmedian, but without a fast option as yet.
if axis is None or a.ndim == 1:
part = a.ravel()
result = _masked_quantile_1d(part, q, **kwargs)
else:
result = np.apply_along_axis(_masked_quantile_1d, axis, a, q, **kwargs)
# apply_along_axis fills in collapsed axis with results.
# Move that axis to the beginning to match percentile's
# convention.
if q.ndim != 0:
result = np.moveaxis(result, axis, 0)
if out is not None:
out[...] = result
return result
@dispatched_function
def quantile(a, q, axis=None, out=None, **kwargs):
from astropy.utils.masked import Masked
if isinstance(q, Masked) or out is not None and not isinstance(out, Masked):
raise NotImplementedError
a = Masked(a)
q = np.asanyarray(q)
if not np.lib.function_base._quantile_is_valid(q):
raise ValueError("Quantiles must be in the range [0, 1]")
if NUMPY_LT_1_24:
keepdims = kwargs.pop("keepdims", False)
r, k = np.lib.function_base._ureduce(
a, func=_masked_quantile, q=q, axis=axis, out=out, **kwargs
)
return (r.reshape(q.shape + k) if keepdims else r) if out is None else out
else:
return np.lib.function_base._ureduce(
a, func=_masked_quantile, q=q, axis=axis, out=out, **kwargs
)
@dispatched_function
def percentile(a, q, *args, **kwargs):
q = np.true_divide(q, 100)
return quantile(a, q, *args, **kwargs)
@dispatched_function
def array_equal(a1, a2, equal_nan=False):
(a1d, a2d), (a1m, a2m) = _get_data_and_masks(a1, a2)
if a1d.shape != a2d.shape:
return False
equal = a1d == a2d
if equal_nan:
equal |= np.isnan(a1d) & np.isnan(a2d)
return bool((equal | a1m | a2m).all())
@dispatched_function
def array_equiv(a1, a2):
return bool((a1 == a2).all())
@dispatched_function
def where(condition, *args):
from astropy.utils.masked import Masked
if not args:
return condition.nonzero(), None, None
condition, c_mask = Masked._get_data_and_mask(condition)
data, masks = _get_data_and_masks(*args)
unmasked = np.where(condition, *data)
mask = np.where(condition, *masks)
if c_mask is not None:
mask |= c_mask
return Masked(unmasked, mask=mask)
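# Editor's note: illustrative sketch; the mask is selected alongside the
# data, and a masked condition would additionally mask the output.
# >>> cond = np.array([True, True, False])
# >>> a = Masked(np.array([1, 2, 3]), mask=[False, True, False])
# >>> res = np.where(cond, a, np.array([10, 20, 30]))
# >>> res.unmasked, res.mask
# (array([ 1,  2, 30]), array([False,  True, False]))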
@dispatched_function
def choose(a, choices, out=None, mode="raise"):
"""Construct an array from an index array and a set of arrays to choose from.
Like `numpy.choose`. Masked indices in ``a`` will lead to masked output
values and underlying data values are ignored if out of bounds (for
``mode='raise'``). Any values masked in ``choices`` will be propagated
if chosen.
"""
from astropy.utils.masked import Masked
a_data, a_mask = Masked._get_data_and_mask(a)
if a_mask is not None and mode == "raise":
# Avoid raising on masked indices.
a_data = a.filled(fill_value=0)
kwargs = {"mode": mode}
if out is not None:
if not isinstance(out, Masked):
raise NotImplementedError
kwargs["out"] = out.unmasked
data, masks = _get_data_and_masks(*choices)
data_chosen = np.choose(a_data, data, **kwargs)
if out is not None:
kwargs["out"] = out.mask
mask_chosen = np.choose(a_data, masks, **kwargs)
if a_mask is not None:
mask_chosen |= a_mask
return Masked(data_chosen, mask_chosen) if out is None else out
@apply_to_both
def select(condlist, choicelist, default=0):
"""Return an array drawn from elements in choicelist, depending on conditions.
Like `numpy.select`, with masks in ``choicelist`` propagated.
Any masks in ``condlist`` are ignored.
"""
from astropy.utils.masked import Masked
condlist = [c.unmasked if isinstance(c, Masked) else c for c in condlist]
data_list, mask_list = _get_data_and_masks(*choicelist)
default = Masked(default) if default is not np.ma.masked else Masked(0, mask=True)
return (
(condlist, data_list, default.unmasked),
(condlist, mask_list, default.mask),
{},
None,
)
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
"""Evaluate a piecewise-defined function.
Like `numpy.piecewise` but for masked input array ``x``.
Any masks in ``condlist`` are ignored.
"""
# Copied implementation from numpy.lib.function_base.piecewise,
# just to ensure output is Masked.
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if np.isscalar(condlist) or (
not isinstance(condlist[0], (list, np.ndarray)) and x.ndim != 0
): # pragma: no cover
condlist = [condlist]
condlist = np.array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
f"with {n} condition(s), either {n} or {n + 1} functions are expected"
)
# The one real change...
y = np.zeros_like(x)
where = []
what = []
for k in range(n):
item = funclist[k]
if not callable(item):
where.append(condlist[k])
what.append(item)
else:
vals = x[condlist[k]]
if vals.size > 0:
where.append(condlist[k])
what.append(item(vals, *args, **kw))
for item, value in zip(where, what):
y[item] = value
return y
@dispatched_function
def interp(x, xp, fp, *args, **kwargs):
"""One-dimensional linear interpolation.
Like `numpy.interp`, but any masked points in ``xp`` and ``fp``
are ignored. Any masked values in ``x`` will still be evaluated,
but masked on output.
"""
from astropy.utils.masked import Masked
xd, xm = Masked._get_data_and_mask(x)
if isinstance(xp, Masked) or isinstance(fp, Masked):
(xp, fp), (xpm, fpm) = _get_data_and_masks(xp, fp)
if xp.ndim == fp.ndim == 1:
# Avoid making arrays 1-D; will just raise below.
m = xpm | fpm
xp = xp[~m]
fp = fp[~m]
result = np.interp(xd, xp, fp, *args, **kwargs)
return result if xm is None else Masked(result, xm.copy())
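# Editor's note: illustrative sketch; the masked sample point is dropped,
# so the interpolation uses only the remaining (xp, fp) pairs.
# >>> xp = Masked(np.array([0., 1., 2.]), mask=[False, True, False])
# >>> fp = np.array([0., 100., 20.])
# >>> np.interp(np.array([0.5, 1.5]), xp, fp)
# array([ 5., 15.])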
@dispatched_function
def lexsort(keys, axis=-1):
"""Perform an indirect stable sort using a sequence of keys.
Like `numpy.lexsort` but for possibly masked ``keys``. Masked
values are sorted towards the end for each key.
"""
# Sort masks to the end.
from .core import Masked
new_keys = []
for key in keys:
if isinstance(key, Masked):
# If there are other keys below, want to be sure that
# for masked values, those other keys set the order.
new_key = key.unmasked
if new_keys and key.mask.any():
new_key = new_key.copy()
new_key[key.mask] = new_key.flat[0]
new_keys.extend([new_key, key.mask])
else:
new_keys.append(key)
return np.lexsort(new_keys, axis=axis)
@dispatched_function
def apply_over_axes(func, a, axes):
# Copied straight from numpy/lib/shape_base, just to omit its
# val = asarray(a); if only it had been asanyarray, or just not there
# since a is assumed to be an array in the next line...
# Which is what we do here - we can only get here if it is Masked.
val = a
N = a.ndim
if np.array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = np.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError(
"function is not returning an array of the correct shape"
)
return val
class MaskedFormat:
"""Formatter for masked array scalars.
For use in `numpy.array2string`, wrapping the regular formatters such
that if a value is masked, its formatted string is replaced.
Typically initialized using the ``from_data`` class method.
"""
def __init__(self, format_function):
self.format_function = format_function
# Special case for structured void and subarray: we need to make all the
# format functions for the items masked as well.
# TODO: maybe a separate class is more logical?
ffs = getattr(format_function, "format_functions", None)
if ffs:
# StructuredVoidFormat: multiple format functions to be changed.
self.format_function.format_functions = [MaskedFormat(ff) for ff in ffs]
ff = getattr(format_function, "format_function", None)
if ff:
# SubarrayFormat: change format function for the elements.
self.format_function.format_function = MaskedFormat(ff)
def __call__(self, x):
if x.dtype.names:
# The replacement of x with a list is needed because the function
# inside StructuredVoidFormat iterates over x, which works for an
# np.void but not an array scalar.
return self.format_function([x[field] for field in x.dtype.names])
if x.shape:
# For a subarray pass on the data directly, since the
# items will be iterated on inside the function.
return self.format_function(x)
# Single element: first just typeset it normally, replace with masked
# string if needed.
string = self.format_function(x.unmasked[()])
if x.mask:
# Strikethrough would be neat, but terminal needs a different
# formatting than, say, jupyter notebook.
# return "\x1B[9m"+string+"\x1B[29m"
# return ''.join(s+'\u0336' for s in string)
n = min(3, max(1, len(string)))
return " " * (len(string) - n) + "\u2014" * n
else:
return string
@classmethod
def from_data(cls, data, **options):
from numpy.core.arrayprint import _get_format_function
return cls(_get_format_function(data, **options))
def _array2string(a, options, separator=" ", prefix=""):
# Mostly copied from numpy.core.arrayprint, except:
# - The format function is wrapped in a mask-aware class;
# - Arrays scalars are not cast as arrays.
from numpy.core.arrayprint import _formatArray, _leading_trailing
data = np.asarray(a)
if a.size > options["threshold"]:
summary_insert = "..."
data = _leading_trailing(data, options["edgeitems"])
else:
summary_insert = ""
# find the right formatting function for the array
format_function = MaskedFormat.from_data(data, **options)
# skip over "["
next_line_prefix = " "
# skip over array(
next_line_prefix += " " * len(prefix)
lst = _formatArray(
a,
format_function,
options["linewidth"],
next_line_prefix,
separator,
options["edgeitems"],
summary_insert,
options["legacy"],
)
return lst
@dispatched_function
def array2string(
a,
max_line_width=None,
precision=None,
suppress_small=None,
separator=" ",
prefix="",
style=np._NoValue,
formatter=None,
threshold=None,
edgeitems=None,
sign=None,
floatmode=None,
suffix="",
):
# Copied from numpy.core.arrayprint, but using _array2string above.
from numpy.core.arrayprint import _format_options, _make_options_dict
overrides = _make_options_dict(
precision,
threshold,
edgeitems,
max_line_width,
suppress_small,
None,
None,
sign,
formatter,
floatmode,
)
options = _format_options.copy()
options.update(overrides)
options["linewidth"] -= len(suffix)
# treat as a null array if any of shape elements == 0
if a.size == 0:
return "[]"
return _array2string(a, options, separator, prefix)
@dispatched_function
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
# Override to avoid special treatment of array scalars.
return array2string(a, max_line_width, precision, suppress_small, " ", "")
# For the nanfunctions, we just treat any nan as an additional mask.
_nanfunc_fill_values = {"nansum": 0, "nancumsum": 0, "nanprod": 1, "nancumprod": 1}
def masked_nanfunc(nanfuncname):
np_func = getattr(np, nanfuncname[3:])
fill_value = _nanfunc_fill_values.get(nanfuncname, None)
def nanfunc(a, *args, **kwargs):
from astropy.utils.masked import Masked
a, mask = Masked._get_data_and_mask(a)
if issubclass(a.dtype.type, np.inexact):
nans = np.isnan(a)
mask = nans if mask is None else (nans | mask)
if mask is not None:
a = Masked(a, mask)
if fill_value is not None:
a = a.filled(fill_value)
return np_func(a, *args, **kwargs)
doc = f"Like `numpy.{nanfuncname}`, skipping masked values as well.\n\n"
if fill_value is not None:
# sum, cumsum, prod, cumprod
doc += (
f"Masked/NaN values are replaced with {fill_value}. "
"The output is not masked."
)
elif "arg" in nanfuncname:
doc += (
"No exceptions are raised for fully masked/NaN slices.\n"
"Instead, these give index 0."
)
else:
doc += (
"No warnings are given for fully masked/NaN slices.\n"
"Instead, they are masked in the output."
)
nanfunc.__doc__ = doc
nanfunc.__name__ = nanfuncname
return nanfunc
for nanfuncname in np.lib.nanfunctions.__all__:
globals()[nanfuncname] = dispatched_function(
masked_nanfunc(nanfuncname), helps=getattr(np, nanfuncname)
)
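# Illustrative sketch (editorial example): NaNs count as additional masked
# entries, e.g. for
#     ma = Masked(np.array([1.0, np.nan, 3.0]), mask=[False, False, True])
# np.nansum(ma) gives a plain 1.0 (masked/NaN entries filled with 0), while
# np.nanmean(ma) averages only the remaining unmasked value, also giving 1.0.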
# Add any dispatched or helper function that has a docstring to
# __all__, so they will be typeset by sphinx. The logic is that for
# those presumably the use of the mask is not entirely obvious.
__all__ += sorted(
helper.__name__
for helper in (
set(APPLY_TO_BOTH_FUNCTIONS.values()) | set(DISPATCHED_FUNCTIONS.values())
)
if helper.__doc__
)
|
615d4ccd141c70f66d603a40d2e826ed2d6e58d6ab09e5838581cb048e745020 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import re
import warnings
from pathlib import Path
import numpy as np
import pytest
from astropy import units as u
from astropy.config import set_temp_cache
from astropy.table import QTable
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time, TimeDelta
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.iers import iers
CI = os.environ.get("CI", False)
FILE_NOT_FOUND_ERROR = getattr(__builtins__, "FileNotFoundError", OSError)
try:
iers.IERS_A.open("finals2000A.all") # check if IERS_A is available
except OSError:
HAS_IERS_A = False
else:
HAS_IERS_A = True
IERS_A_EXCERPT = get_pkg_data_filename(os.path.join("data", "iers_a_excerpt"))
def setup_module():
# Need auto_download so that IERS_B won't be loaded and cause tests to
# fail. Files to be downloaded are handled appropriately in the tests.
iers.conf.auto_download = True
def teardown_module():
# This setting is to be consistent with astropy/conftest.py
iers.conf.auto_download = False
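# Illustrative sketch (editorial example): individual tests below override
# configuration items only temporarily, e.g.
#     with iers.conf.set_temp("auto_download", False):
#         tab = iers.IERS_Auto.open()  # falls back to the bundled IERS-B table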
class TestBasic:
"""Basic tests that IERS_B returns correct values"""
@pytest.mark.parametrize("iers_cls", (iers.IERS_B, iers.IERS))
def test_simple(self, iers_cls):
"""Test the default behaviour for IERS_B and IERS."""
# Arguably, IERS itself should not be used at all, but it used to
# provide IERS_B by default so we check that it continues to do so.
# Eventually, IERS should probably be deprecated.
iers_cls.close()
assert iers_cls.iers_table is None
iers_tab = iers_cls.open()
assert iers_cls.iers_table is not None
assert iers_cls.iers_table is iers_tab
assert isinstance(iers_tab, QTable)
assert isinstance(iers_tab, iers.IERS_B)
assert (iers_tab["UT1_UTC"].unit / u.second).is_unity()
assert (iers_tab["PM_x"].unit / u.arcsecond).is_unity()
assert (iers_tab["PM_y"].unit / u.arcsecond).is_unity()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0.0, 0.5])
ut1_utc = iers_tab.ut1_utc(jd1, jd2)
assert isinstance(ut1_utc, u.Quantity)
assert (ut1_utc.unit / u.second).is_unity()
# IERS files change at the 0.1 ms level; see gh-6981
assert_quantity_allclose(
ut1_utc,
[-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895] * u.s,
atol=0.1 * u.ms,
)
# should be future-proof; surely we've moved to another planet by then
with pytest.raises(IndexError):
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.0)
# also check it returns the right status
ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status2 == iers.FROM_IERS_B)
ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0.0, return_status=True)
assert status4 == iers.TIME_BEYOND_IERS_RANGE
# check it works via Time too
t = Time(jd1, jd2, format="jd", scale="utc")
ut1_utc3 = iers_tab.ut1_utc(t)
assert_quantity_allclose(
ut1_utc3,
[-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895] * u.s,
atol=0.1 * u.ms,
)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
def test_open_filename(self):
iers.IERS_B.close()
iers.IERS_B.open(iers.IERS_B_FILE)
assert iers.IERS_B.iers_table is not None
assert isinstance(iers.IERS_B.iers_table, QTable)
iers.IERS_B.close()
with pytest.raises(FILE_NOT_FOUND_ERROR):
iers.IERS_B.open("surely this does not exist")
def test_open_network_url(self):
iers.IERS_A.close()
iers.IERS_A.open(Path(IERS_A_EXCERPT).as_uri())
assert iers.IERS_A.iers_table is not None
assert isinstance(iers.IERS_A.iers_table, QTable)
iers.IERS_A.close()
def test_IERS_B_old_style_excerpt():
"""Check that the instructions given in `IERS_B.read` actually work."""
# If this test is changed, be sure to also adjust the instructions.
#
# TODO: this test and the note can probably be removed after
# enough time has passed that old-style IERS_B files are simply
# not around any more, say in 2025. If so, also remove the excerpt
# and the ReadMe.eopc04_IAU2000 file.
old_style_file = get_pkg_data_filename(
os.path.join("data", "iers_b_old_style_excerpt")
)
excerpt = iers.IERS_B.read(
old_style_file,
readme=get_pkg_data_filename(
"data/ReadMe.eopc04_IAU2000", package="astropy.utils.iers"
),
data_start=14,
)
assert isinstance(excerpt, QTable)
assert "PM_x_dot" not in excerpt.colnames
class TestIERS_AExcerpt:
def test_simple(self):
# Test the IERS A reader. It is also a regression test that ensures
# values do not get overridden by IERS B; see #4933.
iers_tab = iers.IERS_A.open(IERS_A_EXCERPT)
assert (iers_tab["UT1_UTC"].unit / u.second).is_unity()
assert "P" in iers_tab["UT1Flag"]
assert "I" in iers_tab["UT1Flag"]
assert "B" in iers_tab["UT1Flag"]
assert np.all(
(iers_tab["UT1Flag"] == "I")
| (iers_tab["UT1Flag"] == "P")
| (iers_tab["UT1Flag"] == "B")
)
assert (iers_tab["dX_2000A"].unit / u.marcsec).is_unity()
assert (iers_tab["dY_2000A"].unit / u.marcsec).is_unity()
assert "P" in iers_tab["NutFlag"]
assert "I" in iers_tab["NutFlag"]
assert "B" in iers_tab["NutFlag"]
assert np.all(
(iers_tab["NutFlag"] == "P")
| (iers_tab["NutFlag"] == "I")
| (iers_tab["NutFlag"] == "B")
)
assert (iers_tab["PM_x"].unit / u.arcsecond).is_unity()
assert (iers_tab["PM_y"].unit / u.arcsecond).is_unity()
assert "P" in iers_tab["PolPMFlag"]
assert "I" in iers_tab["PolPMFlag"]
assert "B" in iers_tab["PolPMFlag"]
assert np.all(
(iers_tab["PolPMFlag"] == "P")
| (iers_tab["PolPMFlag"] == "I")
| (iers_tab["PolPMFlag"] == "B")
)
t = Time([57053.0, 57054.0, 57055.0], format="mjd")
ut1_utc, status = iers_tab.ut1_utc(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
assert_quantity_allclose(
ut1_utc, [-0.4916557, -0.4925323, -0.4934373] * u.s, atol=0.1 * u.ms
)
dcip_x, dcip_y, status = iers_tab.dcip_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
print(dcip_x)
print(dcip_y)
assert_quantity_allclose(
dcip_x, [-0.086, -0.093, -0.087] * u.marcsec, atol=1.0 * u.narcsec
)
assert_quantity_allclose(
dcip_y, [0.094, 0.081, 0.072] * u.marcsec, atol=1 * u.narcsec
)
pm_x, pm_y, status = iers_tab.pm_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
assert_quantity_allclose(
pm_x, [0.003734, 0.004581, 0.004623] * u.arcsec, atol=0.1 * u.marcsec
)
assert_quantity_allclose(
pm_y, [0.310824, 0.313150, 0.315517] * u.arcsec, atol=0.1 * u.marcsec
)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
@pytest.mark.skipif(not HAS_IERS_A, reason="requires IERS_A")
class TestIERS_A:
def test_simple(self):
"""Test that open() by default reads a 'finals2000A.all' file."""
# Ensure we remove any cached table (gh-5131).
iers.IERS_A.close()
iers_tab = iers.IERS_A.open()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0.0, 0.5])
ut1_utc, status = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status == iers.FROM_IERS_B)
assert_quantity_allclose(
ut1_utc,
[-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895] * u.s,
atol=0.1 * u.ms,
)
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.0, return_status=True)
assert status2 == iers.TIME_BEYOND_IERS_RANGE
tnow = Time.now()
ut1_utc3, status3 = iers_tab.ut1_utc(tnow, return_status=True)
assert status3 == iers.FROM_IERS_A_PREDICTION
assert ut1_utc3 != 0.0
class TestIERS_Auto:
def setup_class(self):
"""Set up useful data for the tests."""
self.N = 40
self.ame = 30.0
self.iers_a_file_1 = get_pkg_data_filename(
os.path.join("data", "finals2000A-2016-02-30-test")
)
self.iers_a_file_2 = get_pkg_data_filename(
os.path.join("data", "finals2000A-2016-04-30-test")
)
self.iers_a_url_1 = Path(self.iers_a_file_1).as_uri()
self.iers_a_url_2 = Path(self.iers_a_file_2).as_uri()
self.t = Time.now() + TimeDelta(10, format="jd") * np.arange(self.N)
def teardown_method(self, method):
"""Run this after every test."""
iers.IERS_Auto.close()
def test_interpolate_error_formatting(self):
"""Regression test: make sure the error message in
IERS_Auto._check_interpolate_indices() is formatted correctly.
"""
with iers.conf.set_temp("iers_auto_url", self.iers_a_url_1):
with iers.conf.set_temp("iers_auto_url_mirror", self.iers_a_url_1):
with iers.conf.set_temp("auto_max_age", self.ame):
with pytest.raises(
ValueError,
match=re.escape(iers.INTERPOLATE_ERROR.format(self.ame)),
):
iers_table = iers.IERS_Auto.open()
with warnings.catch_warnings():
# Ignoring this if it comes up -- IERS_Auto predictive
# values are older than 30.0 days but downloading the
# latest table did not find newer values
warnings.simplefilter("ignore", iers.IERSStaleWarning)
iers_table.ut1_utc(self.t.jd1, self.t.jd2)
def test_auto_max_age_none(self):
"""Make sure that iers.INTERPOLATE_ERROR's advice about setting
auto_max_age = None actually works.
"""
with iers.conf.set_temp("iers_auto_url", self.iers_a_url_1):
with iers.conf.set_temp("auto_max_age", None):
iers_table = iers.IERS_Auto.open()
delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert isinstance(delta, np.ndarray)
assert delta.shape == (self.N,)
assert_quantity_allclose(delta, np.array([-0.2246227] * self.N) * u.s)
def test_auto_max_age_minimum(self):
"""Check that the minimum auto_max_age is enforced."""
with iers.conf.set_temp("iers_auto_url", self.iers_a_url_1):
with iers.conf.set_temp("auto_max_age", 5.0):
with pytest.raises(
ValueError,
match=(
r"IERS auto_max_age configuration value must be larger than 10"
r" days"
),
):
iers_table = iers.IERS_Auto.open()
_ = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
def test_no_auto_download(self):
with iers.conf.set_temp("auto_download", False):
t = iers.IERS_Auto.open()
assert type(t) is iers.IERS_B
@pytest.mark.remote_data
def test_simple(self):
with iers.conf.set_temp("iers_auto_url", self.iers_a_url_1):
dat = iers.IERS_Auto.open()
assert dat["MJD"][0] == 57359.0 * u.d
assert dat["MJD"][-1] == 57539.0 * u.d
# Pretend we are accessing at a time 7 days after start of predictive data
predictive_mjd = dat.meta["predictive_mjd"]
dat._time_now = Time(predictive_mjd, format="mjd") + 7 * u.d
# Look at times before and after the test file begins. 0.1292934 is
# the IERS-B value from MJD=57359. The value in
# finals2000A-2016-02-30-test has been replaced at this point.
assert np.allclose(
dat.ut1_utc(Time(50000, format="mjd").jd).value, 0.1292934
)
assert np.allclose(
dat.ut1_utc(Time(60000, format="mjd").jd).value, -0.2246227
)
# Now pretend we are accessing at time 60 days after start of predictive data.
# There will be a warning when downloading the file doesn't give new data
# and an exception when extrapolating into the future with insufficient data.
dat._time_now = Time(predictive_mjd, format="mjd") + 60 * u.d
assert np.allclose(
dat.ut1_utc(Time(50000, format="mjd").jd).value, 0.1292934
)
with pytest.warns(
iers.IERSStaleWarning, match="IERS_Auto predictive values are older"
) as warns, pytest.raises(
ValueError,
match="interpolating from IERS_Auto using predictive values",
):
dat.ut1_utc(Time(60000, format="mjd").jd)
assert len(warns) == 1
# Warning only if we are getting return status
with pytest.warns(
iers.IERSStaleWarning, match="IERS_Auto predictive values are older"
) as warns:
dat.ut1_utc(Time(60000, format="mjd").jd, return_status=True)
assert len(warns) == 1
# Now set auto_max_age = None which says that we don't care how old the
# available IERS-A file is. There should be no warnings or exceptions.
with iers.conf.set_temp("auto_max_age", None):
dat.ut1_utc(Time(60000, format="mjd").jd)
# Now point to a later file with same values but MJD increased by
# 60 days and see that things work. dat._time_now is still the same value
# as before, i.e. right around the start of predictive values for the new file.
# (In other words this is like downloading the latest file online right now).
with iers.conf.set_temp("iers_auto_url", self.iers_a_url_2):
# Look at times before and after the test file begins. This forces a new download.
assert np.allclose(
dat.ut1_utc(Time(50000, format="mjd").jd).value, 0.1292934
)
assert np.allclose(dat.ut1_utc(Time(60000, format="mjd").jd).value, -0.3)
# Now the time range should be different.
assert dat["MJD"][0] == 57359.0 * u.d
assert dat["MJD"][-1] == (57539.0 + 60) * u.d
@pytest.mark.remote_data
def test_IERS_B_parameters_loading_into_IERS_Auto():
A = iers.IERS_Auto.open()
B = iers.IERS_B.open()
ok_A = A["MJD"] <= B["MJD"][-1]
assert not np.all(ok_A), "IERS B covers all of IERS A: should not happen"
# We only overwrite IERS_B values in the IERS_A table that were already
# there in the first place. Better take that into account.
ok_A &= np.isfinite(A["UT1_UTC_B"])
i_B = np.searchsorted(B["MJD"], A["MJD"][ok_A])
assert np.all(np.diff(i_B) == 1), "Valid region not contiguous"
assert np.all(A["MJD"][ok_A] == B["MJD"][i_B])
# Check that values are copied correctly. Since units are not
# necessarily the same, we use allclose with very strict tolerance.
for name in ("UT1_UTC", "PM_x", "PM_y", "dX_2000A", "dY_2000A"):
assert_quantity_allclose(
A[name][ok_A],
B[name][i_B],
rtol=1e-15,
err_msg=(
f"Bug #9206 IERS B parameter {name} not copied over "
"correctly to IERS Auto"
),
)
# Issue with FTP, rework test into previous one when it's fixed
@pytest.mark.skipif("CI", reason="Flaky on CI")
@pytest.mark.remote_data
def test_iers_a_dl():
iersa_tab = iers.IERS_A.open(iers.IERS_A_URL, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersa_tab) > 0
assert "UT1_UTC_A" in iersa_tab.colnames
finally:
iers.IERS_A.close()
@pytest.mark.remote_data
def test_iers_a_dl_mirror():
iersa_tab = iers.IERS_A.open(iers.IERS_A_URL_MIRROR, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersa_tab) > 0
assert "UT1_UTC_A" in iersa_tab.colnames
finally:
iers.IERS_A.close()
@pytest.mark.remote_data
def test_iers_b_dl():
iersb_tab = iers.IERS_B.open(iers.IERS_B_URL, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersb_tab) > 0
assert "UT1_UTC" in iersb_tab.colnames
finally:
iers.IERS_B.close()
@pytest.mark.remote_data
def test_iers_out_of_range_handling(tmp_path):
# Make sure we don't have IERS-A data available anywhere
with set_temp_cache(tmp_path):
iers.IERS_A.close()
iers.IERS_Auto.close()
iers.IERS.close()
now = Time.now()
with iers.conf.set_temp("auto_download", False):
# Should be fine with built-in IERS_B
(now - 300 * u.day).ut1
# Default is to raise an error
match = r"\(some\) times are outside of range covered by IERS table"
with pytest.raises(iers.IERSRangeError, match=match):
(now + 100 * u.day).ut1
with iers.conf.set_temp("iers_degraded_accuracy", "warn"):
with pytest.warns(iers.IERSDegradedAccuracyWarning, match=match):
(now + 100 * u.day).ut1
with iers.conf.set_temp("iers_degraded_accuracy", "ignore"):
(now + 100 * u.day).ut1
@pytest.mark.remote_data
def test_iers_download_error_handling(tmp_path):
# Make sure we don't have IERS-A data available anywhere
with set_temp_cache(tmp_path):
iers.IERS_A.close()
iers.IERS_Auto.close()
iers.IERS.close()
now = Time.now()
# bad site name
with iers.conf.set_temp("iers_auto_url", "FAIL FAIL"):
# site that exists but doesn't have IERS data
with iers.conf.set_temp("iers_auto_url_mirror", "https://google.com"):
with pytest.warns(iers.IERSWarning) as record:
with iers.conf.set_temp("iers_degraded_accuracy", "ignore"):
(now + 100 * u.day).ut1
assert len(record) == 3
assert str(record[0].message).startswith(
"failed to download FAIL FAIL: Malformed URL"
)
assert str(record[1].message).startswith(
"malformed IERS table from https://google.com"
)
assert str(record[2].message).startswith(
"unable to download valid IERS file, using local IERS-B"
)
|
cc1b71c29ebafdace8b5feee327436500b47b2f657c7809d56b6fd88769efc33 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test numpy functions and ufuncs on Masked arrays and quantities.
The tests here are fairly detailed but do not aim for complete
coverage. Complete coverage of all numpy functions is done
with less detailed tests in test_function_helpers.
"""
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.units import Quantity
from astropy.utils.compat.numpycompat import NUMPY_LT_1_25
from astropy.utils.masked.core import Masked
from .test_masked import (
LongitudeSetup,
MaskedArraySetup,
QuantitySetup,
assert_masked_equal,
)
class MaskedUfuncTests(MaskedArraySetup):
@pytest.mark.parametrize(
"ufunc", (np.add, np.subtract, np.divide, np.arctan2, np.minimum)
)
@pytest.mark.parametrize("a, b", [("ma", "mb"), ("ma", "b"), ("a", "mb")])
def test_2op_ufunc(self, ufunc, a, b):
a, b = getattr(self, a), getattr(self, b)
mask_a = getattr(a, "mask", np.zeros(a.shape, bool))
mask_b = getattr(b, "mask", np.zeros(b.shape, bool))
result = ufunc(a, b)
expected_data = ufunc(self.a, self.b)
expected_mask = mask_a | mask_b
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
out = Masked(np.zeros_like(result.unmasked))
result2 = ufunc(a, b, out=out)
assert result2 is out
assert_masked_equal(result2, result)
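# Illustrative sketch (editorial example) of the rule tested above: for a
# binary ufunc the result mask is the logical OR of the input masks, e.g.
#     np.add(Masked(np.array([1.0, 2.0]), mask=[True, False]), np.array([10.0, 10.0]))
# gives unmasked [11., 12.] with mask [True, False].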
@pytest.mark.parametrize("base_mask", [True, False])
def test_ufunc_inplace_where(self, base_mask):
# Construct base filled with -9 and base_mask (copying to get unit/class).
base = self.ma.copy()
base.unmasked.view(np.ndarray)[...] = -9.0
base._mask[...] = base_mask
out = base.copy()
where = np.array([[True, False, False], [False, True, False]])
result = np.add(self.ma, self.mb, out=out, where=where)
# Direct checks.
assert np.all(result.unmasked[~where] == base.unmasked[0, 0])
assert np.all(result.unmasked[where] == (self.a + self.b)[where])
# Full comparison.
expected = base.unmasked.copy()
np.add(self.a, self.b, out=expected, where=where)
expected_mask = base.mask.copy()
np.logical_or(self.mask_a, self.mask_b, out=expected_mask, where=where)
assert_array_equal(result.unmasked, expected)
assert_array_equal(result.mask, expected_mask)
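# Illustrative sketch (editorial example) of the where= semantics tested above:
# output elements for which `where` is False keep their original data and mask,
# e.g.
#     out = Masked(np.zeros(2), mask=[True, True])
#     np.add(Masked(np.array([1.0, 2.0])), np.array([1.0, 1.0]), out=out,
#            where=[True, False])
# leaves out.unmasked as [2., 0.] and out.mask as [False, True].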
@pytest.mark.parametrize("base_mask", [True, False])
def test_ufunc_inplace_masked_where(self, base_mask):
base = self.ma.copy()
base.unmasked.view(np.ndarray)[...] = -9.0
base._mask[...] = base_mask
out = base.copy()
where = Masked(
[[True, False, True], [False, False, True]],
mask=[[True, False, False], [True, False, True]],
)
result = np.add(self.ma, self.mb, out=out, where=where)
# Direct checks.
assert np.all(result.unmasked[~where.unmasked] == base.unmasked[0, 0])
assert np.all(
result.unmasked[where.unmasked] == (self.a + self.b)[where.unmasked]
)
assert np.all(result.mask[where.mask])
assert np.all(result.mask[~where.mask & ~where.unmasked] == base.mask[0, 0])
assert np.all(
result.mask[~where.mask & where.unmasked]
== (self.mask_a | self.mask_b)[~where.mask & where.unmasked]
)
# Full comparison.
expected = base.unmasked.copy()
np.add(self.a, self.b, out=expected, where=where.unmasked)
expected_mask = base.mask.copy()
np.logical_or(self.mask_a, self.mask_b, out=expected_mask, where=where.unmasked)
expected_mask |= where.mask
assert_array_equal(result.unmasked, expected)
assert_array_equal(result.mask, expected_mask)
def test_ufunc_inplace_no_masked_input(self):
a_b = np.add(self.a, self.b)
out = Masked(np.zeros_like(a_b))
result = np.add(self.a, self.b, out=out)
assert result is out
assert_array_equal(result.unmasked, a_b)
assert_array_equal(result.mask, np.zeros(a_b.shape, bool))
def test_ufunc_inplace_error(self):
# Output is not masked.
out = np.zeros(self.ma.shape)
with pytest.raises(TypeError):
np.add(self.ma, self.mb, out=out)
@pytest.mark.xfail(NUMPY_LT_1_25, reason="masked where not supported in numpy<1.25")
def test_ufunc_inplace_error_masked_where(self):
# Input and output are not masked, but where is.
# Note: prior to numpy 1.25, we cannot control this.
out = self.a.copy()
with pytest.raises(TypeError):
np.add(self.a, self.b, out=out, where=Masked(True, mask=True))
@pytest.mark.parametrize("ufunc", (np.add.outer, np.minimum.outer))
@pytest.mark.parametrize("a, b", [("ma", "mb"), ("ma", "b"), ("a", "mb")])
def test_2op_ufunc_outer(self, ufunc, a, b):
a, b = getattr(self, a), getattr(self, b)
mask_a = getattr(a, "mask", np.zeros(a.shape, bool))
mask_b = getattr(b, "mask", np.zeros(b.shape, bool))
result = ufunc(a, b)
expected_data = ufunc(self.a, self.b)
expected_mask = np.logical_or.outer(mask_a, mask_b)
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
out = Masked(np.zeros_like(result.unmasked))
result2 = ufunc(a, b, out=out)
assert result2 is out
assert_masked_equal(result2, result)
@pytest.mark.parametrize("ufunc", (np.add.outer, np.minimum.outer))
def test_2op_ufunc_outer_no_masked_input(self, ufunc):
expected_data = ufunc(self.a, self.b)
out = Masked(np.zeros_like(expected_data), True)
result = ufunc(self.a, self.b, out=out)
assert_array_equal(out.unmasked, expected_data)
assert_array_equal(out.mask, np.zeros(out.shape, dtype=bool))
def test_3op_ufunc(self):
ma_mb = np.clip(self.ma, self.b, self.c)
expected_data = np.clip(self.a, self.b, self.c)
expected_mask = self.mask_a
assert_array_equal(ma_mb.unmasked, expected_data)
assert_array_equal(ma_mb.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_add_reduce(self, axis):
ma_reduce = np.add.reduce(self.ma, axis=axis)
expected_data = np.add.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
out = Masked(np.zeros_like(ma_reduce.unmasked), np.ones_like(ma_reduce.mask))
ma_reduce2 = np.add.reduce(self.ma, axis=axis, out=out)
assert ma_reduce2 is out
assert_masked_equal(ma_reduce2, ma_reduce)
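# Illustrative sketch (editorial example) of the reduction rule tested above:
# a reduced element is masked if any element entering it was masked, e.g.
#     ma = Masked(np.arange(6.0).reshape(2, 3), mask=[[False, True, False]] * 2)
#     np.add.reduce(ma, axis=0)
# gives unmasked [3., 5., 7.] with mask [False, True, False].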
def test_add_reduce_no_masked_input(self):
a_reduce = np.add.reduce(self.a, axis=0)
out = Masked(np.zeros_like(a_reduce), np.ones(a_reduce.shape, bool))
result = np.add.reduce(self.a, axis=0, out=out)
assert result is out
assert_array_equal(out.unmasked, a_reduce)
assert_array_equal(out.mask, np.zeros(a_reduce.shape, bool))
@pytest.mark.parametrize("axis", (0, 1, None))
def test_minimum_reduce(self, axis):
ma_reduce = np.minimum.reduce(self.ma, axis=axis)
expected_data = np.minimum.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_maximum_reduce(self, axis):
ma_reduce = np.maximum.reduce(self.ma, axis=axis)
expected_data = np.maximum.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
class TestMaskedArrayUfuncs(MaskedUfuncTests):
# multiply.reduce does not work with units, so test only for plain array.
@pytest.mark.parametrize("axis", (0, 1, None))
def test_multiply_reduce(self, axis):
ma_reduce = np.multiply.reduce(self.ma, axis=axis)
expected_data = np.multiply.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
def test_ufunc_not_implemented_for_other(self):
"""
If the unmasked operation returns NotImplemented, this
should lead to a TypeError also for the masked version.
"""
a = np.array([1, 2])
b = 3 * u.m
with pytest.raises(TypeError):
a & b
ma = Masked(a)
with pytest.raises(TypeError):
ma & b
class TestMaskedQuantityUfuncs(MaskedUfuncTests, QuantitySetup):
def test_ufunc_inplace_error2(self):
out = Masked(np.zeros(self.ma.shape))
with pytest.raises(TypeError):
np.add(self.ma, self.mb, out=out)
class TestMaskedLongitudeUfuncs(MaskedUfuncTests, LongitudeSetup):
def test_ufunc_inplace_quantity_initial(self):
out = Masked(np.zeros(self.ma.shape) << u.m)
result = np.add(self.ma, self.mb, out=out)
assert result is out
expected = np.add(self.ma, self.mb).view(Quantity)
assert_masked_equal(result, expected)
class TestMaskedArrayConcatenation(MaskedArraySetup):
def test_concatenate(self):
mb = self.mb[np.newaxis]
concat_a_b = np.concatenate((self.ma, mb), axis=0)
expected_data = np.concatenate((self.a, self.b[np.newaxis]), axis=0)
expected_mask = np.concatenate((self.mask_a, self.mask_b[np.newaxis]), axis=0)
assert_array_equal(concat_a_b.unmasked, expected_data)
assert_array_equal(concat_a_b.mask, expected_mask)
def test_concatenate_not_all_masked(self):
mb = self.mb[np.newaxis]
concat_a_b = np.concatenate((self.a, mb), axis=0)
expected_data = np.concatenate((self.a, self.b[np.newaxis]), axis=0)
expected_mask = np.concatenate(
(np.zeros(self.a.shape, bool), self.mask_b[np.newaxis]), axis=0
)
assert_array_equal(concat_a_b.unmasked, expected_data)
assert_array_equal(concat_a_b.mask, expected_mask)
@pytest.mark.parametrize("obj", (1, slice(2, 3)))
def test_insert(self, obj):
mc_in_a = np.insert(self.ma, obj, self.mc, axis=-1)
expected = Masked(
np.insert(self.a, obj, self.c, axis=-1),
np.insert(self.mask_a, obj, self.mask_c, axis=-1),
)
assert_masked_equal(mc_in_a, expected)
def test_insert_masked_obj(self):
with pytest.raises(TypeError):
np.insert(self.ma, Masked(1, mask=False), self.mc, axis=-1)
def test_append(self):
mc_to_a = np.append(self.ma, self.mc, axis=-1)
expected = Masked(
np.append(self.a, self.c, axis=-1),
np.append(self.mask_a, self.mask_c, axis=-1),
)
assert_masked_equal(mc_to_a, expected)
class TestMaskedQuantityConcatenation(TestMaskedArrayConcatenation, QuantitySetup):
pass
class TestMaskedLongitudeConcatenation(TestMaskedArrayConcatenation, LongitudeSetup):
pass
class TestMaskedArrayBroadcast(MaskedArraySetup):
def test_broadcast_to(self):
shape = self.ma.shape
ba = np.broadcast_to(self.mb, shape, subok=True)
assert ba.shape == shape
assert ba.mask.shape == shape
expected = Masked(
np.broadcast_to(self.mb.unmasked, shape, subok=True),
np.broadcast_to(self.mb.mask, shape, subok=True),
)
assert_masked_equal(ba, expected)
def test_broadcast_to_using_apply(self):
# Partially just to ensure we cover the relevant part of _apply.
shape = self.ma.shape
ba = self.mb._apply(np.broadcast_to, shape=shape, subok=True)
assert ba.shape == shape
assert ba.mask.shape == shape
expected = Masked(
np.broadcast_to(self.mb.unmasked, shape, subok=True),
np.broadcast_to(self.mb.mask, shape, subok=True),
)
assert_masked_equal(ba, expected)
def test_broadcast_arrays(self):
mb = np.broadcast_arrays(self.ma, self.mb, self.mc, subok=True)
b = np.broadcast_arrays(self.a, self.b, self.c, subok=True)
bm = np.broadcast_arrays(self.mask_a, self.mask_b, self.mask_c)
for mb_, b_, bm_ in zip(mb, b, bm):
assert_array_equal(mb_.unmasked, b_)
assert_array_equal(mb_.mask, bm_)
def test_broadcast_arrays_not_all_masked(self):
mb = np.broadcast_arrays(self.a, self.mb, self.c, subok=True)
assert_array_equal(mb[0], self.a)
expected1 = np.broadcast_to(self.mb, self.a.shape, subok=True)
assert_masked_equal(mb[1], expected1)
expected2 = np.broadcast_to(self.c, self.a.shape, subok=True)
assert_array_equal(mb[2], expected2)
def test_broadcast_arrays_subok_false(self):
# subok affects ndarray subclasses but not masking itself.
mb = np.broadcast_arrays(self.ma, self.mb, self.mc, subok=False)
assert all(type(mb_.unmasked) is np.ndarray for mb_ in mb)
b = np.broadcast_arrays(self.a, self.b, self.c, subok=False)
mask_b = np.broadcast_arrays(self.mask_a, self.mask_b, self.mask_c, subok=False)
for mb_, b_, mask_ in zip(mb, b, mask_b):
assert_array_equal(mb_.unmasked, b_)
assert_array_equal(mb_.mask, mask_)
class TestMaskedQuantityBroadcast(TestMaskedArrayBroadcast, QuantitySetup):
pass
class TestMaskedLongitudeBroadcast(TestMaskedArrayBroadcast, LongitudeSetup):
pass
class TestMaskedArrayCalculation(MaskedArraySetup):
@pytest.mark.parametrize("n,axis", [(1, -1), (2, -1), (1, 0)])
def test_diff(self, n, axis):
mda = np.diff(self.ma, n=n, axis=axis)
expected_data = np.diff(self.a, n, axis)
nan_mask = np.zeros_like(self.a)
nan_mask[self.ma.mask] = np.nan
expected_mask = np.isnan(np.diff(nan_mask, n=n, axis=axis))
assert_array_equal(mda.unmasked, expected_data)
assert_array_equal(mda.mask, expected_mask)
def test_diff_explicit(self):
ma = Masked(
np.arange(8.0), [True, False, False, False, False, True, False, False]
)
mda = np.diff(ma)
assert np.all(mda.unmasked == 1.0)
assert np.all(mda.mask == [True, False, False, False, True, True, False])
mda = np.diff(ma, n=2)
assert np.all(mda.unmasked == 0.0)
assert np.all(mda.mask == [True, False, False, True, True, True])
class TestMaskedQuantityCalculation(TestMaskedArrayCalculation, QuantitySetup):
pass
class TestMaskedLongitudeCalculation(TestMaskedArrayCalculation, LongitudeSetup):
pass
class TestMaskedArraySorting(MaskedArraySetup):
@pytest.mark.parametrize("axis", [-1, 0])
def test_lexsort1(self, axis):
ma_lexsort = np.lexsort((self.ma,), axis=axis)
filled = self.a.copy()
filled[self.mask_a] = 9e9
expected_data = filled.argsort(axis)
assert_array_equal(ma_lexsort, expected_data)
@pytest.mark.parametrize("axis", [-1, 0])
def test_lexsort2(self, axis):
mb = np.broadcast_to(-self.mb, self.ma.shape).copy()
mamb_lexsort = np.lexsort((self.ma, mb), axis=axis)
filled_a = self.ma.filled(9e9)
filled_b = mb.filled(9e9)
expected_ab = np.lexsort((filled_a, filled_b), axis=axis)
assert_array_equal(mamb_lexsort, expected_ab)
mbma_lexsort = np.lexsort((mb, self.ma), axis=axis)
expected_ba = np.lexsort((filled_b, filled_a), axis=axis)
assert_array_equal(mbma_lexsort, expected_ba)
mbma_lexsort2 = np.lexsort(np.stack([mb, self.ma], axis=0), axis=axis)
assert_array_equal(mbma_lexsort2, expected_ba)
@pytest.mark.parametrize("axis", [-1, 0])
def test_lexsort_mix(self, axis):
mb = np.broadcast_to(-self.mb, self.ma.shape).copy()
mamb_lexsort = np.lexsort((self.a, mb), axis=axis)
filled_b = mb.filled(9e9)
expected_ab = np.lexsort((self.a, filled_b), axis=axis)
assert_array_equal(mamb_lexsort, expected_ab)
mbma_lexsort = np.lexsort((mb, self.a), axis=axis)
expected_ba = np.lexsort((filled_b, self.a), axis=axis)
assert_array_equal(mbma_lexsort, expected_ba)
mbma_lexsort2 = np.lexsort(np.stack([mb, self.a], axis=0), axis=axis)
assert_array_equal(mbma_lexsort2, expected_ba)
|
458b955f6d4e13de1956c67c5b85fac4551344ae85ba4a30ecaae76bfd25da81 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test masked class initialization, methods, and operators.
Functions, including ufuncs, are tested in test_functions.py
"""
import operator
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.coordinates import Longitude
from astropy.units import Quantity
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.masked import Masked, MaskedNDArray
def assert_masked_equal(a, b):
assert_array_equal(a.unmasked, b.unmasked)
assert_array_equal(a.mask, b.mask)
VARIOUS_ITEMS = [(1, 1), slice(None, 1), (), 1]
class ArraySetup:
_data_cls = np.ndarray
@classmethod
def setup_class(self):
self.a = np.arange(6.0).reshape(2, 3)
self.mask_a = np.array([[True, False, False], [False, True, False]])
self.b = np.array([-3.0, -2.0, -1.0])
self.mask_b = np.array([False, True, False])
self.c = np.array([[0.25], [0.5]])
self.mask_c = np.array([[False], [True]])
self.sdt = np.dtype([("a", "f8"), ("b", "f8")])
self.mask_sdt = np.dtype([("a", "?"), ("b", "?")])
self.sa = np.array(
[
[(1.0, 2.0), (3.0, 4.0)],
[(11.0, 12.0), (13.0, 14.0)],
],
dtype=self.sdt,
)
self.mask_sa = np.array(
[
[(True, True), (False, False)],
[(False, True), (True, False)],
],
dtype=self.mask_sdt,
)
self.sb = np.array([(1.0, 2.0), (-3.0, 4.0)], dtype=self.sdt)
self.mask_sb = np.array([(True, False), (False, False)], dtype=self.mask_sdt)
self.scdt = np.dtype([("sa", "2f8"), ("sb", "i8", (2, 2))])
self.sc = np.array(
[
([1.0, 2.0], [[1, 2], [3, 4]]),
([-1.0, -2.0], [[-1, -2], [-3, -4]]),
],
dtype=self.scdt,
)
self.mask_scdt = np.dtype([("sa", "2?"), ("sb", "?", (2, 2))])
self.mask_sc = np.array(
[
([True, False], [[False, False], [True, True]]),
([False, True], [[True, False], [False, True]]),
],
dtype=self.mask_scdt,
)
class QuantitySetup(ArraySetup):
_data_cls = Quantity
@classmethod
def setup_class(self):
super().setup_class()
self.a = Quantity(self.a, u.m)
self.b = Quantity(self.b, u.cm)
self.c = Quantity(self.c, u.km)
self.sa = Quantity(self.sa, u.m, dtype=self.sdt)
self.sb = Quantity(self.sb, u.cm, dtype=self.sdt)
class LongitudeSetup(ArraySetup):
_data_cls = Longitude
@classmethod
def setup_class(self):
super().setup_class()
self.a = Longitude(self.a, u.deg)
self.b = Longitude(self.b, u.deg)
self.c = Longitude(self.c, u.deg)
# Note: Longitude does not work on structured arrays, so
# leaving it as regular array (which just reruns some tests).
class TestMaskedArrayInitialization(ArraySetup):
def test_simple(self):
ma = Masked(self.a, mask=self.mask_a)
assert isinstance(ma, np.ndarray)
assert isinstance(ma, type(self.a))
assert isinstance(ma, Masked)
assert_array_equal(ma.unmasked, self.a)
assert_array_equal(ma.mask, self.mask_a)
assert ma.mask is not self.mask_a
assert np.may_share_memory(ma.mask, self.mask_a)
def test_structured(self):
ma = Masked(self.sa, mask=self.mask_sa)
assert isinstance(ma, np.ndarray)
assert isinstance(ma, type(self.sa))
assert isinstance(ma, Masked)
assert_array_equal(ma.unmasked, self.sa)
assert_array_equal(ma.mask, self.mask_sa)
assert ma.mask is not self.mask_sa
assert np.may_share_memory(ma.mask, self.mask_sa)
def test_masked_ndarray_init():
# Note: as a straight ndarray subclass, MaskedNDArray passes on
# the arguments relevant for np.ndarray, not np.array.
a_in = np.arange(3, dtype=int)
m_in = np.array([True, False, False])
buff = a_in.tobytes()
# Check we're doing things correctly using regular ndarray.
a = np.ndarray(shape=(3,), dtype=int, buffer=buff)
assert_array_equal(a, a_in)
# Check with and without mask.
ma = MaskedNDArray((3,), dtype=int, mask=m_in, buffer=buff)
assert_array_equal(ma.unmasked, a_in)
assert_array_equal(ma.mask, m_in)
ma = MaskedNDArray((3,), dtype=int, buffer=buff)
assert_array_equal(ma.unmasked, a_in)
assert_array_equal(ma.mask, np.zeros(3, bool))
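# Illustrative sketch (editorial example) of the distinction checked above: the
# Masked factory takes array-like data, while MaskedNDArray takes np.ndarray
# style arguments (shape, dtype, buffer, ...), e.g.
#     Masked(np.arange(3), mask=[True, False, False])
#     MaskedNDArray((3,), dtype=int, buffer=np.arange(3).tobytes(),
#                   mask=[True, False, False])
# build equivalent masked arrays.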
def test_cannot_initialize_with_masked():
with pytest.raises(ValueError, match="cannot handle np.ma.masked"):
Masked(np.ma.masked)
def test_cannot_just_use_anything_with_a_mask_attribute():
class my_array(np.ndarray):
mask = True
a = np.array([1.0, 2.0]).view(my_array)
with pytest.raises(AttributeError, match="unmasked"):
Masked(a)
class TestMaskedClassCreation:
"""Try creating a MaskedList and subclasses.
By no means meant to be realistic, just to check that the basic
machinery allows it.
"""
@classmethod
def setup_class(self):
self._base_classes_orig = Masked._base_classes.copy()
self._masked_classes_orig = Masked._masked_classes.copy()
class MaskedList(Masked, list, base_cls=list, data_cls=list):
def __new__(cls, *args, mask=None, copy=False, **kwargs):
self = super().__new__(cls)
self._unmasked = self._data_cls(*args, **kwargs)
self.mask = mask
return self
# Need to have shape for basics to work.
@property
def shape(self):
return (len(self._unmasked),)
self.MaskedList = MaskedList
def teardown_class(self):
Masked._base_classes = self._base_classes_orig
Masked._masked_classes = self._masked_classes_orig
def test_setup(self):
assert issubclass(self.MaskedList, Masked)
assert issubclass(self.MaskedList, list)
assert Masked(list) is self.MaskedList
def test_masked_list(self):
ml = self.MaskedList(range(3), mask=[True, False, False])
assert ml.unmasked == [0, 1, 2]
assert_array_equal(ml.mask, np.array([True, False, False]))
ml01 = ml[:2]
assert ml01.unmasked == [0, 1]
assert_array_equal(ml01.mask, np.array([True, False]))
def test_from_list(self):
ml = Masked([1, 2, 3], mask=[True, False, False])
assert ml.unmasked == [1, 2, 3]
assert_array_equal(ml.mask, np.array([True, False, False]))
def test_masked_list_subclass(self):
class MyList(list):
pass
ml = MyList(range(3))
mml = Masked(ml, mask=[False, True, False])
assert isinstance(mml, Masked)
assert isinstance(mml, MyList)
assert isinstance(mml.unmasked, MyList)
assert mml.unmasked == [0, 1, 2]
assert_array_equal(mml.mask, np.array([False, True, False]))
assert Masked(MyList) is type(mml)
class TestMaskedNDArraySubclassCreation:
"""Test that masked subclasses can be created directly and indirectly."""
@classmethod
def setup_class(self):
class MyArray(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.asanyarray(*args, **kwargs).view(cls)
self.MyArray = MyArray
self.a = np.array([1.0, 2.0]).view(self.MyArray)
self.m = np.array([True, False], dtype=bool)
def teardown_method(self, method):
Masked._masked_classes.pop(self.MyArray, None)
def test_direct_creation(self):
assert self.MyArray not in Masked._masked_classes
mcls = Masked(self.MyArray)
assert issubclass(mcls, Masked)
assert issubclass(mcls, self.MyArray)
assert mcls.__name__ == "MaskedMyArray"
assert mcls.__doc__.startswith("Masked version of MyArray")
mms = mcls(self.a, mask=self.m)
assert isinstance(mms, mcls)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_initialization_without_mask(self):
# Default for not giving a mask should be False.
mcls = Masked(self.MyArray)
mms = mcls(self.a)
assert isinstance(mms, mcls)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, np.zeros(mms.shape, bool))
@pytest.mark.parametrize("masked_array", [Masked, np.ma.MaskedArray])
def test_initialization_with_masked_values(self, masked_array):
mcls = Masked(self.MyArray)
ma = masked_array(np.asarray(self.a), mask=self.m)
mms = mcls(ma)
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_indirect_creation(self):
assert self.MyArray not in Masked._masked_classes
mms = Masked(self.a, mask=self.m)
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
assert self.MyArray in Masked._masked_classes
assert Masked(self.MyArray) is type(mms)
def test_can_initialize_with_masked_values(self):
mcls = Masked(self.MyArray)
mms = mcls(Masked(np.asarray(self.a), mask=self.m))
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_viewing(self):
mms = Masked(self.a, mask=self.m)
mms2 = mms.view()
assert type(mms2) is mms.__class__
assert_masked_equal(mms2, mms)
ma = mms.view(np.ndarray)
assert type(ma) is MaskedNDArray
assert_array_equal(ma.unmasked, self.a.view(np.ndarray))
assert_array_equal(ma.mask, self.m)
class TestMaskedQuantityInitialization(TestMaskedArrayInitialization, QuantitySetup):
def test_masked_quantity_class_init(self):
# TODO: class definitions should be more easily accessible.
mcls = Masked._masked_classes[self.a.__class__]
# This is not a very careful test.
mq = mcls([1.0, 2.0], mask=[True, False], unit=u.s)
assert mq.unit == u.s
assert np.all(mq.value.unmasked == [1.0, 2.0])
assert np.all(mq.value.mask == [True, False])
assert np.all(mq.mask == [True, False])
def test_masked_quantity_getting(self):
mcls = Masked._masked_classes[self.a.__class__]
MQ = Masked(Quantity)
assert MQ is mcls
def test_initialization_without_mask(self):
# Default for not giving a mask should be False.
MQ = Masked(Quantity)
mq = MQ([1.0, 2.0], u.s)
assert mq.unit == u.s
assert np.all(mq.value.unmasked == [1.0, 2.0])
assert np.all(mq.mask == [False, False])
@pytest.mark.parametrize("masked_array", [Masked, np.ma.MaskedArray])
def test_initialization_with_masked_values(self, masked_array):
MQ = Masked(Quantity)
a = np.array([1.0, 2.0])
m = np.array([True, False])
ma = masked_array(a, m)
mq = MQ(ma)
assert isinstance(mq, Masked)
assert isinstance(mq, Quantity)
assert_array_equal(mq.value.unmasked, a)
assert_array_equal(mq.mask, m)
class TestMaskSetting(ArraySetup):
def test_whole_mask_setting_simple(self):
ma = Masked(self.a)
assert ma.mask.shape == ma.shape
assert not ma.mask.any()
ma.mask = True
assert ma.mask.shape == ma.shape
assert ma.mask.all()
ma.mask = [[True], [False]]
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, np.array([[True] * 3, [False] * 3]))
ma.mask = self.mask_a
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, self.mask_a)
assert ma.mask is not self.mask_a
assert np.may_share_memory(ma.mask, self.mask_a)
def test_whole_mask_setting_structured(self):
ma = Masked(self.sa)
assert ma.mask.shape == ma.shape
assert not ma.mask["a"].any() and not ma.mask["b"].any()
ma.mask = True
assert ma.mask.shape == ma.shape
assert ma.mask["a"].all() and ma.mask["b"].all()
ma.mask = [[True], [False]]
assert ma.mask.shape == ma.shape
assert_array_equal(
ma.mask,
np.array([[(True, True)] * 2, [(False, False)] * 2], dtype=self.mask_sdt),
)
ma.mask = self.mask_sa
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, self.mask_sa)
assert ma.mask is not self.mask_sa
assert np.may_share_memory(ma.mask, self.mask_sa)
@pytest.mark.parametrize("item", VARIOUS_ITEMS)
def test_part_mask_setting(self, item):
ma = Masked(self.a)
ma.mask[item] = True
expected = np.zeros(ma.shape, bool)
expected[item] = True
assert_array_equal(ma.mask, expected)
ma.mask[item] = False
assert_array_equal(ma.mask, np.zeros(ma.shape, bool))
# Mask propagation
mask = np.zeros(self.a.shape, bool)
ma = Masked(self.a, mask)
ma.mask[item] = True
assert np.may_share_memory(ma.mask, mask)
assert_array_equal(ma.mask, mask)
@pytest.mark.parametrize("item", ["a"] + VARIOUS_ITEMS)
def test_part_mask_setting_structured(self, item):
ma = Masked(self.sa)
ma.mask[item] = True
expected = np.zeros(ma.shape, self.mask_sdt)
expected[item] = True
assert_array_equal(ma.mask, expected)
ma.mask[item] = False
assert_array_equal(ma.mask, np.zeros(ma.shape, self.mask_sdt))
# Mask propagation
mask = np.zeros(self.sa.shape, self.mask_sdt)
ma = Masked(self.sa, mask)
ma.mask[item] = True
assert np.may_share_memory(ma.mask, mask)
assert_array_equal(ma.mask, mask)
# Following are tests where we trust the initializer works.
class MaskedArraySetup(ArraySetup):
@classmethod
def setup_class(self):
super().setup_class()
self.ma = Masked(self.a, mask=self.mask_a)
self.mb = Masked(self.b, mask=self.mask_b)
self.mc = Masked(self.c, mask=self.mask_c)
self.msa = Masked(self.sa, mask=self.mask_sa)
self.msb = Masked(self.sb, mask=self.mask_sb)
self.msc = Masked(self.sc, mask=self.mask_sc)
class TestViewing(MaskedArraySetup):
def test_viewing_as_new_type(self):
ma2 = self.ma.view(type(self.ma))
assert_masked_equal(ma2, self.ma)
ma3 = self.ma.view()
assert_masked_equal(ma3, self.ma)
def test_viewing_as_new_dtype(self):
# Not very meaningful, but possible...
ma2 = self.ma.view("c8")
assert_array_equal(ma2.unmasked, self.a.view("c8"))
assert_array_equal(ma2.mask, self.mask_a)
@pytest.mark.parametrize("new_dtype", ["2f4", "f8,f8,f8"])
def test_viewing_as_new_dtype_not_implemented(self, new_dtype):
# But cannot (yet) view in a way that would need to create a new mask,
# even though that view is possible for a regular array.
check = self.a.view(new_dtype)
with pytest.raises(NotImplementedError, match="different.*size"):
self.ma.view(check.dtype)
def test_viewing_as_something_impossible(self):
with pytest.raises(TypeError):
# Use intp to ensure we have the same size as object,
# otherwise we get a different error message
Masked(np.array([1, 2], dtype=np.intp)).view(Masked)
class TestMaskedArrayCopyFilled(MaskedArraySetup):
def test_copy(self):
ma_copy = self.ma.copy()
assert type(ma_copy) is type(self.ma)
assert_array_equal(ma_copy.unmasked, self.ma.unmasked)
assert_array_equal(ma_copy.mask, self.ma.mask)
assert not np.may_share_memory(ma_copy.unmasked, self.ma.unmasked)
assert not np.may_share_memory(ma_copy.mask, self.ma.mask)
@pytest.mark.parametrize("fill_value", (0, 1))
def test_filled(self, fill_value):
fill_value = fill_value * getattr(self.a, "unit", 1)
expected = self.a.copy()
expected[self.ma.mask] = fill_value
result = self.ma.filled(fill_value)
assert_array_equal(expected, result)
def test_filled_no_fill_value(self):
with pytest.raises(TypeError, match="missing 1 required"):
self.ma.filled()
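# Illustrative sketch (editorial example): filled() replaces masked entries by
# the given value and returns a plain (unmasked) array of the underlying class,
# e.g. Masked(np.array([1.0, 2.0]), mask=[False, True]).filled(0.0) gives
# array([1., 0.]).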
@pytest.mark.parametrize("fill_value", [(0, 1), (-1, -1)])
def test_filled_structured(self, fill_value):
fill_value = np.array(fill_value, dtype=self.sdt)
if hasattr(self.sa, "unit"):
fill_value = fill_value << self.sa.unit
expected = self.sa.copy()
expected["a"][self.msa.mask["a"]] = fill_value["a"]
expected["b"][self.msa.mask["b"]] = fill_value["b"]
result = self.msa.filled(fill_value)
assert_array_equal(expected, result)
def test_flat(self):
ma_copy = self.ma.copy()
ma_flat = ma_copy.flat
# Check that single item keeps class and mask
ma_flat1 = ma_flat[1]
assert ma_flat1.unmasked == self.a.flat[1]
assert ma_flat1.mask == self.mask_a.flat[1]
# As well as getting items via iteration.
assert all(
(ma.unmasked == a and ma.mask == m)
for (ma, a, m) in zip(self.ma.flat, self.a.flat, self.mask_a.flat)
)
# check that flat works like a view of the real array
ma_flat[1] = self.b[1]
assert ma_flat[1] == self.b[1]
assert ma_copy[0, 1] == self.b[1]
class TestMaskedQuantityCopyFilled(TestMaskedArrayCopyFilled, QuantitySetup):
pass
class TestMaskedLongitudeCopyFilled(TestMaskedArrayCopyFilled, LongitudeSetup):
pass
class TestMaskedArrayShaping(MaskedArraySetup):
def test_reshape(self):
ma_reshape = self.ma.reshape((6,))
expected_data = self.a.reshape((6,))
expected_mask = self.mask_a.reshape((6,))
assert ma_reshape.shape == expected_data.shape
assert_array_equal(ma_reshape.unmasked, expected_data)
assert_array_equal(ma_reshape.mask, expected_mask)
def test_shape_setting(self):
ma_reshape = self.ma.copy()
ma_reshape.shape = (6,)
expected_data = self.a.reshape((6,))
expected_mask = self.mask_a.reshape((6,))
assert ma_reshape.shape == expected_data.shape
assert_array_equal(ma_reshape.unmasked, expected_data)
assert_array_equal(ma_reshape.mask, expected_mask)
def test_shape_setting_failure(self):
ma = self.ma.copy()
with pytest.raises(ValueError, match="cannot reshape"):
ma.shape = (5,)
assert ma.shape == self.ma.shape
assert ma.mask.shape == self.ma.shape
# Here, mask can be reshaped but array cannot.
ma2 = Masked(np.broadcast_to([[1.0], [2.0]], self.a.shape), mask=self.mask_a)
with pytest.raises(AttributeError, match="ncompatible shape"):
ma2.shape = (6,)
assert ma2.shape == self.ma.shape
assert ma2.mask.shape == self.ma.shape
# Here, array can be reshaped but mask cannot.
ma3 = Masked(
self.a.copy(), mask=np.broadcast_to([[True], [False]], self.mask_a.shape)
)
with pytest.raises(AttributeError, match="ncompatible shape"):
ma3.shape = (6,)
assert ma3.shape == self.ma.shape
assert ma3.mask.shape == self.ma.shape
def test_ravel(self):
ma_ravel = self.ma.ravel()
expected_data = self.a.ravel()
expected_mask = self.mask_a.ravel()
assert ma_ravel.shape == expected_data.shape
assert_array_equal(ma_ravel.unmasked, expected_data)
assert_array_equal(ma_ravel.mask, expected_mask)
def test_transpose(self):
ma_transpose = self.ma.transpose()
expected_data = self.a.transpose()
expected_mask = self.mask_a.transpose()
assert ma_transpose.shape == expected_data.shape
assert_array_equal(ma_transpose.unmasked, expected_data)
assert_array_equal(ma_transpose.mask, expected_mask)
def test_iter(self):
for ma, d, m in zip(self.ma, self.a, self.mask_a):
assert_array_equal(ma.unmasked, d)
assert_array_equal(ma.mask, m)
class MaskedItemTests(MaskedArraySetup):
@pytest.mark.parametrize("item", VARIOUS_ITEMS)
def test_getitem(self, item):
ma_part = self.ma[item]
expected_data = self.a[item]
expected_mask = self.mask_a[item]
assert_array_equal(ma_part.unmasked, expected_data)
assert_array_equal(ma_part.mask, expected_mask)
@pytest.mark.parametrize("item", ["a"] + VARIOUS_ITEMS)
def test_getitem_structured(self, item):
ma_part = self.msa[item]
expected_data = self.sa[item]
expected_mask = self.mask_sa[item]
assert_array_equal(ma_part.unmasked, expected_data)
assert_array_equal(ma_part.mask, expected_mask)
@pytest.mark.parametrize(
"indices,axis",
[([0, 1], 1), ([0, 1], 0), ([0, 1], None), ([[0, 1], [2, 3]], None)],
)
def test_take(self, indices, axis):
ma_take = self.ma.take(indices, axis=axis)
expected_data = self.a.take(indices, axis=axis)
expected_mask = self.mask_a.take(indices, axis=axis)
assert_array_equal(ma_take.unmasked, expected_data)
assert_array_equal(ma_take.mask, expected_mask)
ma_take2 = np.take(self.ma, indices, axis=axis)
assert_masked_equal(ma_take2, ma_take)
@pytest.mark.parametrize("item", VARIOUS_ITEMS)
@pytest.mark.parametrize("mask", [None, True, False])
def test_setitem(self, item, mask):
base = self.ma.copy()
expected_data = self.a.copy()
expected_mask = self.mask_a.copy()
value = self.a[0, 0] if mask is None else Masked(self.a[0, 0], mask)
base[item] = value
expected_data[item] = value if mask is None else value.unmasked
expected_mask[item] = False if mask is None else value.mask
assert_array_equal(base.unmasked, expected_data)
assert_array_equal(base.mask, expected_mask)
@pytest.mark.parametrize("item", ["a"] + VARIOUS_ITEMS)
@pytest.mark.parametrize("mask", [None, True, False])
def test_setitem_structured(self, item, mask):
base = self.msa.copy()
expected_data = self.sa.copy()
expected_mask = self.mask_sa.copy()
value = self.sa["b"] if item == "a" else self.sa[0, 0]
if mask is not None:
value = Masked(value, mask)
base[item] = value
expected_data[item] = value if mask is None else value.unmasked
expected_mask[item] = False if mask is None else value.mask
assert_array_equal(base.unmasked, expected_data)
assert_array_equal(base.mask, expected_mask)
@pytest.mark.parametrize("item", VARIOUS_ITEMS)
def test_setitem_np_ma_masked(self, item):
base = self.ma.copy()
expected_mask = self.mask_a.copy()
base[item] = np.ma.masked
expected_mask[item] = True
assert_array_equal(base.unmasked, self.a)
assert_array_equal(base.mask, expected_mask)
class TestMaskedArrayItems(MaskedItemTests):
@classmethod
def setup_class(self):
super().setup_class()
self.d = np.array(["aa", "bb"])
self.mask_d = np.array([True, False])
self.md = Masked(self.d, self.mask_d)
# Quantity, Longitude cannot hold strings.
def test_getitem_strings(self):
md = self.md.copy()
md0 = md[0]
assert md0.unmasked == self.d[0]
assert md0.mask
md_all = md[:]
assert_masked_equal(md_all, md)
def test_setitem_strings_np_ma_masked(self):
md = self.md.copy()
md[1] = np.ma.masked
assert_array_equal(md.unmasked, self.d)
assert_array_equal(md.mask, np.ones(2, bool))
class TestMaskedQuantityItems(MaskedItemTests, QuantitySetup):
pass
class TestMaskedLongitudeItems(MaskedItemTests, LongitudeSetup):
pass
class MaskedOperatorTests(MaskedArraySetup):
@pytest.mark.parametrize("op", (operator.add, operator.sub))
def test_add_subtract(self, op):
mapmb = op(self.ma, self.mb)
expected_data = op(self.a, self.b)
expected_mask = self.ma.mask | self.mb.mask
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
@pytest.mark.parametrize("op", (operator.eq, operator.ne))
def test_equality(self, op):
mapmb = op(self.ma, self.mb)
expected_data = op(self.a, self.b)
expected_mask = self.ma.mask | self.mb.mask
# Note: assert_array_equal also checks type, i.e., that boolean
# output is represented as plain Masked ndarray.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
def test_not_implemented(self):
with pytest.raises(TypeError):
self.ma > "abc"
@pytest.mark.parametrize("different_names", [False, True])
@pytest.mark.parametrize("op", (operator.eq, operator.ne))
def test_structured_equality(self, op, different_names):
msb = self.msb
if different_names:
msb = msb.astype(
[(f"different_{name}", dt) for name, dt in msb.dtype.fields.items()]
)
mapmb = op(self.msa, self.msb)
# Expected is a bit tricky here: only unmasked fields count
expected_data = np.ones(mapmb.shape, bool)
expected_mask = np.ones(mapmb.shape, bool)
for field in self.sdt.names:
fa, mfa = self.sa[field], self.mask_sa[field]
fb, mfb = self.sb[field], self.mask_sb[field]
mfequal = mfa | mfb
fequal = (fa == fb) | mfequal
expected_data &= fequal
expected_mask &= mfequal
if op is operator.ne:
expected_data = ~expected_data
# Note: assert_array_equal also checks type, i.e., that boolean
# output is represented as plain Masked ndarray.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
def test_matmul(self):
result = self.ma.T @ self.ma
assert_array_equal(result.unmasked, self.a.T @ self.a)
mask1 = np.any(self.mask_a, axis=0)
expected_mask = np.logical_or.outer(mask1, mask1)
assert_array_equal(result.mask, expected_mask)
result2 = self.ma.T @ self.a
assert_array_equal(result2.unmasked, self.a.T @ self.a)
expected_mask2 = np.logical_or.outer(mask1, np.zeros(3, bool))
assert_array_equal(result2.mask, expected_mask2)
result3 = self.a.T @ self.ma
assert_array_equal(result3.unmasked, self.a.T @ self.a)
expected_mask3 = np.logical_or.outer(np.zeros(3, bool), mask1)
assert_array_equal(result3.mask, expected_mask3)
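# Illustrative sketch (editorial example) of the matmul rule tested above: an
# output element is masked if any value contributing to its sum of products
# was masked, i.e. result[i, j] is masked when row i of the first operand or
# column j of the second contains a masked element. For instance,
#     Masked(np.eye(2), mask=[[True, False], [False, False]]) @ np.eye(2)
# has mask [[True, True], [False, False]].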
def test_matvec(self):
result = self.ma @ self.mb
assert np.all(result.mask)
assert_array_equal(result.unmasked, self.a @ self.b)
# Just using the masked vector still has all elements masked.
result2 = self.a @ self.mb
assert np.all(result2.mask)
assert_array_equal(result2.unmasked, self.a @ self.b)
new_ma = self.ma.copy()
new_ma.mask[0, 0] = False
result3 = new_ma @ self.b
assert_array_equal(result3.unmasked, self.a @ self.b)
assert_array_equal(result3.mask, new_ma.mask.any(-1))
def test_vecmat(self):
result = self.mb @ self.ma.T
assert np.all(result.mask)
assert_array_equal(result.unmasked, self.b @ self.a.T)
result2 = self.b @ self.ma.T
assert np.all(result2.mask)
assert_array_equal(result2.unmasked, self.b @ self.a.T)
new_ma = self.ma.T.copy()
new_ma.mask[0, 0] = False
result3 = self.b @ new_ma
assert_array_equal(result3.unmasked, self.b @ self.a.T)
assert_array_equal(result3.mask, new_ma.mask.any(0))
def test_vecvec(self):
result = self.mb @ self.mb
assert result.shape == ()
assert result.mask
assert result.unmasked == self.b @ self.b
mb_no_mask = Masked(self.b, False)
result2 = mb_no_mask @ mb_no_mask
assert not result2.mask
class TestMaskedArrayOperators(MaskedOperatorTests):
# Some further tests that use strings, which are not useful for Quantity.
@pytest.mark.parametrize("op", (operator.eq, operator.ne))
def test_equality_strings(self, op):
m1 = Masked(np.array(["a", "b", "c"]), mask=[True, False, False])
m2 = Masked(np.array(["a", "b", "d"]), mask=[False, False, False])
result = op(m1, m2)
assert_array_equal(result.unmasked, op(m1.unmasked, m2.unmasked))
assert_array_equal(result.mask, m1.mask | m2.mask)
result2 = op(m1, m2.unmasked)
assert_masked_equal(result2, result)
def test_not_implemented(self):
with pytest.raises(TypeError):
Masked(["a", "b"]) > object()
class TestMaskedQuantityOperators(MaskedOperatorTests, QuantitySetup):
pass
class TestMaskedLongitudeOperators(MaskedOperatorTests, LongitudeSetup):
pass
class TestMaskedArrayMethods(MaskedArraySetup):
def test_round(self):
# Goes via ufunc, hence easy.
mrc = self.mc.round()
expected = Masked(self.c.round(), self.mask_c)
assert_masked_equal(mrc, expected)
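    # Reductions such as sum propagate the mask with .any(axis): the result is
    # masked if any contributing element is masked, while its unmasked payload
    # is just the plain reduction of the unmasked data, e.g.,
    # Masked([1., 2.], mask=[True, False]).sum() has unmasked 3.0 and mask True.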
@pytest.mark.parametrize("axis", (0, 1, None))
def test_sum(self, axis):
ma_sum = self.ma.sum(axis)
expected_data = self.a.sum(axis)
expected_mask = self.ma.mask.any(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_sum_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_sum = self.ma.sum(axis, where=where_final)
expected_data = self.ma.unmasked.sum(axis, where=where_final)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_cumsum(self, axis):
ma_sum = self.ma.cumsum(axis)
expected_data = self.a.cumsum(axis)
mask = self.mask_a
if axis is None:
mask = mask.ravel()
expected_mask = np.logical_or.accumulate(mask, axis=axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
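    # In contrast, mean/var/std ignore masked elements (only unmasked values
    # enter the average), and the result is masked only where *all* elements
    # along the axis are masked, e.g.,
    # Masked([1., 2.], mask=[True, False]).mean() has unmasked 2.0 and mask False.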
@pytest.mark.parametrize("axis", (0, 1, None))
def test_mean(self, axis):
ma_mean = self.ma.mean(axis)
filled = self.a.copy()
filled[self.mask_a] = 0.0
count = 1 - self.ma.mask.astype(int)
expected_data = filled.sum(axis) / count.sum(axis)
expected_mask = self.ma.mask.all(axis)
assert_array_equal(ma_mean.unmasked, expected_data)
assert_array_equal(ma_mean.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_mean_all_masked(self, axis):
# test corner case when all values are masked
md = Masked(self.a, np.ones(self.a.shape, dtype=bool))
md_mean = md.mean(axis)
assert np.all(np.isnan(md_mean.unmasked))
assert np.all(md_mean.mask)
def test_mean_int16(self):
ma = self.ma.astype("i2")
ma_mean = ma.mean()
assert ma_mean.dtype == "f8"
expected = ma.astype("f8").mean()
assert_masked_equal(ma_mean, expected)
def test_mean_float16(self):
ma = self.ma.astype("f2")
ma_mean = ma.mean()
assert ma_mean.dtype == "f2"
expected = self.ma.mean().astype("f2")
assert_masked_equal(ma_mean, expected)
def test_mean_inplace(self):
expected = self.ma.mean(1)
out = Masked(np.zeros_like(expected.unmasked))
result = self.ma.mean(1, out=out)
assert result is out
assert_masked_equal(out, expected)
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.filterwarnings("ignore:Mean of empty slice")
@pytest.mark.parametrize("axis", (0, 1, None))
def test_mean_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_mean = self.ma.mean(axis, where=where)
expected_data = self.ma.unmasked.mean(axis, where=where_final)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_mean.unmasked, expected_data)
assert_array_equal(ma_mean.mask, expected_mask)
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.parametrize("axis", (0, 1, None))
def test_var(self, axis):
ma_var = self.ma.var(axis)
filled = (self.a - self.ma.mean(axis, keepdims=True)) ** 2
filled[self.mask_a] = 0.0
count = (1 - self.ma.mask.astype(int)).sum(axis)
expected_data = filled.sum(axis) / count
expected_mask = self.ma.mask.all(axis)
assert_array_equal(ma_var.unmasked, expected_data)
assert_array_equal(ma_var.mask, expected_mask)
ma_var1 = self.ma.var(axis, ddof=1)
expected_data1 = filled.sum(axis) / (count - 1)
expected_mask1 = self.ma.mask.all(axis) | (count <= 1)
assert_array_equal(ma_var1.unmasked, expected_data1)
assert_array_equal(ma_var1.mask, expected_mask1)
ma_var5 = self.ma.var(axis, ddof=5)
assert np.all(~np.isfinite(ma_var5.unmasked))
assert ma_var5.mask.all()
def test_var_int16(self):
ma = self.ma.astype("i2")
ma_var = ma.var()
assert ma_var.dtype == "f8"
expected = ma.astype("f8").var()
assert_masked_equal(ma_var, expected)
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.filterwarnings("ignore:Degrees of freedom <= 0 for slice")
@pytest.mark.parametrize("axis", (0, 1, None))
def test_var_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_var = self.ma.var(axis, where=where)
expected_data = self.ma.unmasked.var(axis, where=where_final)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_var.unmasked, expected_data)
assert_array_equal(ma_var.mask, expected_mask)
def test_std(self):
ma_std = self.ma.std(1, ddof=1)
ma_var1 = self.ma.var(1, ddof=1)
expected = np.sqrt(ma_var1)
assert_masked_equal(ma_std, expected)
def test_std_inplace(self):
expected = self.ma.std(1, ddof=1)
out = Masked(np.zeros_like(expected.unmasked))
result = self.ma.std(1, ddof=1, out=out)
assert result is out
assert_masked_equal(result, expected)
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.filterwarnings("ignore:Degrees of freedom <= 0 for slice")
@pytest.mark.parametrize("axis", (0, 1, None))
def test_std_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_std = self.ma.std(axis, where=where)
expected_data = self.ma.unmasked.std(axis, where=where_final)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_std.unmasked, expected_data)
assert_array_equal(ma_std.mask, expected_mask)
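    # min/max ignore masked elements by filling them with the overall max/min
    # of the data, so for these (not fully masked) data the result is never
    # masked.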
@pytest.mark.parametrize("axis", (0, 1, None))
def test_min(self, axis):
ma_min = self.ma.min(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.max()
expected_data = filled.min(axis)
assert_array_equal(ma_min.unmasked, expected_data)
assert not np.any(ma_min.mask)
def test_min_with_masked_nan(self):
ma = Masked([3.0, np.nan, 2.0], mask=[False, True, False])
ma_min = ma.min()
assert_array_equal(ma_min.unmasked, np.array(2.0))
assert not ma_min.mask
@pytest.mark.parametrize("axis", (0, 1, None))
def test_min_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_min = self.ma.min(axis, where=where_final, initial=np.inf)
expected_data = self.ma.unmasked.min(axis, where=where_final, initial=np.inf)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_min.unmasked, expected_data)
assert_array_equal(ma_min.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_max(self, axis):
ma_max = self.ma.max(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.min()
expected_data = filled.max(axis)
assert_array_equal(ma_max.unmasked, expected_data)
assert not np.any(ma_max.mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_max_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_max = self.ma.max(axis, where=where_final, initial=-np.inf)
expected_data = self.ma.unmasked.max(axis, where=where_final, initial=-np.inf)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_max.unmasked, expected_data)
assert_array_equal(ma_max.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_argmin(self, axis):
ma_argmin = self.ma.argmin(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.max()
expected_data = filled.argmin(axis)
assert_array_equal(ma_argmin, expected_data)
def test_argmin_only_one_unmasked_element(self):
# Regression test for example from @taldcroft at
# https://github.com/astropy/astropy/pull/11127#discussion_r600864559
ma = Masked(data=[1, 2], mask=[True, False])
assert ma.argmin() == 1
if not NUMPY_LT_1_22:
def test_argmin_keepdims(self):
ma = Masked(data=[[1, 2], [3, 4]], mask=[[True, False], [False, False]])
assert_array_equal(ma.argmin(axis=0, keepdims=True), np.array([[1, 0]]))
@pytest.mark.parametrize("axis", (0, 1, None))
def test_argmax(self, axis):
ma_argmax = self.ma.argmax(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.min()
expected_data = filled.argmax(axis)
assert_array_equal(ma_argmax, expected_data)
if not NUMPY_LT_1_22:
def test_argmax_keepdims(self):
ma = Masked(data=[[1, 2], [3, 4]], mask=[[True, False], [False, False]])
assert_array_equal(ma.argmax(axis=1, keepdims=True), np.array([[1], [1]]))
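    # argsort (and sort) place masked values last: the data are filled with a
    # value beyond the current maximum before the sort order is computed.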
@pytest.mark.parametrize("axis", (0, 1, None))
def test_argsort(self, axis):
ma_argsort = self.ma.argsort(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.max() * 1.1
expected_data = filled.argsort(axis)
assert_array_equal(ma_argsort, expected_data)
@pytest.mark.parametrize("order", [None, "a", ("a", "b"), ("b", "a")])
@pytest.mark.parametrize("axis", [0, 1])
def test_structured_argsort(self, axis, order):
ma_argsort = self.msa.argsort(axis, order=order)
filled = self.msa.filled(fill_value=np.array((np.inf, np.inf), dtype=self.sdt))
expected_data = filled.argsort(axis, order=order)
assert_array_equal(ma_argsort, expected_data)
def test_argsort_error(self):
with pytest.raises(ValueError, match="when the array has no fields"):
self.ma.argsort(axis=0, order="a")
@pytest.mark.parametrize("axis", (0, 1))
def test_sort(self, axis):
ma_sort = self.ma.copy()
ma_sort.sort(axis)
indices = self.ma.argsort(axis)
expected_data = np.take_along_axis(self.ma.unmasked, indices, axis)
expected_mask = np.take_along_axis(self.ma.mask, indices, axis)
assert_array_equal(ma_sort.unmasked, expected_data)
assert_array_equal(ma_sort.mask, expected_mask)
@pytest.mark.parametrize("kth", [1, 3])
def test_argpartition(self, kth):
ma = self.ma.ravel()
ma_argpartition = ma.argpartition(kth)
partitioned = ma[ma_argpartition]
assert (partitioned[:kth] < partitioned[kth]).all()
assert (partitioned[kth:] >= partitioned[kth]).all()
if partitioned[kth].mask:
assert all(partitioned.mask[kth:])
else:
assert not any(partitioned.mask[:kth])
@pytest.mark.parametrize("kth", [1, 3])
def test_partition(self, kth):
partitioned = self.ma.flatten()
partitioned.partition(kth)
assert (partitioned[:kth] < partitioned[kth]).all()
assert (partitioned[kth:] >= partitioned[kth]).all()
if partitioned[kth].mask:
assert all(partitioned.mask[kth:])
else:
assert not any(partitioned.mask[:kth])
def test_all_explicit(self):
a1 = np.array(
[
[1.0, 2.0],
[3.0, 4.0],
]
)
a2 = np.array(
[
[1.0, 0.0],
[3.0, 4.0],
]
)
if self._data_cls is not np.ndarray:
a1 = self._data_cls(a1, self.a.unit)
a2 = self._data_cls(a2, self.a.unit)
ma1 = Masked(
a1,
mask=[
[False, False],
[True, True],
],
)
ma2 = Masked(
a2,
mask=[
[False, True],
[False, True],
],
)
ma1_eq_ma2 = ma1 == ma2
assert_array_equal(
ma1_eq_ma2.unmasked,
np.array(
[
[True, False],
[True, True],
]
),
)
assert_array_equal(
ma1_eq_ma2.mask,
np.array(
[
[False, True],
[True, True],
]
),
)
assert ma1_eq_ma2.all()
assert not (ma1 != ma2).all()
ma_eq1 = ma1_eq_ma2.all(1)
assert_array_equal(ma_eq1.mask, np.array([False, True]))
assert bool(ma_eq1[0]) is True
assert bool(ma_eq1[1]) is False
ma_eq0 = ma1_eq_ma2.all(0)
assert_array_equal(ma_eq0.mask, np.array([False, True]))
        assert bool(ma_eq0[0]) is True
        assert bool(ma_eq0[1]) is False
@pytest.mark.parametrize("method", ["any", "all"])
@pytest.mark.parametrize(
"array,axis",
[("a", 0), ("a", 1), ("a", None), ("b", None), ("c", 0), ("c", 1), ("c", None)],
)
def test_all_and_any(self, array, axis, method):
ma = getattr(self, "m" + array)
ma_eq = ma == ma
ma_all_or_any = getattr(ma_eq, method)(axis=axis)
filled = ma_eq.unmasked.copy()
filled[ma_eq.mask] = method == "all"
a_all_or_any = getattr(filled, method)(axis=axis)
all_masked = ma.mask.all(axis)
assert_array_equal(ma_all_or_any.mask, all_masked)
assert_array_equal(ma_all_or_any.unmasked, a_all_or_any)
# interpretation as bool
as_bool = [bool(a) for a in ma_all_or_any.ravel()]
expected = [bool(a) for a in (a_all_or_any & ~all_masked).ravel()]
assert as_bool == expected
def test_any_inplace(self):
ma_eq = self.ma == self.ma
expected = ma_eq.any(1)
out = Masked(np.zeros_like(expected.unmasked))
result = ma_eq.any(1, out=out)
assert result is out
assert_masked_equal(result, expected)
@pytest.mark.parametrize("method", ("all", "any"))
@pytest.mark.parametrize("axis", (0, 1, None))
def test_all_and_any_where(self, method, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_eq = self.ma == self.ma
ma_any = getattr(ma_eq, method)(axis, where=where)
expected_data = getattr(ma_eq.unmasked, method)(axis, where=where_final)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_any.unmasked, expected_data)
assert_array_equal(ma_any.mask, expected_mask)
@pytest.mark.parametrize("offset", (0, 1))
def test_diagonal(self, offset):
mda = self.ma.diagonal(offset=offset)
expected = Masked(
self.a.diagonal(offset=offset), self.mask_a.diagonal(offset=offset)
)
assert_masked_equal(mda, expected)
@pytest.mark.parametrize("offset", (0, 1))
def test_trace(self, offset):
mta = self.ma.trace(offset=offset)
expected = Masked(
self.a.trace(offset=offset), self.mask_a.trace(offset=offset, dtype=bool)
)
assert_masked_equal(mta, expected)
def test_clip(self):
maclip = self.ma.clip(self.b, self.c)
expected = Masked(self.a.clip(self.b, self.c), self.mask_a)
assert_masked_equal(maclip, expected)
def test_clip_masked_min_max(self):
maclip = self.ma.clip(self.mb, self.mc)
# Need to be careful with min, max because of Longitude, which wraps.
dmax = np.maximum(np.maximum(self.a, self.b), self.c).max()
dmin = np.minimum(np.minimum(self.a, self.b), self.c).min()
expected = Masked(
self.a.clip(self.mb.filled(dmin), self.mc.filled(dmax)), mask=self.mask_a
)
assert_masked_equal(maclip, expected)
class TestMaskedQuantityMethods(TestMaskedArrayMethods, QuantitySetup):
pass
class TestMaskedLongitudeMethods(TestMaskedArrayMethods, LongitudeSetup):
pass
class TestMaskedArrayProductMethods(MaskedArraySetup):
# These cannot work on Quantity, so done separately
@pytest.mark.parametrize("axis", (0, 1, None))
def test_prod(self, axis):
ma_sum = self.ma.prod(axis)
expected_data = self.a.prod(axis)
expected_mask = self.ma.mask.any(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_cumprod(self, axis):
ma_sum = self.ma.cumprod(axis)
expected_data = self.a.cumprod(axis)
mask = self.mask_a
if axis is None:
mask = mask.ravel()
expected_mask = np.logical_or.accumulate(mask, axis=axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
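# In the str/repr tests below, masked entries are rendered as runs of em-dashes
# (e.g. "——") in place of the formatted number.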
def test_masked_str_explicit():
sa = np.array([(1.0, 2.0), (3.0, 4.0)], dtype="f8,f8")
msa = Masked(sa, [(False, True), (False, False)])
assert str(msa) == "[(1., ——) (3., 4.)]"
assert str(msa[0]) == "(1., ——)"
assert str(msa[1]) == "(3., 4.)"
with np.printoptions(precision=3, floatmode="fixed"):
assert str(msa) == "[(1.000, ———) (3.000, 4.000)]"
def test_masked_repr_explicit():
# Use explicit endianness to ensure tests pass on all architectures
sa = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=">f8,>f8")
msa = Masked(sa, [(False, True), (False, False)])
assert (
repr(msa)
== "MaskedNDArray([(1., ——), (3., 4.)], dtype=[('f0', '>f8'), ('f1', '>f8')])"
)
assert (
repr(msa[0]) == "MaskedNDArray((1., ——), dtype=[('f0', '>f8'), ('f1', '>f8')])"
)
assert (
repr(msa[1]) == "MaskedNDArray((3., 4.), dtype=[('f0', '>f8'), ('f1', '>f8')])"
)
def test_masked_repr_summary():
ma = Masked(np.arange(15.0), mask=[True] + [False] * 14)
with np.printoptions(threshold=2):
assert repr(ma) == "MaskedNDArray([———, 1., 2., ..., 12., 13., 14.])"
def test_masked_repr_nodata():
assert repr(Masked([])) == "MaskedNDArray([], dtype=float64)"
class TestMaskedArrayRepr(MaskedArraySetup):
def test_array_str(self):
# very blunt check they work at all.
str(self.ma)
str(self.mb)
str(self.mc)
str(self.msa)
str(self.msb)
str(self.msc)
def test_scalar_str(self):
assert self.mb[0].shape == ()
str(self.mb[0])
assert self.msb[0].shape == ()
str(self.msb[0])
assert self.msc[0].shape == ()
str(self.msc[0])
def test_array_repr(self):
repr(self.ma)
repr(self.mb)
repr(self.mc)
repr(self.msa)
repr(self.msb)
repr(self.msc)
def test_scalar_repr(self):
repr(self.mb[0])
repr(self.msb[0])
repr(self.msc[0])
class TestMaskedQuantityRepr(TestMaskedArrayRepr, QuantitySetup):
pass
class TestMaskedRecarray(MaskedArraySetup):
@classmethod
def setup_class(self):
super().setup_class()
self.ra = self.sa.view(np.recarray)
self.mra = Masked(self.ra, mask=self.mask_sa)
def test_recarray_setup(self):
assert isinstance(self.mra, Masked)
assert isinstance(self.mra, np.recarray)
assert np.all(self.mra.unmasked == self.ra)
assert np.all(self.mra.mask == self.mask_sa)
assert_array_equal(self.mra.view(np.ndarray), self.sa)
assert isinstance(self.mra.a, Masked)
assert_array_equal(self.mra.a.unmasked, self.sa["a"])
assert_array_equal(self.mra.a.mask, self.mask_sa["a"])
def test_recarray_setting(self):
mra = self.mra.copy()
mra.a = self.msa["b"]
assert_array_equal(mra.a.unmasked, self.msa["b"].unmasked)
assert_array_equal(mra.a.mask, self.msa["b"].mask)
@pytest.mark.parametrize("attr", [0, "a"])
def test_recarray_field_getting(self, attr):
mra_a = self.mra.field(attr)
assert isinstance(mra_a, Masked)
assert_array_equal(mra_a.unmasked, self.sa["a"])
assert_array_equal(mra_a.mask, self.mask_sa["a"])
@pytest.mark.parametrize("attr", [0, "a"])
def test_recarray_field_setting(self, attr):
mra = self.mra.copy()
mra.field(attr, self.msa["b"])
assert_array_equal(mra.a.unmasked, self.msa["b"].unmasked)
assert_array_equal(mra.a.mask, self.msa["b"].mask)
class TestMaskedArrayInteractionWithNumpyMA(MaskedArraySetup):
def test_masked_array_from_masked(self):
"""Check that we can initialize a MaskedArray properly."""
np_ma = np.ma.MaskedArray(self.ma)
assert type(np_ma) is np.ma.MaskedArray
assert type(np_ma.data) is self._data_cls
assert type(np_ma.mask) is np.ndarray
assert_array_equal(np_ma.data, self.a)
assert_array_equal(np_ma.mask, self.mask_a)
def test_view_as_masked_array(self):
"""Test that we can be viewed as a MaskedArray."""
np_ma = self.ma.view(np.ma.MaskedArray)
assert type(np_ma) is np.ma.MaskedArray
assert type(np_ma.data) is self._data_cls
assert type(np_ma.mask) is np.ndarray
assert_array_equal(np_ma.data, self.a)
assert_array_equal(np_ma.mask, self.mask_a)
class TestMaskedQuantityInteractionWithNumpyMA(
TestMaskedArrayInteractionWithNumpyMA, QuantitySetup
):
pass
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test all functions covered by __array_function__.
Here, run through all functions, with simple tests just to check the helpers.
More complicated tests of functionality, including with subclasses, are done
in test_functions.
TODO: finish full coverage (see also `~astropy.utils.masked.function_helpers`)
- np.linalg
- np.fft (is there any point?)
- np.lib.nanfunctions
"""
import inspect
import itertools
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy.units.tests.test_quantity_non_ufuncs import get_wrapped_functions
from astropy.utils.compat import NUMPY_LT_1_23, NUMPY_LT_1_24, NUMPY_LT_1_25
from astropy.utils.masked import Masked, MaskedNDArray
from astropy.utils.masked.function_helpers import (
APPLY_TO_BOTH_FUNCTIONS,
DISPATCHED_FUNCTIONS,
IGNORED_FUNCTIONS,
MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from .test_masked import MaskedArraySetup, assert_masked_equal
all_wrapped_functions = get_wrapped_functions(np)
all_wrapped = set(all_wrapped_functions.values())
class BasicTestSetup(MaskedArraySetup):
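    # Generic check: apply the wrapped numpy function separately to the
    # unmasked data and to the mask, and compare with the result for Masked.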
def check(self, func, *args, **kwargs):
out = func(self.ma, *args, **kwargs)
expected = Masked(
func(self.a, *args, **kwargs), mask=func(self.mask_a, *args, **kwargs)
)
assert_masked_equal(out, expected)
def check2(self, func, *args, **kwargs):
out = func(self.ma, self.mb, *args, **kwargs)
expected = Masked(
func(self.a, self.b, *args, **kwargs),
mask=func(self.mask_a, self.mask_b, *args, **kwargs),
)
if isinstance(out, (tuple, list)):
for o, x in zip(out, expected):
assert_masked_equal(o, x)
else:
assert_masked_equal(out, expected)
class NoMaskTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o, expected)
class InvariantMaskTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, self.mask_a)
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.ma) == (2, 3)
def test_size(self):
assert np.size(self.ma) == 6
def test_ndim(self):
assert np.ndim(self.ma) == 2
class TestShapeManipulation(BasicTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (6, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
self.check(np.atleast_1d)
o, so = np.atleast_1d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1,)
def test_atleast_2d(self):
self.check(np.atleast_2d)
o, so = np.atleast_2d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1)
def test_atleast_3d(self):
self.check(np.atleast_3d)
o, so = np.atleast_3d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1, 1)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.mc)
assert o.shape == o.mask.shape == (2,)
assert_array_equal(o.unmasked, self.c.squeeze())
assert_array_equal(o.mask, self.mask_c.squeeze())
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
self.check(np.broadcast_to, (3, 2, 3))
self.check(np.broadcast_to, (3, 2, 3), subok=False)
def test_broadcast_arrays(self):
self.check2(np.broadcast_arrays)
self.check2(np.broadcast_arrays, subok=False)
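# Arg-like functions (argmin, nonzero, ...) return plain index arrays; masked
# entries are excluded by filling the data first (e.g. with +inf for argmin and
# 0 for nonzero).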
class TestArgFunctions(MaskedArraySetup):
def check(self, function, *args, fill_value=np.nan, **kwargs):
o = function(self.ma, *args, **kwargs)
a_filled = self.ma.filled(fill_value=fill_value)
expected = function(a_filled, *args, **kwargs)
assert_array_equal(o, expected)
def test_argmin(self):
self.check(np.argmin, fill_value=np.inf)
def test_argmax(self):
self.check(np.argmax, fill_value=-np.inf)
def test_argsort(self):
self.check(np.argsort, fill_value=np.nan)
def test_lexsort(self):
self.check(np.lexsort, fill_value=np.nan)
def test_nonzero(self):
self.check(np.nonzero, fill_value=0.0)
@pytest.mark.filterwarnings("ignore:Calling nonzero on 0d arrays is deprecated")
def test_nonzero_0d(self):
res1 = Masked(1, mask=False).nonzero()
assert len(res1) == 1
assert_array_equal(res1[0], np.ones(()).nonzero()[0])
res2 = Masked(1, mask=True).nonzero()
assert len(res2) == 1
assert_array_equal(res2[0], np.zeros(()).nonzero()[0])
def test_argwhere(self):
self.check(np.argwhere, fill_value=0.0)
def test_argpartition(self):
self.check(np.argpartition, 2, fill_value=np.inf)
def test_flatnonzero(self):
self.check(np.flatnonzero, fill_value=0.0)
class TestAlongAxis(MaskedArraySetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)
out = np.take_along_axis(self.ma, indices, axis=0)
expected = np.take_along_axis(self.a, indices, axis=0)
expected_mask = np.take_along_axis(self.mask_a, indices, axis=0)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_put_along_axis(self):
ma = self.ma.copy()
indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)
np.put_along_axis(ma, indices, axis=0, values=-1)
expected = self.a.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, self.mask_a)
np.put_along_axis(ma, indices, axis=0, values=np.ma.masked)
assert_array_equal(ma.unmasked, expected)
expected_mask = self.mask_a.copy()
np.put_along_axis(expected_mask, indices, axis=0, values=True)
assert_array_equal(ma.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.ma)
expected = np.apply_along_axis(np.square, axis, self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
@pytest.mark.parametrize("axes", [(1,), 0, (0, -1)])
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.mean(np.square(x), axis)
out = np.apply_over_axes(function, self.ma, axes)
expected = self.ma
for axis in axes if isinstance(axes, tuple) else (axes,):
expected = (expected**2).mean(axis, keepdims=True)
assert_array_equal(out.unmasked, expected.unmasked)
assert_array_equal(out.mask, expected.mask)
def test_apply_over_axes_no_reduction(self):
out = np.apply_over_axes(np.cumsum, self.ma, 0)
expected = self.ma.cumsum(axis=0)
assert_masked_equal(out, expected)
def test_apply_over_axes_wrong_size(self):
with pytest.raises(ValueError, match="not.*correct shape"):
np.apply_over_axes(lambda x, axis: x[..., np.newaxis], self.ma, 0)
class TestIndicesFrom(NoMaskTestSetup):
@classmethod
def setup_class(self):
self.a = np.arange(9).reshape(3, 3)
self.mask_a = np.eye(3, dtype=bool)
self.ma = Masked(self.a, self.mask_a)
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantMaskTestSetup):
@classmethod
def setup_class(self):
self.a = np.array([1 + 2j, 3 + 4j])
self.mask_a = np.array([True, False])
self.ma = Masked(self.a, mask=self.mask_a)
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantMaskTestSetup):
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.ma)
assert_array_equal(copy, self.ma)
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.ma)
assert_array_equal(farray, self.ma)
class TestArrayCreation(MaskedArraySetup):
def test_empty_like(self):
o = np.empty_like(self.ma)
assert o.shape == (2, 3)
assert isinstance(o, Masked)
assert isinstance(o, np.ndarray)
o2 = np.empty_like(prototype=self.ma)
assert o2.shape == (2, 3)
assert isinstance(o2, Masked)
assert isinstance(o2, np.ndarray)
o3 = np.empty_like(self.ma, subok=False)
assert type(o3) is MaskedNDArray
def test_zeros_like(self):
o = np.zeros_like(self.ma)
assert_array_equal(o.unmasked, np.zeros_like(self.a))
assert_array_equal(o.mask, np.zeros_like(self.mask_a))
o2 = np.zeros_like(a=self.ma)
assert_array_equal(o2.unmasked, np.zeros_like(self.a))
assert_array_equal(o2.mask, np.zeros_like(self.mask_a))
def test_ones_like(self):
o = np.ones_like(self.ma)
assert_array_equal(o.unmasked, np.ones_like(self.a))
assert_array_equal(o.mask, np.zeros_like(self.mask_a))
o2 = np.ones_like(a=self.ma)
assert_array_equal(o2.unmasked, np.ones_like(self.a))
assert_array_equal(o2.mask, np.zeros_like(self.mask_a))
@pytest.mark.parametrize("value", [0.5, Masked(0.5, mask=True), np.ma.masked])
def test_full_like(self, value):
o = np.full_like(self.ma, value)
if value is np.ma.masked:
expected = Masked(o.unmasked, True)
else:
expected = Masked(np.empty_like(self.a))
expected[...] = value
assert_array_equal(o.unmasked, expected.unmasked)
assert_array_equal(o.mask, expected.mask)
class TestAccessingParts(BasicTestSetup):
def test_diag(self):
self.check(np.diag)
def test_diag_1d_input(self):
ma = self.ma.ravel()
o = np.diag(ma)
assert_array_equal(o.unmasked, np.diag(self.a.ravel()))
assert_array_equal(o.mask, np.diag(self.mask_a.ravel()))
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False], self.ma, axis=0)
expected = np.compress([True, False], self.a, axis=0)
expected_mask = np.compress([True, False], self.mask_a, axis=0)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_extract(self):
o = np.extract([True, False, True], self.ma)
expected = np.extract([True, False, True], self.a)
expected_mask = np.extract([True, False, True], self.mask_a)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(MaskedArraySetup):
def test_put(self):
ma = self.ma.copy()
v = Masked([50, 150], [False, True])
np.put(ma, [0, 2], v)
expected = self.a.copy()
np.put(expected, [0, 2], [50, 150])
expected_mask = self.mask_a.copy()
np.put(expected_mask, [0, 2], [False, True])
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
# Indices cannot be masked.
np.put(ma, Masked([0, 2]), v)
with pytest.raises(TypeError):
# Array to put masked values in must be masked.
np.put(self.a.copy(), [0, 2], v)
def test_putmask(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked(
np.arange(100, 650, 100), mask=[False, True, True, True, False, False]
)
np.putmask(ma, mask, values)
expected = self.a.flatten()
np.putmask(expected, mask, values.unmasked)
expected_mask = self.mask_a.flatten()
np.putmask(expected_mask, mask, values.mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.putmask(self.a.flatten(), mask, values)
def test_place(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked([100, 200], mask=[False, True])
np.place(ma, mask, values)
expected = self.a.flatten()
np.place(expected, mask, values.unmasked)
expected_mask = self.mask_a.flatten()
np.place(expected_mask, mask, values.mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.place(self.a.flatten(), mask, values)
def test_copyto(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked(
np.arange(100, 650, 100), mask=[False, True, True, True, False, False]
)
np.copyto(ma, values, where=mask)
expected = self.a.flatten()
np.copyto(expected, values.unmasked, where=mask)
expected_mask = self.mask_a.flatten()
np.copyto(expected_mask, values.mask, where=mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.copyto(self.a.flatten(), values, where=mask)
@pytest.mark.parametrize("value", [0.25, np.ma.masked])
def test_fill_diagonal(self, value):
ma = self.ma[:2, :2].copy()
np.fill_diagonal(ma, value)
expected = ma.copy()
expected[np.diag_indices_from(expected)] = value
assert_array_equal(ma.unmasked, expected.unmasked)
assert_array_equal(ma.mask, expected.mask)
class TestRepeat(BasicTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(MaskedArraySetup):
# More tests at TestMaskedArrayConcatenation in test_functions.
def check(self, func, *args, **kwargs):
ma_list = kwargs.pop("ma_list", [self.ma, self.ma])
a_list = [Masked(ma).unmasked for ma in ma_list]
m_list = [Masked(ma).mask for ma in ma_list]
o = func(ma_list, *args, **kwargs)
expected = func(a_list, *args, **kwargs)
expected_mask = func(m_list, *args, **kwargs)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
self.check(np.concatenate, ma_list=[self.a, self.ma])
self.check(np.concatenate, dtype="f4")
out = Masked(np.empty((4, 3)))
result = np.concatenate([self.ma, self.ma], out=out)
assert out is result
expected = np.concatenate([self.a, self.a])
expected_mask = np.concatenate([self.mask_a, self.mask_a])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(TypeError):
np.concatenate([self.ma, self.ma], out=np.empty((4, 3)))
def test_stack(self):
self.check(np.stack)
def test_column_stack(self):
self.check(np.column_stack)
def test_hstack(self):
self.check(np.hstack)
def test_vstack(self):
self.check(np.vstack)
def test_dstack(self):
self.check(np.dstack)
def test_block(self):
self.check(np.block)
out = np.block([[0.0, Masked(1.0, True)], [Masked(1, False), Masked(2, False)]])
expected = np.array([[0, 1.0], [1, 2]])
expected_mask = np.array([[False, True], [False, False]])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_append(self):
out = np.append(self.ma, self.mc, axis=1)
expected = np.append(self.a, self.c, axis=1)
expected_mask = np.append(self.mask_a, self.mask_c, axis=1)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_insert(self):
obj = (1, 1)
values = Masked([50.0, 25.0], mask=[True, False])
out = np.insert(self.ma.flatten(), obj, values)
expected = np.insert(self.a.flatten(), obj, [50.0, 25.0])
expected_mask = np.insert(self.mask_a.flatten(), obj, [True, False])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(TypeError):
np.insert(self.a.flatten(), obj, values)
with pytest.raises(TypeError):
np.insert(self.ma.flatten(), Masked(obj), values)
class TestSplit:
@classmethod
def setup_class(self):
self.a = np.arange(54.0).reshape(3, 3, 6)
self.mask_a = np.zeros(self.a.shape, dtype=bool)
self.mask_a[1, 1, 1] = True
self.mask_a[0, 1, 4] = True
self.mask_a[1, 2, 5] = True
self.ma = Masked(self.a, mask=self.mask_a)
def check(self, func, *args, **kwargs):
out = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
expected_mask = func(self.mask_a, *args, **kwargs)
assert len(out) == len(expected)
for o, x, xm in zip(out, expected, expected_mask):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, xm)
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestMethodLikes(MaskedArraySetup):
def check(self, function, *args, method=None, **kwargs):
if method is None:
method = function.__name__
o = function(self.ma, *args, **kwargs)
x = getattr(self.ma, method)(*args, **kwargs)
assert_masked_equal(o, x)
def test_max(self):
self.check(np.max, method="max")
def test_min(self):
self.check(np.min, method="min")
def test_amax(self):
self.check(np.amax, method="max")
def test_amin(self):
self.check(np.amin, method="min")
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
self.check(np.any)
def test_all(self):
self.check(np.all)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`sometrue` is deprecated as of NumPy 1.25.0")
def test_sometrue(self):
self.check(np.sometrue, method="any")
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`alltrue` is deprecated as of NumPy 1.25.0")
def test_alltrue(self):
self.check(np.alltrue, method="all")
def test_prod(self):
self.check(np.prod)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`product` is deprecated as of NumPy 1.25.0")
def test_product(self):
self.check(np.product, method="prod")
def test_cumprod(self):
self.check(np.cumprod)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`cumproduct` is deprecated as of NumPy 1.25.0")
def test_cumproduct(self):
self.check(np.cumproduct, method="cumprod")
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round(self):
self.check(np.round, method="round")
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`round_` is deprecated as of NumPy 1.25.0")
def test_round_(self):
self.check(np.round_, method="round")
def test_around(self):
self.check(np.around, method="round")
def test_clip(self):
self.check(np.clip, 2.0, 4.0)
self.check(np.clip, self.mb, self.mc)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
self.check(np.var)
class TestUfuncLike(InvariantMaskTestSetup):
def test_fix(self):
self.check(np.fix)
# Check np.fix with out argument for completeness
# (Note: could be done in self.check, but np.fix is the only
# invariant mask function that has `out`, so no point.)
out = np.zeros_like(self.ma)
result = np.fix(self.ma, out=out)
assert result is out
expected = np.fix(self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
def test_angle(self):
a = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j])
mask_a = np.array([True, False, True, False])
ma = Masked(a, mask=mask_a)
out = np.angle(ma)
expected = np.angle(ma.unmasked)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_a)
def test_i0(self):
self.check(np.i0)
def test_sinc(self):
self.check(np.sinc)
def test_where(self):
mask = [True, False, True]
out = np.where(mask, self.ma, 1000.0)
expected = np.where(mask, self.a, 1000.0)
expected_mask = np.where(mask, self.mask_a, False)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
mask2 = Masked(mask, [True, False, False])
out2 = np.where(mask2, self.ma, 1000.0)
expected2 = np.where(mask, self.a, 1000.0)
expected_mask2 = np.where(mask, self.mask_a, False) | mask2.mask
assert_array_equal(out2.unmasked, expected2)
assert_array_equal(out2.mask, expected_mask2)
def test_where_single_arg(self):
m = Masked(np.arange(3), mask=[True, False, False])
out = np.where(m)
expected = m.nonzero()
assert isinstance(out, tuple) and len(out) == 1
assert_array_equal(out[0], expected[0])
def test_where_wrong_number_of_arg(self):
with pytest.raises(ValueError, match="either both or neither"):
np.where([True, False, False], self.a)
def test_choose(self):
a = np.array([0, 1]).reshape((2, 1))
result = np.choose(a, (self.ma, self.mb))
expected = np.choose(a, (self.a, self.b))
expected_mask = np.choose(a, (self.mask_a, self.mask_b))
assert_array_equal(result.unmasked, expected)
assert_array_equal(result.mask, expected_mask)
out = np.zeros_like(result)
result2 = np.choose(a, (self.ma, self.mb), out=out)
assert result2 is out
assert_array_equal(result2, result)
with pytest.raises(TypeError):
np.choose(a, (self.ma, self.mb), out=np.zeros_like(expected))
def test_choose_masked(self):
ma = Masked(np.array([-1, 1]), mask=[True, False]).reshape((2, 1))
out = ma.choose((self.ma, self.mb))
expected = np.choose(ma.filled(0), (self.a, self.b))
expected_mask = np.choose(ma.filled(0), (self.mask_a, self.mask_b)) | ma.mask
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(ValueError):
ma.unmasked.choose((self.ma, self.mb))
@pytest.mark.parametrize("default", [-1.0, np.ma.masked, Masked(-1, mask=True)])
def test_select(self, default):
a, mask_a, ma = self.a, self.mask_a, self.ma
out = np.select([a < 1.5, a > 3.5], [ma, ma + 1], default=default)
expected = np.select(
[a < 1.5, a > 3.5],
[a, a + 1],
default=-1 if default is not np.ma.masked else 0,
)
expected_mask = np.select(
[a < 1.5, a > 3.5],
[mask_a, mask_a],
default=getattr(default, "mask", False),
)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_real_if_close(self):
a = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j])
mask_a = np.array([True, False, True, False])
ma = Masked(a, mask=mask_a)
out = np.real_if_close(ma)
expected = np.real_if_close(a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_a)
def test_tril(self):
self.check(np.tril)
def test_triu(self):
self.check(np.triu)
def test_unwrap(self):
self.check(np.unwrap)
def test_nan_to_num(self):
self.check(np.nan_to_num)
ma = Masked([np.nan, 1.0], mask=[True, False])
o = np.nan_to_num(ma, copy=False)
assert_masked_equal(o, Masked([0.0, 1.0], mask=[True, False]))
assert ma is o
class TestUfuncLikeTests:
@classmethod
def setup_class(self):
self.a = np.array([[-np.inf, +np.inf, np.nan, 3.0, 4.0]] * 2)
self.mask_a = np.array([[False] * 5, [True] * 4 + [False]])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([[3.0001], [3.9999]])
self.mask_b = np.array([[True], [False]])
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, func):
out = func(self.ma)
expected = func(self.a)
assert type(out) is MaskedNDArray
assert out.dtype.kind == "b"
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
assert not np.may_share_memory(out.mask, self.mask_a)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
o = np.isreal(Masked([1.0 + 1j], mask=False))
assert not o.unmasked and not o.mask
o = np.isreal(Masked([1.0 + 1j], mask=True))
assert not o.unmasked and o.mask
def test_iscomplex(self):
self.check(np.iscomplex)
o = np.iscomplex(Masked([1.0 + 1j], mask=False))
assert o.unmasked and not o.mask
o = np.iscomplex(Masked([1.0 + 1j], mask=True))
assert o.unmasked and o.mask
def test_isclose(self):
out = np.isclose(self.ma, self.mb, atol=0.01)
expected = np.isclose(self.ma, self.mb, atol=0.01)
expected_mask = self.mask_a | self.mask_b
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_allclose(self):
out = np.allclose(self.ma, self.mb, atol=0.01)
expected = np.isclose(self.ma, self.mb, atol=0.01)[
self.mask_a | self.mask_b
].all()
assert_array_equal(out, expected)
def test_array_equal(self):
assert not np.array_equal(self.ma, self.ma)
assert not np.array_equal(self.ma, self.a)
assert np.array_equal(self.ma, self.ma, equal_nan=True)
assert np.array_equal(self.ma, self.a, equal_nan=True)
assert not np.array_equal(self.ma, self.mb)
ma2 = self.ma.copy()
ma2.mask |= np.isnan(self.a)
assert np.array_equal(ma2, self.ma)
def test_array_equiv(self):
assert np.array_equiv(self.mb, self.mb)
assert np.array_equiv(self.mb, self.b)
assert not np.array_equiv(self.ma, self.mb)
assert np.array_equiv(self.mb, np.stack([self.mb, self.mb]))
class TestOuterLikeFunctions(MaskedArraySetup):
def test_outer(self):
result = np.outer(self.ma, self.mb)
expected_data = np.outer(self.a.ravel(), self.b.ravel())
expected_mask = np.logical_or.outer(self.mask_a.ravel(), self.mask_b.ravel())
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
out = np.zeros_like(result)
result2 = np.outer(self.ma, self.mb, out=out)
assert result2 is out
assert result2 is not result
assert_masked_equal(result2, result)
out2 = np.zeros_like(result.unmasked)
with pytest.raises(TypeError):
np.outer(self.ma, self.mb, out=out2)
def test_kron(self):
result = np.kron(self.ma, self.mb)
expected_data = np.kron(self.a, self.b)
expected_mask = np.logical_or.outer(self.mask_a, self.mask_b).reshape(
result.shape
)
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
class TestReductionLikeFunctions(MaskedArraySetup):
def test_average(self):
o = np.average(self.ma)
assert_masked_equal(o, self.ma.mean())
o = np.average(self.ma, weights=self.mb, axis=-1)
expected = np.average(self.a, weights=self.b, axis=-1)
expected_mask = (self.mask_a | self.mask_b).any(-1)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_trace(self):
o = np.trace(self.ma)
expected = np.trace(self.a)
expected_mask = np.trace(self.mask_a).astype(bool)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
@pytest.mark.parametrize("axis", [0, 1, None])
def test_count_nonzero(self, axis):
o = np.count_nonzero(self.ma, axis=axis)
expected = np.count_nonzero(self.ma.filled(0), axis=axis)
assert_array_equal(o, expected)
@pytest.mark.filterwarnings("ignore:all-nan")
class TestPartitionLikeFunctions:
@classmethod
def setup_class(self):
self.a = np.arange(36.0).reshape(6, 6)
self.mask_a = np.zeros_like(self.a, bool)
        # On purpose fill the lower triangle (including the diagonal), so that
        # some rows/columns end up fully masked.
self.mask_a[np.tril_indices_from(self.a)] = True
self.ma = Masked(self.a, mask=self.mask_a)
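    # median/quantile/percentile are checked against their nan* counterparts,
    # with masked entries replaced by NaN.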
def check(self, function, *args, **kwargs):
# Check function by comparing to nan-equivalent, with masked
# values set to NaN.
o = function(self.ma, *args, **kwargs)
nanfunc = getattr(np, "nan" + function.__name__)
nanfilled = self.ma.filled(np.nan)
expected = nanfunc(nanfilled, *args, **kwargs)
assert_array_equal(o.filled(np.nan), expected)
assert_array_equal(o.mask, np.isnan(expected))
# Also check that we can give an output MaskedArray.
if NUMPY_LT_1_25 and kwargs.get("keepdims", False):
# numpy bug gh-22714 prevents using out with keepdims=True.
# This is fixed in numpy 1.25.
return
out = np.zeros_like(o)
o2 = function(self.ma, *args, out=out, **kwargs)
assert o2 is out
assert_masked_equal(o2, o)
# But that a regular array cannot be used since it has no mask.
with pytest.raises(TypeError):
function(self.ma, *args, out=np.zeros_like(expected), **kwargs)
@pytest.mark.parametrize("keepdims", [False, True])
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_median(self, axis, keepdims):
self.check(np.median, axis=axis, keepdims=keepdims)
@pytest.mark.parametrize("keepdims", [False, True])
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_quantile(self, axis, keepdims):
self.check(np.quantile, q=[0.25, 0.5], axis=axis, keepdims=keepdims)
def test_quantile_out_of_range(self):
with pytest.raises(ValueError, match="must be in the range"):
np.quantile(self.ma, q=1.5)
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_percentile(self, axis):
self.check(np.percentile, q=50, axis=axis)
class TestIntDiffFunctions(MaskedArraySetup):
def test_diff(self):
out = np.diff(self.ma)
expected = np.diff(self.a)
expected_mask = self.mask_a[:, 1:] | self.mask_a[:, :-1]
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_diff_prepend_append(self):
out = np.diff(self.ma, prepend=Masked(-1, mask=True), append=1)
expected = np.diff(self.a, prepend=-1, append=1.0)
mask = np.concatenate(
[np.ones((2, 1), bool), self.mask_a, np.zeros((2, 1), bool)], axis=-1
)
expected_mask = mask[:, 1:] | mask[:, :-1]
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_trapz(self):
ma = self.ma.copy()
ma.mask[1] = False
out = np.trapz(ma)
assert_array_equal(out.unmasked, np.trapz(self.a))
assert_array_equal(out.mask, np.array([True, False]))
def test_gradient(self):
out = np.gradient(self.ma)
expected = np.gradient(self.a)
expected_mask = [
(self.mask_a[1:] | self.mask_a[:-1]).repeat(2, axis=0),
np.stack(
[
self.mask_a[:, 0] | self.mask_a[:, 1],
self.mask_a[:, 0] | self.mask_a[:, 2],
self.mask_a[:, 1] | self.mask_a[:, 2],
],
axis=-1,
),
]
for o, x, m in zip(out, expected, expected_mask):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
class TestSpaceFunctions:
@classmethod
def setup_class(self):
self.a = np.arange(1.0, 7.0).reshape(2, 3)
self.mask_a = np.array(
[
[True, False, False],
[False, True, False],
]
)
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([2.5, 10.0, 3.0])
self.mask_b = np.array([False, True, False])
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, *args, **kwargs):
out = function(self.ma, self.mb, 5)
expected = function(self.a, self.b, 5)
expected_mask = np.broadcast_to(
self.mask_a | self.mask_b, expected.shape
).copy()
# TODO: make implementation that also ensures start point mask is
# determined just by start point? (as for geomspace in numpy 1.20)?
expected_mask[-1] = self.mask_b
if function is np.geomspace:
expected_mask[0] = self.mask_a
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_linspace(self):
self.check(np.linspace, 5)
def test_logspace(self):
self.check(np.logspace, 10)
def test_geomspace(self):
self.check(np.geomspace, 5)
class TestInterpolationFunctions(MaskedArraySetup):
def test_interp(self):
xp = np.arange(5.0)
fp = np.array([1.0, 5.0, 6.0, 19.0, 20.0])
mask_fp = np.array([False, False, False, True, False])
mfp = Masked(fp, mask=mask_fp)
x = np.array([1.5, 17.0])
mask_x = np.array([False, True])
mx = Masked(x, mask=mask_x)
out = np.interp(mx, xp, mfp)
expected = np.interp(x, xp[~mask_fp], fp[~mask_fp])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_x)
def test_piecewise(self):
condlist = [self.a < 1, self.a >= 1]
out = np.piecewise(self.ma, condlist, [Masked(-1, mask=True), 1.0])
expected = np.piecewise(self.a, condlist, [-1, 1.0])
expected_mask = np.piecewise(self.mask_a, condlist, [True, False])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
condlist2 = [self.a < 1, self.a >= 3]
out2 = np.piecewise(
self.ma,
condlist2,
[Masked(-1, True), 1, lambda x: Masked(np.full_like(x, 2.0), mask=~x.mask)],
)
expected = np.piecewise(self.a, condlist2, [-1, 1, 2])
expected_mask = np.piecewise(
self.mask_a, condlist2, [True, False, lambda x: ~x]
)
assert_array_equal(out2.unmasked, expected)
assert_array_equal(out2.mask, expected_mask)
with pytest.raises(ValueError, match="with 2 condition"):
np.piecewise(self.ma, condlist2, [])
def test_regression_12978(self):
"""Regression tests for https://github.com/astropy/astropy/pull/12978"""
# This case produced incorrect results
mask = [False, True, False]
x = np.array([1, 2, 3])
xp = Masked(np.array([1, 2, 3]), mask=mask)
fp = Masked(np.array([1, 2, 3]), mask=mask)
result = np.interp(x, xp, fp)
assert_array_equal(result, x)
# This case raised a ValueError
xp = np.array([1, 3])
fp = Masked(np.array([1, 3]))
result = np.interp(x, xp, fp)
assert_array_equal(result, x)
class TestBincount(MaskedArraySetup):
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
mask_i = np.array([True, False, False, True, False, False])
mi = Masked(i, mask=mask_i)
out = np.bincount(mi)
expected = np.bincount(i[~mask_i])
assert_array_equal(out, expected)
w = np.arange(len(i))
mask_w = np.array([True] + [False] * 5)
mw = Masked(w, mask=mask_w)
out2 = np.bincount(i, mw)
expected = np.bincount(i, w)
expected_mask = np.array([False, True, False, False, False])
assert_array_equal(out2.unmasked, expected)
assert_array_equal(out2.mask, expected_mask)
out3 = np.bincount(mi, mw)
expected = np.bincount(i[~mask_i], w[~mask_i])
expected_mask = np.array([False, False, False, False, False])
assert_array_equal(out3.unmasked, expected)
assert_array_equal(out3.mask, expected_mask)
class TestSortFunctions(MaskedArraySetup):
def test_sort(self):
o = np.sort(self.ma)
expected = self.ma.copy()
expected.sort()
assert_masked_equal(o, expected)
def test_sort_complex(self):
ma = Masked(
np.array([1 + 2j, 0 + 4j, 3 + 0j, -1 - 1j]),
mask=[True, False, False, False],
)
o = np.sort_complex(ma)
indx = np.lexsort((ma.unmasked.imag, ma.unmasked.real, ma.mask))
expected = ma[indx]
assert_masked_equal(o, expected)
@pytest.mark.skipif(not NUMPY_LT_1_24, reason="np.msort is deprecated")
def test_msort(self):
o = np.msort(self.ma)
expected = np.sort(self.ma, axis=0)
assert_masked_equal(o, expected)
def test_partition(self):
o = np.partition(self.ma, 1)
expected = self.ma.copy()
expected.partition(1)
assert_masked_equal(o, expected)
class TestStringFunctions:
# More elaborate tests done in test_masked.py
@classmethod
def setup_class(self):
self.ma = Masked(np.arange(3), mask=[True, False, False])
def test_array2string(self):
out0 = np.array2string(self.ma)
assert out0 == "[— 1 2]"
# Arguments are interpreted as usual.
out1 = np.array2string(self.ma, separator=", ")
assert out1 == "[—, 1, 2]"
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.ma, separator=", ", formatter={"all": hex})
assert out2 == "[———, 0x1, 0x2]"
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(
self.ma, None, None, None, ", ", "", np._NoValue, {"int": hex}
)
assert out3 == out2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.ma, separator=", ", formatter={"float": hex})
assert out4 == out1
def test_array_repr(self):
out = np.array_repr(self.ma)
assert out == "MaskedNDArray([—, 1, 2])"
ma2 = self.ma.astype("f4")
out2 = np.array_repr(ma2)
assert out2 == "MaskedNDArray([——, 1., 2.], dtype=float32)"
def test_array_str(self):
out = np.array_str(self.ma)
assert out == "[— 1 2]"
class TestBitFunctions:
@classmethod
def setup_class(self):
self.a = np.array([15, 255, 0], dtype="u1")
self.mask_a = np.array([False, True, False])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.unpackbits(self.a).reshape(6, 4)
self.mask_b = np.array([False] * 15 + [True, True] + [False] * 7).reshape(6, 4)
self.mb = Masked(self.b, mask=self.mask_b)
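    # A packed byte is masked if any of its input bits is masked; conversely, a
    # masked byte unpacks into eight masked bits.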
@pytest.mark.parametrize("axis", [None, 1, 0])
def test_packbits(self, axis):
out = np.packbits(self.mb, axis=axis)
if axis is None:
expected = self.a
else:
expected = np.packbits(self.b, axis=axis)
expected_mask = np.packbits(self.mask_b, axis=axis) > 0
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_unpackbits(self):
out = np.unpackbits(self.ma)
mask = np.where(self.mask_a, np.uint8(255), np.uint8(0))
expected_mask = np.unpackbits(mask) > 0
assert_array_equal(out.unmasked, self.b.ravel())
assert_array_equal(out.mask, expected_mask)
class TestIndexFunctions(MaskedArraySetup):
"""Does not seem much sense to support these..."""
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.ma, 3)
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.ma,), 3)
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.ma)
class TestDtypeFunctions(MaskedArraySetup):
def check(self, function, *args, **kwargs):
out = function(self.ma, *args, **kwargs)
expected = function(self.a, *args, **kwargs)
assert out == expected
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.a.dtype)
self.check(np.can_cast, "f4")
def test_min_scalar_type(self):
out = np.min_scalar_type(self.ma[0, 0])
expected = np.min_scalar_type(self.a[0, 0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(MaskedArraySetup):
def test_meshgrid(self):
a = np.arange(1.0, 4.0)
mask_a = np.array([True, False, False])
ma = Masked(a, mask=mask_a)
b = np.array([2.5, 10.0, 3.0, 4.0])
mask_b = np.array([False, True, False, True])
mb = Masked(b, mask=mask_b)
oa, ob = np.meshgrid(ma, mb)
xa, xb = np.broadcast_arrays(a, b[:, np.newaxis])
ma, mb = np.broadcast_arrays(mask_a, mask_b[:, np.newaxis])
for o, x, m in ((oa, xa, ma), (ob, xb, mb)):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
class TestMemoryFunctions(MaskedArraySetup):
def test_shares_memory(self):
assert np.shares_memory(self.ma, self.ma.unmasked)
assert not np.shares_memory(self.ma, self.ma.mask)
def test_may_share_memory(self):
assert np.may_share_memory(self.ma, self.ma.unmasked)
assert not np.may_share_memory(self.ma, self.ma.mask)
class TestDatetimeFunctions:
# Could in principle support np.is_busday, np.busday_count, np.busday_offset.
@classmethod
def setup_class(self):
self.a = np.array(["2020-12-31", "2021-01-01", "2021-01-02"], dtype="M")
self.mask_a = np.array([False, True, False])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([["2021-01-07"], ["2021-01-31"]], dtype="M")
self.mask_b = np.array([[False], [True]])
self.mb = Masked(self.b, mask=self.mask_b)
def test_datetime_as_string(self):
out = np.datetime_as_string(self.ma)
expected = np.datetime_as_string(self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
@pytest.mark.filterwarnings("ignore:all-nan")
class TestNaNFunctions:
def setup_class(self):
self.a = np.array(
[
[np.nan, np.nan, 3.0],
[4.0, 5.0, 6.0],
]
)
self.mask_a = np.array(
[
[True, False, False],
[False, True, False],
]
)
self.b = np.arange(1, 7).reshape(2, 3)
self.mask_b = self.mask_a
self.ma = Masked(self.a, mask=self.mask_a)
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, exact_fill_value=None, masked_result=True, **kwargs):
result = function(self.ma, **kwargs)
expected_data = function(self.ma.filled(np.nan), **kwargs)
expected_mask = np.isnan(expected_data)
if masked_result:
assert isinstance(result, Masked)
assert_array_equal(result.mask, expected_mask)
assert np.all(result == expected_data)
else:
assert not isinstance(result, Masked)
assert_array_equal(result, expected_data)
assert not np.any(expected_mask)
out = np.zeros_like(result)
result2 = function(self.ma, out=out, **kwargs)
assert result2 is out
assert_array_equal(result2, result)
def check_arg(self, function, **kwargs):
# arg functions do not have an 'out' argument, so just test directly.
result = function(self.ma, **kwargs)
assert not isinstance(result, Masked)
expected = function(self.ma.filled(np.nan), **kwargs)
assert_array_equal(result, expected)
def test_nanmin(self):
self.check(np.nanmin)
self.check(np.nanmin, axis=0)
self.check(np.nanmin, axis=1)
resi = np.nanmin(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.array([2, 4]))
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanmax(self):
self.check(np.nanmax)
def test_nanargmin(self):
self.check_arg(np.nanargmin)
self.check_arg(np.nanargmin, axis=1)
def test_nanargmax(self):
self.check_arg(np.nanargmax)
def test_nansum(self):
self.check(np.nansum, masked_result=False)
resi = np.nansum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([5, 10]))
def test_nanprod(self):
self.check(np.nanprod, masked_result=False)
resi = np.nanprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([6, 24]))
def test_nancumsum(self):
self.check(np.nancumsum, masked_result=False)
resi = np.nancumsum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[0, 2, 5], [4, 4, 10]]))
def test_nancumprod(self):
self.check(np.nancumprod, masked_result=False)
resi = np.nancumprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[1, 2, 6], [4, 4, 24]]))
def test_nanmean(self):
self.check(np.nanmean)
resi = np.nanmean(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.mean(self.mb, axis=1).unmasked)
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanvar(self):
self.check(np.nanvar)
self.check(np.nanvar, ddof=1)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nanquantile(self):
self.check(np.nanquantile, q=0.5)
def test_nanpercentile(self):
self.check(np.nanpercentile, q=50)
untested_functions = set()
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
untested_functions |= poly_functions
# Get covered functions
tested_functions = set()
for cov_cls in list(filter(inspect.isclass, locals().values())):
for k, v in cov_cls.__dict__.items():
if inspect.isfunction(v) and k.startswith("test"):
f = k.replace("test_", "")
if f in all_wrapped_functions:
tested_functions.add(all_wrapped_functions[f])
def test_basic_testing_completeness():
assert all_wrapped == (tested_functions | IGNORED_FUNCTIONS | UNSUPPORTED_FUNCTIONS)
@pytest.mark.xfail(reason="coverage not completely set up yet")
def test_testing_completeness():
assert not tested_functions.intersection(untested_functions)
assert all_wrapped == (tested_functions | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize(
"one, two",
itertools.combinations(
(
MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(APPLY_TO_BOTH_FUNCTIONS.keys()),
set(DISPATCHED_FUNCTIONS.keys()),
),
2,
),
)
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
def test_all_included(self):
included_in_helpers = (
MASKED_SAFE_FUNCTIONS
| UNSUPPORTED_FUNCTIONS
| set(APPLY_TO_BOTH_FUNCTIONS.keys())
| set(DISPATCHED_FUNCTIONS.keys())
)
assert all_wrapped == included_in_helpers
@pytest.mark.xfail(reason="coverage not completely set up yet")
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS == untested_functions
|
279b868f0104ac35f7a17d58f2cfe4460a58fc1c2d4bcae650e7c66ffb9fde9f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import defaultdict
from functools import partial
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.axes import Axes, subplot_class_factory
from matplotlib.transforms import Affine2D, Bbox, Transform
from astropy.coordinates import BaseCoordinateFrame, SkyCoord
from astropy.utils import minversion
from astropy.utils.compat.optional_deps import HAS_PIL
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS, BaseLowLevelWCS
from .coordinates_map import CoordinatesMap
from .frame import RectangularFrame, RectangularFrame1D
from .transforms import CoordinateTransform
from .utils import get_coord_meta, transform_contour_set_inplace
from .wcsapi import IDENTITY, transform_coord_meta_from_wcs
__all__ = ["WCSAxes", "WCSAxesSubplot"]
VISUAL_PROPERTIES = ["facecolor", "edgecolor", "linewidth", "alpha", "linestyle"]
class _WCSAxesArtist(Artist):
"""This is a dummy artist to enforce the correct z-order of axis ticks,
tick labels, and gridlines.
FIXME: This is a bit of a hack. ``Axes.draw`` sorts the artists by zorder
and then renders them in sequence. For normal Matplotlib axes, the ticks,
tick labels, and gridlines are included in this list of artists and hence
are automatically drawn in the correct order. However, ``WCSAxes`` disables
the native ticks, labels, and gridlines. Instead, ``WCSAxes.draw`` renders
ersatz ticks, labels, and gridlines by explicitly calling the functions
``CoordinateHelper._draw_ticks``, ``CoordinateHelper._draw_grid``, etc.
This hack would not be necessary if ``WCSAxes`` drew ticks, tick labels,
    and gridlines in the standard way.
"""
def draw(self, renderer, *args, **kwargs):
self.axes.draw_wcsaxes(renderer)
class WCSAxes(Axes):
"""
The main axes class that can be used to show world coordinates from a WCS.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The figure to add the axes to
*args
``*args`` can be a single ``(left, bottom, width, height)``
rectangle or a single `matplotlib.transforms.Bbox`. This specifies
the rectangle (in figure coordinates) where the Axes is positioned.
``*args`` can also consist of three numbers or a single three-digit
number; in the latter case, the digits are considered as
independent numbers. The numbers are interpreted as ``(nrows,
ncols, index)``: ``(nrows, ncols)`` specifies the size of an array
of subplots, and ``index`` is the 1-based index of the subplot
being created. Finally, ``*args`` can also directly be a
`matplotlib.gridspec.SubplotSpec` instance.
wcs : :class:`~astropy.wcs.WCS`, optional
The WCS for the data. If this is specified, ``transform`` cannot be
specified.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data. If this is specified, ``wcs`` cannot be
specified.
coord_meta : dict, optional
A dictionary providing additional metadata when ``transform`` is
specified. This should include the keys ``type``, ``wrap``, and
``unit``. Each of these should be a list with as many items as the
dimension of the WCS. The ``type`` entries should be one of
``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should
give, for the longitude, the angle at which the coordinate wraps (and
`None` otherwise), and the ``unit`` should give the unit of the
coordinates as :class:`~astropy.units.Unit` instances. This can
optionally also include a ``format_unit`` entry giving the units to use
for the tick labels (if not specified, this defaults to ``unit``).
transData : `~matplotlib.transforms.Transform`, optional
Can be used to override the default data -> pixel mapping.
slices : tuple, optional
For WCS transformations with more than two dimensions, we need to
choose which dimensions are being shown in the 2D image. The slice
should contain one ``x`` entry, one ``y`` entry, and the rest of the
values should be integers indicating the slice through the data. The
order of the items in the slice should be the same as the order of the
dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the
order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means
that the first WCS dimension (last Numpy dimension) will be sliced at
an index of 50, the second WCS and Numpy dimension will be shown on the
x axis, and the final WCS dimension (first Numpy dimension) will be
shown on the y-axis (and therefore the data will be plotted using
``data[:, :, 50].transpose()``)
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
Attributes
----------
coords : :class:`~astropy.visualization.wcsaxes.CoordinatesMap`
Container for coordinate information.
"""
def __init__(
self,
fig,
*args,
wcs=None,
transform=None,
coord_meta=None,
transData=None,
slices=None,
frame_class=None,
**kwargs,
):
""" """
super().__init__(fig, *args, **kwargs)
self._bboxes = []
if frame_class is not None:
self.frame_class = frame_class
elif wcs is not None and (
wcs.pixel_n_dim == 1 or (slices is not None and "y" not in slices)
):
self.frame_class = RectangularFrame1D
else:
self.frame_class = RectangularFrame
if transData is not None:
# User wants to override the transform for the final
# data->pixel mapping
self.transData = transData
self.reset_wcs(
wcs=wcs, slices=slices, transform=transform, coord_meta=coord_meta
)
self._hide_parent_artists()
self.format_coord = self._display_world_coords
self._display_coords_index = 0
fig.canvas.mpl_connect("key_press_event", self._set_cursor_prefs)
self.patch = self.coords.frame.patch
self._wcsaxesartist = _WCSAxesArtist()
self.add_artist(self._wcsaxesartist)
self._drawn = False
def _display_world_coords(self, x, y):
if not self._drawn:
return ""
if self._display_coords_index == -1:
return f"{x} {y} (pixel)"
pixel = np.array([x, y])
coords = self._all_coords[self._display_coords_index]
world = coords._transform.transform(np.array([pixel]))[0]
coord_strings = []
for idx, coord in enumerate(coords):
if coord.coord_index is not None:
coord_strings.append(
coord.format_coord(world[coord.coord_index], format="ascii")
)
coord_string = " ".join(coord_strings)
if self._display_coords_index == 0:
system = "world"
else:
system = f"world, overlay {self._display_coords_index}"
coord_string = f"{coord_string} ({system})"
return coord_string
def _set_cursor_prefs(self, event, **kwargs):
if event.key == "w":
self._display_coords_index += 1
if self._display_coords_index + 1 > len(self._all_coords):
self._display_coords_index = -1
def _hide_parent_artists(self):
# Turn off spines and current axes
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
if self.frame_class is not RectangularFrame1D:
self.yaxis.set_visible(False)
# We now overload ``imshow`` because we need to make sure that origin is
# set to ``lower`` for all images, which means that we need to flip RGB
# images.
def imshow(self, X, *args, **kwargs):
"""
Wrapper to Matplotlib's :meth:`~matplotlib.axes.Axes.imshow`.
If an RGB image is passed as a PIL object, it will be flipped
vertically and ``origin`` will be set to ``lower``, since WCS
transformations - like FITS files - assume that the origin is the lower
left pixel of the image (whereas RGB images have the origin in the top
left).
All arguments are passed to :meth:`~matplotlib.axes.Axes.imshow`.
"""
origin = kwargs.pop("origin", "lower")
        # plt.imshow passes origin as None, in which case we default to 'lower'.
if origin is None:
origin = "lower"
elif origin == "upper":
raise ValueError("Cannot use images with origin='upper' in WCSAxes.")
if HAS_PIL:
from PIL.Image import Image
if minversion("PIL", "9.1"):
from PIL.Image import Transpose
FLIP_TOP_BOTTOM = Transpose.FLIP_TOP_BOTTOM
else:
from PIL.Image import FLIP_TOP_BOTTOM
if isinstance(X, Image) or hasattr(X, "getpixel"):
X = X.transpose(FLIP_TOP_BOTTOM)
return super().imshow(X, *args, origin=origin, **kwargs)
def contour(self, *args, **kwargs):
"""
Plot contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contour`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contour`.
"""
# In Matplotlib, when calling contour() with a transform, each
# individual path in the contour map is transformed separately. However,
# this is much too slow for us since each call to the transforms results
# in an Astropy coordinate transformation, which has a non-negligible
# overhead - therefore a better approach is to override contour(), call
# the Matplotlib one with no transform, then apply the transform in one
# go to all the segments that make up the contour map.
transform = kwargs.pop("transform", None)
cset = super().contour(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def contourf(self, *args, **kwargs):
"""
Plot filled contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contourf`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contourf`.
"""
# See notes for contour above.
transform = kwargs.pop("transform", None)
cset = super().contourf(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def _transform_plot_args(self, *args, **kwargs):
"""
Apply transformations to arguments to ``plot_coord`` and
``scatter_coord``.
"""
if isinstance(args[0], (SkyCoord, BaseCoordinateFrame)):
# Extract the frame from the first argument.
frame0 = args[0]
if isinstance(frame0, SkyCoord):
frame0 = frame0.frame
native_frame = self._transform_pixel2world.frame_out
# Transform to the native frame of the plot
frame0 = frame0.transform_to(native_frame)
plot_data = []
for coord in self.coords:
if coord.coord_type == "longitude":
plot_data.append(frame0.spherical.lon.to_value(coord.coord_unit))
elif coord.coord_type == "latitude":
plot_data.append(frame0.spherical.lat.to_value(coord.coord_unit))
else:
raise NotImplementedError(
"Coordinates cannot be plotted with this "
"method because the WCS does not represent longitude/latitude."
)
if "transform" in kwargs.keys():
raise TypeError(
"The 'transform' keyword argument is not allowed,"
" as it is automatically determined by the input coordinate frame."
)
transform = self.get_transform(native_frame)
kwargs.update({"transform": transform})
args = tuple(plot_data) + args[1:]
return args, kwargs
def plot_coord(self, *args, **kwargs):
"""
Plot `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.plot`. All other arguments are the same as
        `matplotlib.axes.Axes.plot`. If not specified, a ``transform`` keyword
argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to plot on the axes. This is converted to the
first two arguments to `matplotlib.axes.Axes.plot`.
See Also
--------
matplotlib.axes.Axes.plot :
This method is called from this function with all arguments passed to it.
"""
args, kwargs = self._transform_plot_args(*args, **kwargs)
return super().plot(*args, **kwargs)
def scatter_coord(self, *args, **kwargs):
"""
Scatter `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.scatter_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.scatter`. All other arguments are the same as
        `matplotlib.axes.Axes.scatter`. If not specified, a ``transform``
keyword argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to scatter on the axes. This is converted to
the first two arguments to `matplotlib.axes.Axes.scatter`.
See Also
--------
matplotlib.axes.Axes.scatter : This method is called from this function with all arguments passed to it.
"""
args, kwargs = self._transform_plot_args(*args, **kwargs)
return super().scatter(*args, **kwargs)
def reset_wcs(self, wcs=None, slices=None, transform=None, coord_meta=None):
"""
Reset the current Axes, to use a new WCS object.
"""
# Here determine all the coordinate axes that should be shown.
if wcs is None and transform is None:
self.wcs = IDENTITY
else:
            # We now explicitly call 'set', which ensures the WCS object is
            # consistent; this only matters if the WCS has been set by hand.
            # For example, if the user sets a celestial WCS by hand and
            # forgets to set the units, WCS.wcs.set() will fill them in.
if wcs is not None:
# Check if the WCS object is an instance of `astropy.wcs.WCS`
# This check is necessary as only `astropy.wcs.WCS` supports
# wcs.set() method
if isinstance(wcs, WCS):
wcs.wcs.set()
if isinstance(wcs, BaseHighLevelWCS):
wcs = wcs.low_level_wcs
self.wcs = wcs
# If we are making a new WCS, we need to preserve the path object since
# it may already be used by objects that have been plotted, and we need
# to continue updating it. CoordinatesMap will create a new frame
# instance, but we can tell that instance to keep using the old path.
if hasattr(self, "coords"):
previous_frame = {
"path": self.coords.frame._path,
"color": self.coords.frame.get_color(),
"linewidth": self.coords.frame.get_linewidth(),
}
else:
previous_frame = {"path": None}
if self.wcs is not None:
transform, coord_meta = transform_coord_meta_from_wcs(
self.wcs, self.frame_class, slices=slices
)
self.coords = CoordinatesMap(
self,
transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class,
previous_frame_path=previous_frame["path"],
)
self._transform_pixel2world = transform
if previous_frame["path"] is not None:
self.coords.frame.set_color(previous_frame["color"])
self.coords.frame.set_linewidth(previous_frame["linewidth"])
self._all_coords = [self.coords]
# Common default settings for Rectangular Frame
for ind, pos in enumerate(
coord_meta.get("default_axislabel_position", ["b", "l"])
):
self.coords[ind].set_axislabel_position(pos)
for ind, pos in enumerate(
coord_meta.get("default_ticklabel_position", ["b", "l"])
):
self.coords[ind].set_ticklabel_position(pos)
for ind, pos in enumerate(
coord_meta.get("default_ticks_position", ["bltr", "bltr"])
):
self.coords[ind].set_ticks_position(pos)
if rcParams["axes.grid"]:
self.grid()
def draw_wcsaxes(self, renderer):
if not self.axison:
return
        # Here we need to find out the range of all coordinates, and update the
        # range for each coordinate axis. For now, just assume it covers the
        # whole sky.
self._bboxes = []
# This generates a structure like [coords][axis] = [...]
ticklabels_bbox = defaultdict(partial(defaultdict, list))
visible_ticks = []
for coords in self._all_coords:
# Draw grids
coords.frame.update()
for coord in coords:
coord._draw_grid(renderer)
for coords in self._all_coords:
# Draw tick labels
for coord in coords:
coord._draw_ticks(
renderer,
bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox[coord],
)
visible_ticks.extend(coord.ticklabels.get_visible_axes())
for coords in self._all_coords:
# Draw axis labels
for coord in coords:
coord._draw_axislabels(
renderer,
bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox,
visible_ticks=visible_ticks,
)
self.coords.frame.draw(renderer)
def draw(self, renderer, **kwargs):
"""Draw the axes."""
# Before we do any drawing, we need to remove any existing grid lines
# drawn with contours, otherwise if we try and remove the contours
# part way through drawing, we end up with the issue mentioned in
# https://github.com/astropy/astropy/issues/12446
for coords in self._all_coords:
for coord in coords:
coord._clear_grid_contour()
        # In Axes.draw, the following code can result in the xlim and ylim
        # values changing, so we need to call this here explicitly to make sure
        # that the limits are correct before we update the patch.
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
if self._axisbelow is True:
self._wcsaxesartist.set_zorder(0.5)
elif self._axisbelow is False:
self._wcsaxesartist.set_zorder(2.5)
else:
# 'line': above patches, below lines
self._wcsaxesartist.set_zorder(1.5)
        # We need to make sure that the frame path is up to date
self.coords.frame._update_patch_path()
super().draw(renderer, **kwargs)
self._drawn = True
# Matplotlib internally sometimes calls set_xlabel(label=...).
def set_xlabel(self, xlabel=None, labelpad=1, loc=None, **kwargs):
"""Set x-label."""
if xlabel is None:
xlabel = kwargs.pop("label", None)
if xlabel is None:
raise TypeError(
"set_xlabel() missing 1 required positional argument: 'xlabel'"
)
for coord in self.coords:
if (
"b" in coord.axislabels.get_visible_axes()
or "h" in coord.axislabels.get_visible_axes()
):
coord.set_axislabel(xlabel, minpad=labelpad, **kwargs)
break
def set_ylabel(self, ylabel=None, labelpad=1, loc=None, **kwargs):
"""Set y-label."""
if ylabel is None:
ylabel = kwargs.pop("label", None)
if ylabel is None:
raise TypeError(
"set_ylabel() missing 1 required positional argument: 'ylabel'"
)
if self.frame_class is RectangularFrame1D:
return super().set_ylabel(ylabel, labelpad=labelpad, **kwargs)
for coord in self.coords:
if (
"l" in coord.axislabels.get_visible_axes()
or "c" in coord.axislabels.get_visible_axes()
):
coord.set_axislabel(ylabel, minpad=labelpad, **kwargs)
break
def get_xlabel(self):
for coord in self.coords:
if (
"b" in coord.axislabels.get_visible_axes()
or "h" in coord.axislabels.get_visible_axes()
):
return coord.get_axislabel()
def get_ylabel(self):
if self.frame_class is RectangularFrame1D:
return super().get_ylabel()
for coord in self.coords:
if (
"l" in coord.axislabels.get_visible_axes()
or "c" in coord.axislabels.get_visible_axes()
):
return coord.get_axislabel()
def get_coords_overlay(self, frame, coord_meta=None):
"""Get coordinates overlay on given frame.
Parameters
----------
frame : str, `~astropy.coordinates.BaseCoordinateFrame`
Frame to get overlay for. If a string must correspond to
one of the coordinate frames registered in the astropy
frame transform graph.
coord_meta : dict
Metadata for the coordinates overlay.
Returns
-------
overlay : `~astropy.visualization.wcsaxes.CoordinatesMap`
Coordinates overlay.
"""
# Here we can't use get_transform because that deals with
# pixel-to-pixel transformations when passing a WCS object.
if isinstance(frame, WCS):
transform, coord_meta = transform_coord_meta_from_wcs(
frame, self.frame_class
)
else:
transform = self._get_transform_no_transdata(frame)
if coord_meta is None:
coord_meta = get_coord_meta(frame)
coords = CoordinatesMap(
self,
transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class,
)
self._all_coords.append(coords)
# Common settings for overlay
coords[0].set_axislabel_position("t")
coords[1].set_axislabel_position("r")
coords[0].set_ticklabel_position("t")
coords[1].set_ticklabel_position("r")
self.overlay_coords = coords
return coords
def get_transform(self, frame):
"""
Return a transform from the specified frame to display coordinates.
This does not include the transData transformation
Parameters
----------
frame : :class:`~astropy.wcs.WCS` or :class:`~matplotlib.transforms.Transform` or str
The ``frame`` parameter can have several possible types:
* :class:`~astropy.wcs.WCS` instance: assumed to be a
transformation from pixel to world coordinates, where the
world coordinates are the same as those in the WCS
transformation used for this ``WCSAxes`` instance. This is
used for example to show contours, since this involves
plotting an array in pixel coordinates that are not the
final data coordinate and have to be transformed to the
common world coordinate system first.
* :class:`~matplotlib.transforms.Transform` instance: it is
assumed to be a transform to the world coordinates that are
part of the WCS used to instantiate this ``WCSAxes``
instance.
* ``'pixel'`` or ``'world'``: return a transformation that
allows users to plot in pixel/data coordinates (essentially
an identity transform) and ``world`` (the default
world-to-pixel transformation used to instantiate the
``WCSAxes`` instance).
* ``'fk5'`` or ``'galactic'``: return a transformation from
the specified frame to the pixel/data coordinates.
* :class:`~astropy.coordinates.BaseCoordinateFrame` instance.
"""
return self._get_transform_no_transdata(frame).inverted() + self.transData
def _get_transform_no_transdata(self, frame):
"""
Return a transform from data to the specified frame.
"""
if isinstance(frame, (BaseLowLevelWCS, BaseHighLevelWCS)):
if isinstance(frame, BaseHighLevelWCS):
frame = frame.low_level_wcs
transform, coord_meta = transform_coord_meta_from_wcs(
frame, self.frame_class
)
transform_world2pixel = transform.inverted()
if self._transform_pixel2world.frame_out == transform_world2pixel.frame_in:
return self._transform_pixel2world + transform_world2pixel
else:
return (
self._transform_pixel2world
+ CoordinateTransform(
self._transform_pixel2world.frame_out,
transform_world2pixel.frame_in,
)
+ transform_world2pixel
)
elif isinstance(frame, str) and frame == "pixel":
return Affine2D()
elif isinstance(frame, Transform):
return self._transform_pixel2world + frame
else:
if isinstance(frame, str) and frame == "world":
return self._transform_pixel2world
else:
coordinate_transform = CoordinateTransform(
self._transform_pixel2world.frame_out, frame
)
if coordinate_transform.same_frames:
return self._transform_pixel2world
else:
return self._transform_pixel2world + coordinate_transform
def get_tightbbox(self, renderer, *args, **kwargs):
# FIXME: we should determine what to do with the extra arguments here.
# Note that the expected signature of this method is different in
# Matplotlib 3.x compared to 2.x, but we only support 3.x now.
if not self.get_visible():
return
# Do a draw to populate the self._bboxes list
self.draw_wcsaxes(renderer)
bb = [b for b in self._bboxes if b and (b.width != 0 or b.height != 0)]
bb.append(super().get_tightbbox(renderer, *args, **kwargs))
if bb:
_bbox = Bbox.union(bb)
return _bbox
else:
return self.get_window_extent(renderer)
def grid(self, b=None, axis="both", *, which="major", **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments. This behaves like `matplotlib.axes.Axes`
except that if no arguments are specified, the grid is shown rather
than toggled.
Parameters
----------
b : bool
Whether to show the gridlines.
axis : 'both', 'x', 'y'
Which axis to turn the gridlines on/off for.
which : str
Currently only ``'major'`` is supported.
"""
if not hasattr(self, "coords"):
return
if which != "major":
raise NotImplementedError(
"Plotting the grid for the minor ticks is not supported."
)
if axis == "both":
self.coords.grid(draw_grid=b, **kwargs)
elif axis == "x":
self.coords[0].grid(draw_grid=b, **kwargs)
elif axis == "y":
self.coords[1].grid(draw_grid=b, **kwargs)
else:
raise ValueError("axis should be one of x/y/both")
def tick_params(self, axis="both", **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
axis : int or str, optional
Which axis to apply the parameters to. This defaults to 'both'
but this can also be set to an `int` or `str` that refers to the
axis to apply it to, following the valid values that can index
            ``ax.coords``. Note that ``'x'`` and ``'y'`` are also accepted in
the case of rectangular axes.
which : {'both', 'major', 'minor'}, optional
            Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : str, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
"""
if not hasattr(self, "coords"):
# Axes haven't been fully initialized yet, so just ignore, as
# Axes.__init__ calls this method
return
if axis == "both":
for pos in ("bottom", "left", "top", "right"):
if pos in kwargs:
raise ValueError(f"Cannot specify {pos}= when axis='both'")
if "label" + pos in kwargs:
raise ValueError(f"Cannot specify label{pos}= when axis='both'")
for coord in self.coords:
coord.tick_params(**kwargs)
elif axis in self.coords:
self.coords[axis].tick_params(**kwargs)
elif axis in ("x", "y") and self.frame_class is RectangularFrame:
spine = "b" if axis == "x" else "l"
for coord in self.coords:
if spine in coord.axislabels.get_visible_axes():
coord.tick_params(**kwargs)
# In the following, we put the generated subplot class in a temporary class and
# we then inherit it - if we don't do this, the generated class appears to
# belong in matplotlib, not in WCSAxes, from the API's point of view.
class WCSAxesSubplot(subplot_class_factory(WCSAxes)):
"""
    A subplot class for WCSAxes.
"""
pass
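# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the module API).
# ``WCSAxes`` is normally created implicitly by passing ``projection=wcs`` to
# Matplotlib; world-coordinate plotting then goes through ``get_transform`` or
# ``plot_coord``. The FITS file name below is a placeholder.
#
#     import matplotlib.pyplot as plt
#     from astropy.io import fits
#     from astropy.wcs import WCS
#
#     hdu = fits.open("image.fits")[0]              # hypothetical input file
#     ax = plt.subplot(projection=WCS(hdu.header))  # creates a WCSAxes
#     ax.imshow(hdu.data, origin="lower")
#     ax.coords.grid(color="white", ls="dotted")
#     ax.plot([266.4, 266.5], [-29.0, -28.9],       # world coordinates in deg
#             transform=ax.get_transform("fk5"))
#     plt.show()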
|
0b77e7efaa298a4e22cbde0bada8ffcdd36ce3cb30e475c678da1c99c980a0e9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.coordinates import BaseCoordinateFrame, UnitSphericalRepresentation
__all__ = [
"select_step_degree",
"select_step_hour",
"select_step_scalar",
"transform_contour_set_inplace",
]
def select_step_degree(dv):
# Modified from axis_artist, supports astropy.units
if dv > 1.0 * u.arcsec:
degree_limits_ = [1.5, 3, 7, 13, 20, 40, 70, 120, 270, 520]
degree_steps_ = [1, 2, 5, 10, 15, 30, 45, 90, 180, 360]
degree_units = [u.degree] * len(degree_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 8, 11, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 5, 10, 15, 20, 30]
minute_limits_ = np.array(minsec_limits_) / 60.0
minute_units = [u.arcmin] * len(minute_limits_)
second_limits_ = np.array(minsec_limits_) / 3600.0
second_units = [u.arcsec] * len(second_limits_)
degree_limits = np.concatenate([second_limits_, minute_limits_, degree_limits_])
degree_steps = minsec_steps_ + minsec_steps_ + degree_steps_
degree_units = second_units + minute_units + degree_units
n = degree_limits.searchsorted(dv.to(u.degree))
step = degree_steps[n]
unit = degree_units[n]
return step * unit
else:
return select_step_scalar(dv.to_value(u.arcsec)) * u.arcsec
def select_step_hour(dv):
if dv > 15.0 * u.arcsec:
hour_limits_ = [1.5, 2.5, 3.5, 5, 7, 10, 15, 21, 36]
hour_steps_ = [1, 2, 3, 4, 6, 8, 12, 18, 24]
hour_units = [u.hourangle] * len(hour_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 4.5, 5.5, 8, 11, 14, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30]
minute_limits_ = np.array(minsec_limits_) / 60.0
minute_units = [15.0 * u.arcmin] * len(minute_limits_)
second_limits_ = np.array(minsec_limits_) / 3600.0
second_units = [15.0 * u.arcsec] * len(second_limits_)
hour_limits = np.concatenate([second_limits_, minute_limits_, hour_limits_])
hour_steps = minsec_steps_ + minsec_steps_ + hour_steps_
hour_units = second_units + minute_units + hour_units
n = hour_limits.searchsorted(dv.to(u.hourangle))
step = hour_steps[n]
unit = hour_units[n]
return step * unit
else:
return select_step_scalar(dv.to_value(15.0 * u.arcsec)) * (15.0 * u.arcsec)
def select_step_scalar(dv):
log10_dv = np.log10(dv)
base = np.floor(log10_dv)
frac = log10_dv - base
steps = np.log10([1, 2, 5, 10])
imin = np.argmin(np.abs(frac - steps))
return 10.0 ** (base + steps[imin])
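# Illustrative note (not part of the module): the helper above rounds a raw
# spacing to the nearest of 1, 2, 5 or 10 in log space, so for example
# (values approximate due to floating point):
#
#     select_step_scalar(0.123)   # -> 0.1
#     select_step_scalar(7.0)     # -> ~5.0
#     select_step_scalar(0.03)    # -> ~0.02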
def get_coord_meta(frame):
coord_meta = {}
coord_meta["type"] = ("longitude", "latitude")
from astropy.coordinates import frame_transform_graph
if isinstance(frame, str):
initial_frame = frame
frame = frame_transform_graph.lookup_name(frame)
if frame is None:
raise ValueError(f"Unknown frame: {initial_frame}")
if not isinstance(frame, BaseCoordinateFrame):
frame = frame()
names = list(frame.representation_component_names.keys())
coord_meta["name"] = names[:2]
# Add dummy data to the frame to determine the longitude wrap angle and the units
frame = frame.realize_frame(UnitSphericalRepresentation(0 * u.deg, 0 * u.deg))
coord_meta["wrap"] = (frame.spherical.lon.wrap_angle, None)
coord_meta["unit"] = (frame.spherical.lon.unit, frame.spherical.lat.unit)
return coord_meta
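# For reference (an assumed, illustrative example): ``get_coord_meta("galactic")``
# returns a dictionary along the lines of
#
#     {"type": ("longitude", "latitude"),
#      "name": ["l", "b"],
#      "wrap": (<Angle 360. deg>, None),
#      "unit": (Unit("deg"), Unit("deg"))}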
def transform_contour_set_inplace(cset, transform):
"""
Transform a contour set in-place using a specified
    :class:`matplotlib.transforms.Transform`.
Using transforms with the native Matplotlib contour/contourf can be slow if
the transforms have a non-negligible overhead (which is the case for
WCS/SkyCoord transforms) since the transform is called for each individual
contour line. It is more efficient to stack all the contour lines together
temporarily and transform them in one go.
"""
# The contours are represented as paths grouped into levels. Each can have
# one or more paths. The approach we take here is to stack the vertices of
# all paths and transform them in one go. The pos_level list helps us keep
# track of where the set of segments for each overall contour level ends.
    # The pos_segments list helps us keep track of where each segment ends for
# each contour level.
all_paths = []
pos_level = []
pos_segments = []
for collection in cset.collections:
paths = collection.get_paths()
if len(paths) == 0:
continue
all_paths.append(paths)
# The last item in pos isn't needed for np.split and in fact causes
# issues if we keep it because it will cause an extra empty array to be
# returned.
pos = np.cumsum([len(x) for x in paths])
pos_segments.append(pos[:-1])
pos_level.append(pos[-1])
# As above the last item isn't needed
pos_level = np.cumsum(pos_level)[:-1]
# Stack all the segments into a single (n, 2) array
vertices = [path.vertices for paths in all_paths for path in paths]
if len(vertices) > 0:
vertices = np.concatenate(vertices)
else:
return
# Transform all coordinates in one go
vertices = transform.transform(vertices)
# Split up into levels again
vertices = np.split(vertices, pos_level)
# Now re-populate the segments in the line collections
for ilevel, vert in enumerate(vertices):
vert = np.split(vert, pos_segments[ilevel])
for iseg, ivert in enumerate(vert):
all_paths[ilevel][iseg].vertices = ivert
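# Minimal usage sketch (assumed; this mirrors how WCSAxes.contour uses the
# helper): draw the contours in plain data coordinates first, then move all
# vertices in a single transform call.
#
#     cset = ax.contour(data)   # plain data/pixel coordinates
#     transform_contour_set_inplace(cset, ax.get_transform(other_wcs) - ax.transData)
#
# ``ax``, ``data`` and ``other_wcs`` are hypothetical names for a WCSAxes
# instance, an image array, and a second WCS describing the contour data.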
|
66b266e951dd8c45576ea5b4e47211a6630a17428bc818c578ce2b9f79634071 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from astropy import units as u
from astropy.utils.exceptions import AstropyDeprecationWarning
# Algorithm inspired by PGSBOX from WCSLIB by M. Calabretta
LONLAT = {"longitude", "latitude"}
def wrap_180(values):
values_new = values % 360.0
with np.errstate(invalid="ignore"):
values_new[values_new > 180.0] -= 360
return values_new
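# Quick illustration (for reference only): values are folded into (-180, 180],
# e.g. wrap_180(np.array([350.0, 10.0, 200.0])) -> array([-10., 10., -160.])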
def find_coordinate_range(transform, extent, coord_types, coord_units, coord_wraps):
"""
Find the range of coordinates to use for ticks/grids.
Parameters
----------
transform : func
Function to transform pixel to world coordinates. Should take two
values (the pixel coordinates) and return two values (the world
coordinates).
extent : iterable
The range of the image viewport in pixel coordinates, given as [xmin,
xmax, ymin, ymax].
coord_types : list of str
Whether each coordinate is a ``'longitude'``, ``'latitude'``, or
``'scalar'`` value.
coord_units : list of `astropy.units.Unit`
The units for each coordinate.
coord_wraps : list of `astropy.units.Quantity`
The wrap angles for longitudes.
"""
# Sample coordinates on a NX x NY grid.
from . import conf
if len(extent) == 4:
nx = ny = conf.coordinate_range_samples
x = np.linspace(extent[0], extent[1], nx + 1)
y = np.linspace(extent[2], extent[3], ny + 1)
xp, yp = np.meshgrid(x, y)
with np.errstate(invalid="ignore"):
world = transform.transform(np.vstack([xp.ravel(), yp.ravel()]).transpose())
else:
nx = conf.coordinate_range_samples
xp = np.linspace(extent[0], extent[1], nx + 1)[None]
with np.errstate(invalid="ignore"):
world = transform.transform(xp.T)
ranges = []
for coord_index, coord_type in enumerate(coord_types):
xw = world[:, coord_index].reshape(xp.shape)
if coord_type in LONLAT:
unit = coord_units[coord_index]
xw = xw * unit.to(u.deg)
# Iron out coordinates along first row
wjump = xw[0, 1:] - xw[0, :-1]
with np.errstate(invalid="ignore"):
reset = np.abs(wjump) > 180.0
if np.any(reset):
wjump = wjump + np.sign(wjump) * 180.0
wjump = 360.0 * np.trunc(wjump / 360.0)
xw[0, 1:][reset] -= wjump[reset]
# Now iron out coordinates along all columns, starting with first row.
wjump = xw[1:] - xw[:1]
with np.errstate(invalid="ignore"):
reset = np.abs(wjump) > 180.0
if np.any(reset):
wjump = wjump + np.sign(wjump) * 180.0
wjump = 360.0 * np.trunc(wjump / 360.0)
xw[1:][reset] -= wjump[reset]
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min = np.nanmin(xw)
xw_max = np.nanmax(xw)
# Check if range is smaller when normalizing to the range 0 to 360
if coord_type in LONLAT:
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min_check = np.nanmin(xw % 360.0)
xw_max_check = np.nanmax(xw % 360.0)
if xw_max_check - xw_min_check <= xw_max - xw_min < 360.0:
xw_min = xw_min_check
xw_max = xw_max_check
# Check if range is smaller when normalizing to the range -180 to 180
if coord_type in LONLAT:
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min_check = np.nanmin(wrap_180(xw))
xw_max_check = np.nanmax(wrap_180(xw))
if (
xw_max_check - xw_min_check < 360.0
and xw_max - xw_min >= xw_max_check - xw_min_check
):
xw_min = xw_min_check
xw_max = xw_max_check
x_range = xw_max - xw_min
if coord_type == "longitude":
if x_range > 300.0:
coord_wrap = coord_wraps[coord_index]
if not isinstance(coord_wrap, u.Quantity):
warnings.warn(
"Passing 'coord_wraps' as numbers is deprecated. Use a Quantity with units convertible to angular degrees instead.",
AstropyDeprecationWarning,
)
coord_wrap = coord_wrap * u.deg
xw_min = coord_wrap.to_value(u.deg) - 360
xw_max = coord_wrap.to_value(u.deg) - np.spacing(360.0)
elif xw_min < 0.0:
xw_min = max(-180.0, xw_min - 0.1 * x_range)
xw_max = min(+180.0, xw_max + 0.1 * x_range)
else:
xw_min = max(0.0, xw_min - 0.1 * x_range)
xw_max = min(360.0, xw_max + 0.1 * x_range)
elif coord_type == "latitude":
xw_min = max(-90.0, xw_min - 0.1 * x_range)
xw_max = min(+90.0, xw_max + 0.1 * x_range)
if coord_type in LONLAT:
xw_min *= u.deg.to(unit)
xw_max *= u.deg.to(unit)
ranges.append((xw_min, xw_max))
return ranges
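# A minimal call sketch (illustrative; an identity transform stands in for a
# real pixel->world transform). For plain scalar coordinates the returned
# ranges simply follow the sampled extent:
#
#     from matplotlib.transforms import Affine2D
#     ranges = find_coordinate_range(
#         Affine2D(), [0, 10, 0, 10],
#         coord_types=["scalar", "scalar"],
#         coord_units=[u.one, u.one],
#         coord_wraps=[None, None],
#     )   # -> [(0.0, 10.0), (0.0, 10.0)]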
|
4ee977ecdf52288562631692e71ce0cd24d508625e961f7b0b42ff49065fa098 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
import warnings
from collections import OrderedDict
import numpy as np
from matplotlib import rcParams
from matplotlib.lines import Line2D, Path
from matplotlib.patches import PathPatch
from astropy.utils.exceptions import AstropyDeprecationWarning
__all__ = [
"RectangularFrame1D",
"Spine",
"BaseFrame",
"RectangularFrame",
"EllipticalFrame",
]
class Spine:
"""
A single side of an axes.
This does not need to be a straight line, but represents a 'side' when
determining which part of the frame to put labels and ticks on.
Parameters
----------
parent_axes : `~astropy.visualization.wcsaxes.WCSAxes`
The parent axes
transform : `~matplotlib.transforms.Transform`
The transform from data to world
data_func : callable
If not ``None``, it should be a function that returns the appropriate spine
data when called with this object as the sole argument. If ``None``, the
spine data must be manually updated in ``update_spines()``.
"""
def __init__(self, parent_axes, transform, *, data_func=None):
self.parent_axes = parent_axes
self.transform = transform
self.data_func = data_func
self._data = None
self._world = None
@property
def data(self):
if self._data is None and self.data_func:
self.data = self.data_func(self)
return self._data
@data.setter
def data(self, value):
self._data = value
if value is None:
self._world = None
else:
with np.errstate(invalid="ignore"):
self._world = self.transform.transform(self._data)
self._update_normal()
def _get_pixel(self):
return self.parent_axes.transData.transform(self._data)
@property
def pixel(self):
warnings.warn(
"Pixel coordinates cannot be accurately calculated unless "
"Matplotlib is currently drawing a figure, so the .pixel "
"attribute is deprecated and will be removed in a future "
"astropy release.",
AstropyDeprecationWarning,
)
return self._get_pixel()
@pixel.setter
def pixel(self, value):
warnings.warn(
"Manually setting pixel values of a Spine can lead to incorrect results "
"as these can only be accurately calculated when Matplotlib is drawing "
"a figure. As such the .pixel setter now does nothing, is deprecated, "
"and will be removed in a future astropy release.",
AstropyDeprecationWarning,
)
@property
def world(self):
return self._world
@world.setter
def world(self, value):
self._world = value
if value is None:
self._data = None
self._pixel = None
else:
self._data = self.transform.transform(value)
self._pixel = self.parent_axes.transData.transform(self._data)
self._update_normal()
def _update_normal(self):
pixel = self._get_pixel()
# Find angle normal to border and inwards, in display coordinate
dx = pixel[1:, 0] - pixel[:-1, 0]
dy = pixel[1:, 1] - pixel[:-1, 1]
self.normal_angle = np.degrees(np.arctan2(dx, -dy))
def _halfway_x_y_angle(self):
"""
Return the x, y, normal_angle values halfway along the spine.
"""
pixel = self._get_pixel()
x_disp, y_disp = pixel[:, 0], pixel[:, 1]
# Get distance along the path
d = np.hstack(
[0.0, np.cumsum(np.sqrt(np.diff(x_disp) ** 2 + np.diff(y_disp) ** 2))]
)
xcen = np.interp(d[-1] / 2.0, d, x_disp)
ycen = np.interp(d[-1] / 2.0, d, y_disp)
# Find segment along which the mid-point lies
imin = np.searchsorted(d, d[-1] / 2.0) - 1
# Find normal of the axis label facing outwards on that segment
normal_angle = self.normal_angle[imin] + 180.0
return xcen, ycen, normal_angle
class SpineXAligned(Spine):
"""
A single side of an axes, aligned with the X data axis.
This does not need to be a straight line, but represents a 'side' when
determining which part of the frame to put labels and ticks on.
"""
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
if value is None:
self._world = None
else:
with np.errstate(invalid="ignore"):
self._world = self.transform.transform(self._data[:, 0:1])
self._update_normal()
class BaseFrame(OrderedDict, metaclass=abc.ABCMeta):
"""
Base class for frames, which are collections of
:class:`~astropy.visualization.wcsaxes.frame.Spine` instances.
"""
spine_class = Spine
def __init__(self, parent_axes, transform, path=None):
super().__init__()
self.parent_axes = parent_axes
self._transform = transform
self._linewidth = rcParams["axes.linewidth"]
self._color = rcParams["axes.edgecolor"]
self._path = path
for axis in self.spine_names:
self[axis] = self.spine_class(parent_axes, transform)
@property
def origin(self):
ymin, ymax = self.parent_axes.get_ylim()
return "lower" if ymin < ymax else "upper"
@property
def transform(self):
return self._transform
@transform.setter
def transform(self, value):
self._transform = value
for axis in self:
self[axis].transform = value
def _update_patch_path(self):
self.update_spines()
x, y = [], []
for axis in self.spine_names:
x.append(self[axis].data[:, 0])
y.append(self[axis].data[:, 1])
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
@property
def patch(self):
self._update_patch_path()
return PathPatch(
self._path,
transform=self.parent_axes.transData,
facecolor=rcParams["axes.facecolor"],
edgecolor="white",
)
def draw(self, renderer):
for axis in self:
pixel = self[axis]._get_pixel()
x, y = pixel[:, 0], pixel[:, 1]
line = Line2D(
x, y, linewidth=self._linewidth, color=self._color, zorder=1000
)
line.draw(renderer)
def sample(self, n_samples):
self.update_spines()
spines = OrderedDict()
for axis in self:
data = self[axis].data
spines[axis] = self.spine_class(self.parent_axes, self.transform)
if data.size > 0:
p = np.linspace(0.0, 1.0, data.shape[0])
p_new = np.linspace(0.0, 1.0, n_samples)
spines[axis].data = np.array(
[np.interp(p_new, p, d) for d in data.T]
).transpose()
else:
spines[axis].data = data
return spines
def set_color(self, color):
"""
Sets the color of the frame.
Parameters
----------
color : str
The color of the frame.
"""
self._color = color
def get_color(self):
return self._color
def set_linewidth(self, linewidth):
"""
Sets the linewidth of the frame.
Parameters
----------
linewidth : float
The linewidth of the frame in points.
"""
self._linewidth = linewidth
def get_linewidth(self):
return self._linewidth
def update_spines(self):
for spine in self.values():
if spine.data_func:
spine.data = spine.data_func(spine)
class RectangularFrame1D(BaseFrame):
"""
    A classic rectangular frame for 1D axes, with only bottom and top spines.
"""
spine_names = "bt"
spine_class = SpineXAligned
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self["b"].data = np.array(([xmin, ymin], [xmax, ymin]))
self["t"].data = np.array(([xmax, ymax], [xmin, ymax]))
super().update_spines()
def _update_patch_path(self):
self.update_spines()
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x = [xmin, xmax, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymax, ymin]
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x = [xmin, xmax, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymax, ymin]
line = Line2D(
x,
y,
linewidth=self._linewidth,
color=self._color,
zorder=1000,
transform=self.parent_axes.transData,
)
line.draw(renderer)
class RectangularFrame(BaseFrame):
"""
A classic rectangular frame.
"""
spine_names = "brtl"
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self["b"].data = np.array(([xmin, ymin], [xmax, ymin]))
self["r"].data = np.array(([xmax, ymin], [xmax, ymax]))
self["t"].data = np.array(([xmax, ymax], [xmin, ymax]))
self["l"].data = np.array(([xmin, ymax], [xmin, ymin]))
super().update_spines()
class EllipticalFrame(BaseFrame):
"""
An elliptical frame.
"""
spine_names = "chv"
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
xmid = 0.5 * (xmax + xmin)
ymid = 0.5 * (ymax + ymin)
dx = xmid - xmin
dy = ymid - ymin
theta = np.linspace(0.0, 2 * np.pi, 1000)
self["c"].data = np.array(
[xmid + dx * np.cos(theta), ymid + dy * np.sin(theta)]
).transpose()
self["h"].data = np.array(
[np.linspace(xmin, xmax, 1000), np.repeat(ymid, 1000)]
).transpose()
self["v"].data = np.array(
[np.repeat(xmid, 1000), np.linspace(ymin, ymax, 1000)]
).transpose()
super().update_spines()
def _update_patch_path(self):
"""Override path patch to include only the outer ellipse,
not the major and minor axes in the middle.
"""
self.update_spines()
vertices = self["c"].data
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
"""Override to draw only the outer ellipse,
not the major and minor axes in the middle.
FIXME: we may want to add a general method to give the user control
over which spines are drawn.
"""
axis = "c"
pixel = self[axis]._get_pixel()
line = Line2D(
pixel[:, 0],
pixel[:, 1],
linewidth=self._linewidth,
color=self._color,
zorder=1000,
)
line.draw(renderer)
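# Usage note (illustrative sketch, not part of the module): a frame class is
# normally chosen via the ``frame_class`` keyword when creating a WCSAxes,
# e.g. for an all-sky image one might use an elliptical outline:
#
#     from astropy.visualization.wcsaxes.frame import EllipticalFrame
#     ax = plt.subplot(projection=wcs, frame_class=EllipticalFrame)
#
# where ``plt`` and ``wcs`` are assumed to be matplotlib.pyplot and an astropy
# WCS instance, respectively.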
|
14297e107c7e502879b60885309a57b73813620cb38f4d05a70871f464aeef93 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import pytest
from astropy.utils.compat.optional_deps import HAS_PLT
if HAS_PLT:
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.visualization.units import quantity_support
def teardown_function(function):
plt.close("all")
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_units():
plt.figure()
with quantity_support():
buff = io.BytesIO()
plt.plot([1, 2, 3] * u.m, [3, 4, 5] * u.kg, label="label")
plt.plot([105, 210, 315] * u.cm, [3050, 3025, 3010] * u.g)
plt.legend()
# Also test fill_between, which requires actual conversion to ndarray
# with numpy >=1.10 (#4654).
plt.fill_between([1, 3] * u.m, [3, 5] * u.kg, [3050, 3010] * u.g)
plt.savefig(buff, format="svg")
assert plt.gca().xaxis.get_units() == u.m
assert plt.gca().yaxis.get_units() == u.kg
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_units_errbarr():
pytest.importorskip("matplotlib")
plt.figure()
with quantity_support():
x = [1, 2, 3] * u.s
y = [1, 2, 3] * u.m
yerr = [3, 2, 1] * u.cm
fig, ax = plt.subplots()
ax.errorbar(x, y, yerr=yerr)
assert ax.xaxis.get_units() == u.s
assert ax.yaxis.get_units() == u.m
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_incompatible_units():
# NOTE: minversion check does not work properly for matplotlib dev.
try:
# https://github.com/matplotlib/matplotlib/pull/13005
from matplotlib.units import ConversionError
except ImportError:
err_type = u.UnitConversionError
else:
err_type = ConversionError
plt.figure()
with quantity_support():
plt.plot([1, 2, 3] * u.m)
with pytest.raises(err_type):
plt.plot([105, 210, 315] * u.kg)
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_quantity_subclass():
"""Check that subclasses are recognized.
Also see https://github.com/matplotlib/matplotlib/pull/13536
"""
plt.figure()
with quantity_support():
plt.scatter(Angle([1, 2, 3], u.deg), [3, 4, 5] * u.kg)
plt.scatter([105, 210, 315] * u.arcsec, [3050, 3025, 3010] * u.g)
plt.plot(Angle([105, 210, 315], u.arcsec), [3050, 3025, 3010] * u.g)
assert plt.gca().xaxis.get_units() == u.deg
assert plt.gca().yaxis.get_units() == u.kg
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_nested():
with quantity_support():
with quantity_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(Angle([1, 2, 3], u.deg), [3, 4, 5] * u.kg)
assert ax.xaxis.get_units() == u.deg
assert ax.yaxis.get_units() == u.kg
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(Angle([1, 2, 3], u.arcsec), [3, 4, 5] * u.pc)
assert ax.xaxis.get_units() == u.arcsec
assert ax.yaxis.get_units() == u.pc
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_empty_hist():
with quantity_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.hist([1, 2, 3, 4] * u.mmag, bins=100)
# The second call results in an empty list being passed to the
# unit converter in matplotlib >= 3.1
ax.hist([] * u.mmag, bins=100)
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_radian_formatter():
with quantity_support():
fig, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3] * u.rad * np.pi)
fig.canvas.draw()
labels = [tl.get_text() for tl in ax.yaxis.get_ticklabels()]
assert labels == ["π/2", "π", "3π/2", "2π", "5π/2", "3π", "7π/2"]
|
73fe3705957f09504af7eec059154a0ede63b9e948e0adef6fbc831c6938f42a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy import ma
from numpy.testing import assert_allclose, assert_equal
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB, HAS_PLT
from astropy.visualization.interval import ManualInterval, PercentileInterval
from astropy.visualization.mpl_normalize import ImageNormalize, imshow_norm, simple_norm
from astropy.visualization.stretch import LogStretch, PowerStretch, SqrtStretch
DATA = np.linspace(0.0, 15.0, 6)
DATA2 = np.arange(3)
DATA2SCL = 0.5 * DATA2
DATA3 = np.linspace(-3.0, 3.0, 7)
STRETCHES = (SqrtStretch(), PowerStretch(0.5), LogStretch())
INVALID = (None, -np.inf, -1)
@pytest.mark.skipif(HAS_MATPLOTLIB, reason="matplotlib is installed")
def test_normalize_error_message():
with pytest.raises(
ImportError, match=r"matplotlib is required in order to use this class."
):
ImageNormalize()
@pytest.mark.skipif(not HAS_MATPLOTLIB, reason="requires matplotlib")
class TestNormalize:
def test_invalid_interval(self):
with pytest.raises(TypeError):
ImageNormalize(vmin=2.0, vmax=10.0, interval=ManualInterval, clip=True)
def test_invalid_stretch(self):
with pytest.raises(TypeError):
ImageNormalize(vmin=2.0, vmax=10.0, stretch=SqrtStretch, clip=True)
def test_stretch_none(self):
with pytest.raises(ValueError):
ImageNormalize(vmin=2.0, vmax=10.0, stretch=None)
def test_scalar(self):
norm = ImageNormalize(vmin=2.0, vmax=10.0, stretch=SqrtStretch(), clip=True)
norm2 = ImageNormalize(
data=6, interval=ManualInterval(2, 10), stretch=SqrtStretch(), clip=True
)
assert_allclose(norm(6), 0.70710678)
assert_allclose(norm(6), norm2(6))
def test_clip(self):
norm = ImageNormalize(vmin=2.0, vmax=10.0, stretch=SqrtStretch(), clip=True)
norm2 = ImageNormalize(
DATA, interval=ManualInterval(2, 10), stretch=SqrtStretch(), clip=True
)
output = norm(DATA)
expected = [0.0, 0.35355339, 0.70710678, 0.93541435, 1.0, 1.0]
assert_allclose(output, expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(output, norm2(DATA))
def test_noclip(self):
norm = ImageNormalize(
vmin=2.0, vmax=10.0, stretch=SqrtStretch(), clip=False, invalid=None
)
norm2 = ImageNormalize(
DATA,
interval=ManualInterval(2, 10),
stretch=SqrtStretch(),
clip=False,
invalid=None,
)
output = norm(DATA)
expected = [np.nan, 0.35355339, 0.70710678, 0.93541435, 1.11803399, 1.27475488]
assert_allclose(output, expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])
assert_allclose(output, norm2(DATA))
def test_implicit_autoscale(self):
norm = ImageNormalize(vmin=None, vmax=10.0, stretch=SqrtStretch(), clip=False)
norm2 = ImageNormalize(
DATA, interval=ManualInterval(None, 10), stretch=SqrtStretch(), clip=False
)
output = norm(DATA)
assert norm.vmin == np.min(DATA)
assert norm.vmax == 10.0
assert_allclose(output, norm2(DATA))
norm = ImageNormalize(vmin=2.0, vmax=None, stretch=SqrtStretch(), clip=False)
norm2 = ImageNormalize(
DATA, interval=ManualInterval(2, None), stretch=SqrtStretch(), clip=False
)
output = norm(DATA)
assert norm.vmin == 2.0
assert norm.vmax == np.max(DATA)
assert_allclose(output, norm2(DATA))
def test_call_clip(self):
"""Test that the clip keyword is used when calling the object."""
data = np.arange(5)
norm = ImageNormalize(vmin=1.0, vmax=3.0, clip=False)
output = norm(data, clip=True)
assert_equal(output.data, [0, 0, 0.5, 1.0, 1.0])
assert np.all(~output.mask)
output = norm(data, clip=False)
assert_equal(output.data, [-0.5, 0, 0.5, 1.0, 1.5])
assert np.all(~output.mask)
def test_masked_clip(self):
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])
norm = ImageNormalize(vmin=2.0, vmax=10.0, stretch=SqrtStretch(), clip=True)
norm2 = ImageNormalize(
mdata, interval=ManualInterval(2, 10), stretch=SqrtStretch(), clip=True
)
output = norm(mdata)
expected = [0.0, 0.35355339, 1.0, 0.93541435, 1.0, 1.0]
assert_allclose(output.filled(-10), expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(output, norm2(mdata))
def test_masked_noclip(self):
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])
norm = ImageNormalize(
vmin=2.0, vmax=10.0, stretch=SqrtStretch(), clip=False, invalid=None
)
norm2 = ImageNormalize(
mdata,
interval=ManualInterval(2, 10),
stretch=SqrtStretch(),
clip=False,
invalid=None,
)
output = norm(mdata)
expected = [np.nan, 0.35355339, -10, 0.93541435, 1.11803399, 1.27475488]
assert_allclose(output.filled(-10), expected)
assert_allclose(output.mask, [0, 0, 1, 0, 0, 0])
assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])
assert_allclose(output, norm2(mdata))
def test_invalid_data(self):
data = np.arange(25.0).reshape((5, 5))
data[2, 2] = np.nan
data[1, 2] = np.inf
percent = 85.0
interval = PercentileInterval(percent)
# initialized without data
norm = ImageNormalize(interval=interval)
norm(data) # sets vmin/vmax
assert_equal((norm.vmin, norm.vmax), (1.65, 22.35))
# initialized with data
norm2 = ImageNormalize(data, interval=interval)
assert_equal((norm2.vmin, norm2.vmax), (norm.vmin, norm.vmax))
norm3 = simple_norm(data, "linear", percent=percent)
assert_equal((norm3.vmin, norm3.vmax), (norm.vmin, norm.vmax))
assert_allclose(norm(data), norm2(data))
assert_allclose(norm(data), norm3(data))
norm4 = ImageNormalize()
norm4(data) # sets vmin/vmax
assert_equal((norm4.vmin, norm4.vmax), (0, 24))
norm5 = ImageNormalize(data)
assert_equal((norm5.vmin, norm5.vmax), (norm4.vmin, norm4.vmax))
@pytest.mark.parametrize("stretch", STRETCHES)
def test_invalid_keyword(self, stretch):
norm1 = ImageNormalize(
stretch=stretch, vmin=-1, vmax=1, clip=False, invalid=None
)
norm2 = ImageNormalize(stretch=stretch, vmin=-1, vmax=1, clip=False)
norm3 = ImageNormalize(
DATA3, stretch=stretch, vmin=-1, vmax=1, clip=False, invalid=-1.0
)
result1 = norm1(DATA3)
result2 = norm2(DATA3)
result3 = norm3(DATA3)
assert_equal(result1[0:2], (np.nan, np.nan))
assert_equal(result2[0:2], (-1.0, -1.0))
assert_equal(result1[2:], result2[2:])
assert_equal(result2, result3)
@pytest.mark.skipif(not HAS_MATPLOTLIB, reason="requires matplotlib")
class TestImageScaling:
def test_linear(self):
"""Test linear scaling."""
norm = simple_norm(DATA2, stretch="linear")
assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.0e-5)
def test_sqrt(self):
"""Test sqrt scaling."""
norm1 = simple_norm(DATA2, stretch="sqrt")
assert_allclose(norm1(DATA2), np.sqrt(DATA2SCL), atol=0, rtol=1.0e-5)
@pytest.mark.parametrize("invalid", INVALID)
def test_sqrt_invalid_kw(self, invalid):
stretch = SqrtStretch()
norm1 = simple_norm(
DATA3, stretch="sqrt", min_cut=-1, max_cut=1, clip=False, invalid=invalid
)
norm2 = ImageNormalize(
stretch=stretch, vmin=-1, vmax=1, clip=False, invalid=invalid
)
assert_equal(norm1(DATA3), norm2(DATA3))
def test_power(self):
"""Test power scaling."""
power = 3.0
norm = simple_norm(DATA2, stretch="power", power=power)
assert_allclose(norm(DATA2), DATA2SCL**power, atol=0, rtol=1.0e-5)
def test_log(self):
"""Test log10 scaling."""
norm = simple_norm(DATA2, stretch="log")
ref = np.log10(1000 * DATA2SCL + 1.0) / np.log10(1001.0)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.0e-5)
def test_log_with_log_a(self):
"""Test log10 scaling with a custom log_a."""
log_a = 100
norm = simple_norm(DATA2, stretch="log", log_a=log_a)
ref = np.log10(log_a * DATA2SCL + 1.0) / np.log10(log_a + 1)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.0e-5)
def test_asinh(self):
"""Test asinh scaling."""
norm = simple_norm(DATA2, stretch="asinh")
ref = np.arcsinh(10 * DATA2SCL) / np.arcsinh(10)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.0e-5)
def test_asinh_with_asinh_a(self):
"""Test asinh scaling with a custom asinh_a."""
asinh_a = 0.5
norm = simple_norm(DATA2, stretch="asinh", asinh_a=asinh_a)
ref = np.arcsinh(DATA2SCL / asinh_a) / np.arcsinh(1.0 / asinh_a)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.0e-5)
def test_sinh(self):
"""Test sinh scaling."""
sinh_a = 0.5
norm = simple_norm(DATA2, stretch="sinh", sinh_a=sinh_a)
ref = np.sinh(DATA2SCL / sinh_a) / np.sinh(1 / sinh_a)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.0e-5)
def test_min(self):
"""Test linear scaling."""
norm = simple_norm(DATA2, stretch="linear", min_cut=1.0, clip=True)
assert_allclose(norm(DATA2), [0.0, 0.0, 1.0], atol=0, rtol=1.0e-5)
def test_percent(self):
"""Test percent keywords."""
norm = simple_norm(DATA2, stretch="linear", percent=99.0, clip=True)
assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.0e-5)
norm2 = simple_norm(
DATA2, stretch="linear", min_percent=0.5, max_percent=99.5, clip=True
)
assert_allclose(norm(DATA2), norm2(DATA2), atol=0, rtol=1.0e-5)
def test_invalid_stretch(self):
"""Test invalid stretch keyword."""
with pytest.raises(ValueError):
simple_norm(DATA2, stretch="invalid")
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_imshow_norm():
import matplotlib.pyplot as plt
image = np.random.randn(10, 10)
plt.clf()
ax = plt.subplot(label="test_imshow_norm")
imshow_norm(image, ax=ax)
with pytest.raises(ValueError):
# X and data are the same, can't give both
imshow_norm(image, X=image, ax=ax)
with pytest.raises(ValueError):
# illegal to manually pass in normalization since that defeats the point
imshow_norm(image, ax=ax, norm=ImageNormalize())
plt.clf()
imshow_norm(image, ax=ax, vmin=0, vmax=1)
# make sure the pyplot version works
plt.clf()
imres, norm = imshow_norm(image, ax=None)
assert isinstance(norm, ImageNormalize)
plt.close("all")
|
a9c2147e84c2a67a0908c464d0af96f3bfc01d7d40ca131d0d8488f018135b17 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy.io import fits
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB
if HAS_MATPLOTLIB:
import matplotlib.image as mpimg
from astropy.visualization.scripts.fits2bitmap import fits2bitmap, main
@pytest.mark.skipif(not HAS_MATPLOTLIB, reason="requires matplotlib")
class TestFits2Bitmap:
def setup_class(self):
self.filename = "test.fits"
self.array = np.arange(16384).reshape((128, 128))
def test_function(self, tmp_path):
filename = tmp_path / self.filename
fits.writeto(filename, self.array)
fits2bitmap(filename)
def test_script(self, tmp_path):
filename = str(tmp_path / self.filename)
fits.writeto(filename, self.array)
main([filename, "-e", "0"])
def test_exten_num(self, tmp_path):
filename = str(tmp_path / self.filename)
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU(self.array)
hdulist = fits.HDUList([hdu1, hdu2])
hdulist.writeto(filename)
main([filename, "-e", "1"])
def test_exten_name(self, tmp_path):
filename = str(tmp_path / self.filename)
hdu1 = fits.PrimaryHDU()
extname = "SCI"
hdu2 = fits.ImageHDU(self.array)
hdu2.header["EXTNAME"] = extname
hdulist = fits.HDUList([hdu1, hdu2])
hdulist.writeto(filename)
main([filename, "-e", extname])
@pytest.mark.parametrize("file_exten", [".gz", ".bz2"])
def test_compressed_fits(self, tmp_path, file_exten):
filename = str(tmp_path / f"test.fits{file_exten}")
fits.writeto(filename, self.array)
main([filename, "-e", "0"])
def test_orientation(self, tmp_path):
"""
Regression test to check the image vertical orientation/origin.
"""
filename = str(tmp_path / self.filename)
out_filename = "fits2bitmap_test.png"
out_filename = str(tmp_path / out_filename)
data = np.zeros((32, 32))
data[0:16, :] = 1.0
fits.writeto(filename, data)
main([filename, "-e", "0", "-o", out_filename])
img = mpimg.imread(out_filename)
assert img[0, 0, 0] == 0
assert img[31, 31, 0] == 1
|
c69a38dea486b56b186ae15d9cbdd0027daca5e3b63c04c20a3e07ede520c1a5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from contextlib import nullcontext
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pytest
from matplotlib.contour import QuadContourSet
from packaging.version import Version
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy.visualization.wcsaxes.frame import (
EllipticalFrame,
RectangularFrame,
RectangularFrame1D,
)
from astropy.visualization.wcsaxes.transforms import CurvedTransform
from astropy.visualization.wcsaxes.utils import get_coord_meta
from astropy.wcs import WCS
from astropy.wcs.wcsapi import HighLevelWCSWrapper, SlicedLowLevelWCS
ft_version = Version(matplotlib.ft2font.__freetype_version__)
FREETYPE_261 = ft_version == Version("2.6.1")
# We cannot use matplotlib.checkdep_usetex() anymore, see
# https://github.com/matplotlib/matplotlib/issues/23244
TEX_UNAVAILABLE = True
# matplotlib 3.7 is not released yet.
MATPLOTLIB_LT_3_7 = Version(matplotlib.__version__) < Version("3.6.99")
def teardown_function(function):
plt.close("all")
def test_grid_regression(ignore_matplotlibrc):
# Regression test for a bug that meant that if the rc parameter
# axes.grid was set to True, WCSAxes would crash upon initialization.
plt.rc("axes", grid=True)
fig = plt.figure(figsize=(3, 3))
WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
def test_format_coord_regression(ignore_matplotlibrc, tmp_path):
# Regression test for a bug that meant that if format_coord was called by
# Matplotlib before the axes were drawn, an error occurred.
fig = plt.figure(figsize=(3, 3))
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
assert ax.format_coord(10, 10) == ""
assert ax.coords[0].format_coord(10) == ""
assert ax.coords[1].format_coord(10) == ""
fig.savefig(tmp_path / "nothing")
assert ax.format_coord(10, 10) == "10.0 10.0 (world)"
assert ax.coords[0].format_coord(10) == "10.0"
assert ax.coords[1].format_coord(10) == "10.0"
TARGET_HEADER = fits.Header.fromstring(
"""
NAXIS = 2
NAXIS1 = 200
NAXIS2 = 100
CTYPE1 = 'RA---MOL'
CRPIX1 = 500
CRVAL1 = 180.0
CDELT1 = -0.4
CUNIT1 = 'deg '
CTYPE2 = 'DEC--MOL'
CRPIX2 = 400
CRVAL2 = 0.0
CDELT2 = 0.4
CUNIT2 = 'deg '
COORDSYS= 'icrs '
""",
sep="\n",
)
@pytest.mark.parametrize("grid_type", ["lines", "contours"])
def test_no_numpy_warnings(ignore_matplotlibrc, tmp_path, grid_type):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.imshow(np.zeros((100, 200)))
ax.coords.grid(color="white", grid_type=grid_type)
# There should be no warnings raised if some pixels are outside WCS
# (since this is normal).
    # BUT our own catch_warnings context was ignoring some warnings before, so
    # now we have to catch them explicitly. Otherwise, the pytest
    # filterwarnings=error setting in setup.cfg will fail this test.
# There are actually multiple warnings but they are all similar.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=r".*converting a masked element to nan.*"
)
warnings.filterwarnings(
"ignore", message=r".*No contour levels were found within the data range.*"
)
warnings.filterwarnings(
"ignore", message=r".*np\.asscalar\(a\) is deprecated since NumPy v1\.16.*"
)
warnings.filterwarnings(
"ignore", message=r".*PY_SSIZE_T_CLEAN will be required.*"
)
fig.savefig(tmp_path / "test.png")
def test_invalid_frame_overlay(ignore_matplotlibrc):
# Make sure a nice error is returned if a frame doesn't exist
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
with pytest.raises(ValueError, match=r"Frame banana not found"):
ax.get_coords_overlay("banana")
with pytest.raises(ValueError, match=r"Unknown frame: banana"):
get_coord_meta("banana")
def test_plot_coord_transform(ignore_matplotlibrc):
twoMASS_k_header = get_pkg_data_filename("data/2MASS_k_header")
twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8], projection=WCS(twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(359.76045223 * u.deg, 0.26876217 * u.deg)
with pytest.raises(TypeError):
ax.plot_coord(c, "o", transform=ax.get_transform("galactic"))
def test_scatter_coord_transform(ignore_matplotlibrc):
twoMASS_k_header = get_pkg_data_filename("data/2MASS_k_header")
twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8], projection=WCS(twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(359.76045223 * u.deg, 0.26876217 * u.deg)
with pytest.raises(TypeError):
ax.scatter_coord(c, marker="o", transform=ax.get_transform("galactic"))
def test_set_label_properties(ignore_matplotlibrc):
# Regression test to make sure that arguments passed to
# set_xlabel/set_ylabel are passed to the underlying coordinate helpers
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.set_xlabel("Test x label", labelpad=2, color="red")
ax.set_ylabel("Test y label", labelpad=3, color="green")
assert ax.coords[0].axislabels.get_text() == "Test x label"
assert ax.coords[0].axislabels.get_minpad("b") == 2
assert ax.coords[0].axislabels.get_color() == "red"
assert ax.coords[1].axislabels.get_text() == "Test y label"
assert ax.coords[1].axislabels.get_minpad("l") == 3
assert ax.coords[1].axislabels.get_color() == "green"
assert ax.get_xlabel() == "Test x label"
assert ax.get_ylabel() == "Test y label"
GAL_HEADER = fits.Header.fromstring(
"""
SIMPLE = T / conforms to FITS standard
BITPIX = -32 / array data type
NAXIS = 3 / number of array dimensions
NAXIS1 = 31
NAXIS2 = 2881
NAXIS3 = 480
EXTEND = T
CTYPE1 = 'DISTMOD '
CRVAL1 = 3.5
CDELT1 = 0.5
CRPIX1 = 1.0
CTYPE2 = 'GLON-CAR'
CRVAL2 = 180.0
CDELT2 = -0.125
CRPIX2 = 1.0
CTYPE3 = 'GLAT-CAR'
CRVAL3 = 0.0
CDELT3 = 0.125
CRPIX3 = 241.0
""",
sep="\n",
)
def test_slicing_warnings(ignore_matplotlibrc, tmp_path):
# Regression test to make sure that no warnings are emitted by the tick
# locator for the sliced axis when slicing a cube.
# Scalar case
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ["x", "y", "z"]
wcs3d.wcs.cunit = ["deg", "deg", "km/s"]
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0.0, 0.0, 1.0]
with warnings.catch_warnings():
# https://github.com/astropy/astropy/issues/9690
warnings.filterwarnings("ignore", message=r".*PY_SSIZE_T_CLEAN.*")
plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1))
plt.savefig(tmp_path / "test.png")
# Angle case
wcs3d = WCS(GAL_HEADER)
with warnings.catch_warnings():
# https://github.com/astropy/astropy/issues/9690
warnings.filterwarnings("ignore", message=r".*PY_SSIZE_T_CLEAN.*")
plt.clf()
plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 2))
plt.savefig(tmp_path / "test.png")
def test_plt_xlabel_ylabel(tmp_path):
# Regression test for a bug that happened when using plt.xlabel
# and plt.ylabel with Matplotlib 3.0
plt.subplot(projection=WCS())
plt.xlabel("Galactic Longitude")
plt.ylabel("Galactic Latitude")
plt.savefig(tmp_path / "test.png")
def test_grid_type_contours_transform(tmp_path):
# Regression test for a bug that caused grid_type='contours' to not work
# with custom transforms
class CustomTransform(CurvedTransform):
# We deliberately don't define the inverse, and has_inverse should
# default to False.
def transform(self, values):
return values * 1.3
transform = CustomTransform()
coord_meta = {
"type": ("scalar", "scalar"),
"unit": (u.m, u.s),
"wrap": (None, None),
"name": ("x", "y"),
}
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], transform=transform, coord_meta=coord_meta)
fig.add_axes(ax)
ax.grid(grid_type="contours")
fig.savefig(tmp_path / "test.png")
def test_plt_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# plt.imshow was called.
ax = plt.subplot(projection=WCS())
plt.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_ax_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# ax.imshow was called with no origin
ax = plt.subplot(projection=WCS())
ax.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_grid_contour_large_spacing(tmp_path):
# Regression test for a bug that caused a crash when grid was called and
# didn't produce grid lines (due e.g. to too large spacing) and was then
# called again.
filename = tmp_path / "test.png"
ax = plt.subplot(projection=WCS())
ax.set_xlim(-0.5, 1.5)
ax.set_ylim(-0.5, 1.5)
ax.coords[0].set_ticks(values=[] * u.one)
ax.coords[0].grid(grid_type="contours")
plt.savefig(filename)
ax.coords[0].grid(grid_type="contours")
plt.savefig(filename)
def test_contour_return():
# Regression test for a bug that caused contour and contourf to return None
# instead of the contour object.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
cset = ax.contour(np.arange(16).reshape(4, 4), transform=ax.get_transform("world"))
assert isinstance(cset, QuadContourSet)
cset = ax.contourf(np.arange(16).reshape(4, 4), transform=ax.get_transform("world"))
assert isinstance(cset, QuadContourSet)
def test_contour_empty():
# Regression test for a bug that caused contour to crash if no contours
# were present.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
if MATPLOTLIB_LT_3_7:
ctx = pytest.warns(
UserWarning, match="No contour levels were found within the data range"
)
else:
ctx = nullcontext()
with ctx:
ax.contour(np.zeros((4, 4)), transform=ax.get_transform("world"))
def test_iterate_coords(ignore_matplotlibrc):
# Regression test for a bug that caused ax.coords to return too few axes
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ["x", "y", "z"]
wcs3d.wcs.cunit = ["deg", "deg", "km/s"]
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0.0, 0.0, 1.0]
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1))
x, y, z = ax.coords
def test_invalid_slices_errors(ignore_matplotlibrc):
# Make sure that users get a clear message when specifying a WCS with
# >2 dimensions without giving the 'slices' argument, or if the 'slices'
# argument has too many/few elements.
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ["x", "y", "z"]
plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1))
with pytest.raises(
ValueError,
match=r"WCS has more than 2 pixel dimensions, so 'slices' should be set",
):
plt.subplot(1, 1, 1, projection=wcs3d)
with pytest.raises(
ValueError,
match=(
r"'slices' should have as many elements as WCS has pixel dimensions .should"
r" be 3."
),
):
plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1, 2))
wcs2d = WCS(naxis=2)
wcs2d.wcs.ctype = ["x", "y"]
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d)
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=("x", "y"))
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=("y", "x"))
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=["x", "y"])
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, "x"))
assert ax.frame_class is RectangularFrame1D
wcs1d = WCS(naxis=1)
wcs1d.wcs.ctype = ["x"]
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs1d)
assert ax.frame_class is RectangularFrame1D
with pytest.raises(ValueError):
plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, "y"))
EXPECTED_REPR_1 = """
<CoordinatesMap with 3 world coordinates:
index aliases type ... wrap format_unit visible
----- ------------------------------ --------- ... --------- ----------- -------
0 distmod dist scalar ... None no
1 pos.galactic.lon glon-car glon longitude ... 360.0 deg deg yes
2 pos.galactic.lat glat-car glat latitude ... None deg yes
>
""".strip()
EXPECTED_REPR_2 = """
<CoordinatesMap with 3 world coordinates:
index aliases type ... wrap format_unit visible
----- ------------------------------ --------- ... --------- ----------- -------
0 distmod dist scalar ... None yes
1 pos.galactic.lon glon-car glon longitude ... 360.0 deg deg yes
2 pos.galactic.lat glat-car glat latitude ... None deg yes
>
""".strip()
def test_repr(ignore_matplotlibrc):
# Unit test to make sure __repr__ looks as expected
wcs3d = WCS(GAL_HEADER)
# Cube header has world coordinates as distance, lon, lat, so start off
# by slicing in a way that we select just lon,lat:
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=(1, "x", "y"))
assert repr(ax.coords) == EXPECTED_REPR_1
# Now slice in a way that all world coordinates are still present:
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1))
assert repr(ax.coords) == EXPECTED_REPR_2
@pytest.fixture
def time_spectral_wcs_2d():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["FREQ", "TIME"]
wcs.wcs.set()
return wcs
def test_time_wcs(time_spectral_wcs_2d):
# Regression test for a bug that caused WCSAxes to error when using a WCS
# with a time axis.
plt.subplot(projection=time_spectral_wcs_2d)
@pytest.mark.skipif(TEX_UNAVAILABLE, reason="TeX is unavailable")
def test_simplify_labels_usetex(ignore_matplotlibrc, tmp_path):
"""Regression test for https://github.com/astropy/astropy/issues/8004."""
plt.rc("text", usetex=True)
header = {
"NAXIS": 2,
"NAXIS1": 360,
"NAXIS2": 180,
"CRPIX1": 180.5,
"CRPIX2": 90.5,
"CRVAL1": 180.0,
"CRVAL2": 0.0,
"CDELT1": -2 * np.sqrt(2) / np.pi,
"CDELT2": 2 * np.sqrt(2) / np.pi,
"CTYPE1": "RA---MOL",
"CTYPE2": "DEC--MOL",
"RADESYS": "ICRS",
}
wcs = WCS(header)
fig, ax = plt.subplots(subplot_kw=dict(frame_class=EllipticalFrame, projection=wcs))
ax.set_xlim(-0.5, header["NAXIS1"] - 0.5)
ax.set_ylim(-0.5, header["NAXIS2"] - 0.5)
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[0].set_ticks(spacing=45 * u.deg)
ax.coords[1].set_ticks(spacing=30 * u.deg)
ax.grid()
fig.savefig(tmp_path / "plot.png")
@pytest.mark.parametrize("frame_class", [RectangularFrame, EllipticalFrame])
def test_set_labels_with_coords(ignore_matplotlibrc, frame_class):
"""Test if ``axis.set_xlabel()`` calls the correct ``coords[i]_set_axislabel()`` in a
WCS plot. Regression test for https://github.com/astropy/astropy/issues/10435.
"""
labels = ["RA", "Declination"]
header = {
"NAXIS": 2,
"NAXIS1": 360,
"NAXIS2": 180,
"CRPIX1": 180.5,
"CRPIX2": 90.5,
"CRVAL1": 180.0,
"CRVAL2": 0.0,
"CDELT1": -2 * np.sqrt(2) / np.pi,
"CDELT2": 2 * np.sqrt(2) / np.pi,
"CTYPE1": "RA---AIT",
"CTYPE2": "DEC--AIT",
}
wcs = WCS(header)
fig, ax = plt.subplots(subplot_kw=dict(frame_class=frame_class, projection=wcs))
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
assert ax.get_xlabel() == labels[0]
assert ax.get_ylabel() == labels[1]
for i in range(2):
assert ax.coords[i].get_axislabel() == labels[i]
@pytest.mark.parametrize("atol", [0.2, 1.0e-8])
def test_bbox_size(atol):
# Test for the size of a WCSAxes bbox (only have Matplotlib >= 3.0 now)
extents = [11.38888888888889, 3.5, 576.0, 432.0]
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
fig.canvas.draw()
renderer = fig.canvas.renderer
ax_bbox = ax.get_tightbbox(renderer)
# Enforce strict test only with reference Freetype version
if atol < 0.1 and not FREETYPE_261:
pytest.xfail(
"Exact BoundingBox dimensions are only ensured with FreeType 2.6.1"
)
assert np.allclose(ax_bbox.extents, extents, atol=atol)
def test_wcs_type_transform_regression():
wcs = WCS(TARGET_HEADER)
sliced_wcs = SlicedLowLevelWCS(wcs, np.s_[1:-1, 1:-1])
ax = plt.subplot(1, 1, 1, projection=wcs)
ax.get_transform(sliced_wcs)
    high_wcs = HighLevelWCSWrapper(sliced_wcs)
    ax.get_transform(high_wcs)
def test_multiple_draws_grid_contours(tmp_path):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS())
ax.grid(color="black", grid_type="contours")
fig.savefig(tmp_path / "plot.png")
fig.savefig(tmp_path / "plot.png")
def test_get_coord_range_nan_regression():
# Test to make sure there is no internal casting of NaN to integers
# NumPy 1.24 raises a RuntimeWarning if a NaN is cast to an integer
wcs = WCS(TARGET_HEADER)
wcs.wcs.crval[0] = 0 # Re-position the longitude wrap to the middle
ax = plt.subplot(1, 1, 1, projection=wcs)
# Set the Y limits within valid latitudes/declinations
ax.set_ylim(300, 500)
# Set the X limits within valid longitudes/RAs, so the world coordinates have no NaNs
ax.set_xlim(300, 700)
assert np.allclose(
ax.coords.get_coord_range(),
np.array(
[
(-123.5219272110385, 122.49684897692201),
(-44.02289164685554, 44.80732766607591),
]
),
)
# Extend the X limits to include invalid longitudes/RAs, so the world coordinates have NaNs
ax.set_xlim(0, 700)
assert np.allclose(
ax.coords.get_coord_range(),
np.array(
[(-131.3193386797236, 180.0), (-44.02289164685554, 44.80732766607591)]
),
)
def test_imshow_error():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS())
with pytest.raises(ValueError, match="Cannot use images with origin='upper"):
ax.imshow(np.ones(100).reshape(10, 10), origin="upper")
def test_label_setting():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS())
# Check both xlabel and label kwargs work
ax.set_xlabel(xlabel="label")
ax.set_xlabel(label="label")
# Check no label errors:
with pytest.raises(
TypeError, match=r"set_xlabel\(\) missing 1 required positional argument"
):
ax.set_xlabel()
# Check both xlabel and label kwargs work
ax.set_ylabel(ylabel="label")
ax.set_ylabel(label="label")
# Check no label errors:
with pytest.raises(
TypeError, match=r"set_ylabel\(\) missing 1 required positional argument"
):
ax.set_ylabel()
def test_invisible_bbox():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS())
assert ax.get_tightbbox(fig.canvas.get_renderer()) is not None
ax.set_visible(False)
assert ax.get_tightbbox(fig.canvas.get_renderer()) is None
|
74a005c702eb1788357fb3a5867c148b10cf49d8db497849d7cf8f9d2d5f8eea | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_almost_equal
from astropy import units as u
from astropy.coordinates import Angle, Galactic, HADec
from astropy.tests.helper import (
assert_quantity_allclose as assert_almost_equal_quantity,
)
from astropy.visualization.wcsaxes.utils import (
get_coord_meta,
select_step_degree,
select_step_hour,
select_step_scalar,
)
def test_select_step_degree():
assert_almost_equal_quantity(select_step_degree(127 * u.deg), 180.0 * u.deg)
assert_almost_equal_quantity(select_step_degree(44 * u.deg), 45.0 * u.deg)
assert_almost_equal_quantity(select_step_degree(18 * u.arcmin), 15 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(3.4 * u.arcmin), 3 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(2 * u.arcmin), 2 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(59 * u.arcsec), 1 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(33 * u.arcsec), 30 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(2.2 * u.arcsec), 2 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.8 * u.arcsec), 1 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.2 * u.arcsec), 0.2 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.11 * u.arcsec), 0.1 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.022 * u.arcsec), 0.02 * u.arcsec)
assert_almost_equal_quantity(
select_step_degree(0.0043 * u.arcsec), 0.005 * u.arcsec
)
assert_almost_equal_quantity(
select_step_degree(0.00083 * u.arcsec), 0.001 * u.arcsec
)
assert_almost_equal_quantity(
select_step_degree(0.000027 * u.arcsec), 0.00002 * u.arcsec
)
def test_select_step_hour():
assert_almost_equal_quantity(select_step_hour(127 * u.deg), 8.0 * u.hourangle)
assert_almost_equal_quantity(select_step_hour(44 * u.deg), 3.0 * u.hourangle)
assert_almost_equal_quantity(select_step_hour(18 * u.arcmin), 15 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(3.4 * u.arcmin), 3 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(2 * u.arcmin), 1.5 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(59 * u.arcsec), 1 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(33 * u.arcsec), 30 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(2.2 * u.arcsec), 3.0 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.8 * u.arcsec), 0.75 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.2 * u.arcsec), 0.15 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.11 * u.arcsec), 0.15 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.022 * u.arcsec), 0.03 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.0043 * u.arcsec), 0.003 * u.arcsec)
assert_almost_equal_quantity(
select_step_hour(0.00083 * u.arcsec), 0.00075 * u.arcsec
)
assert_almost_equal_quantity(
select_step_hour(0.000027 * u.arcsec), 0.00003 * u.arcsec
)
def test_select_step_scalar():
assert_almost_equal(select_step_scalar(33122.0), 50000.0)
assert_almost_equal(select_step_scalar(433.0), 500.0)
assert_almost_equal(select_step_scalar(12.3), 10)
assert_almost_equal(select_step_scalar(3.3), 5.0)
assert_almost_equal(select_step_scalar(0.66), 0.5)
assert_almost_equal(select_step_scalar(0.0877), 0.1)
assert_almost_equal(select_step_scalar(0.00577), 0.005)
assert_almost_equal(select_step_scalar(0.00022), 0.0002)
assert_almost_equal(select_step_scalar(0.000012), 0.00001)
assert_almost_equal(select_step_scalar(0.000000443), 0.0000005)
def test_get_coord_meta():
galactic_meta = get_coord_meta(Galactic)
assert galactic_meta["name"] == ["l", "b"]
assert galactic_meta["wrap"] == (Angle(360 * u.deg), None)
assert galactic_meta["unit"] == (u.deg, u.deg)
hadec_meta = get_coord_meta(HADec)
assert hadec_meta["name"] == ["ha", "dec"]
assert hadec_meta["wrap"] == (Angle(180 * u.deg), None)
assert hadec_meta["unit"] == (u.hourangle, u.deg)
|
7d547c065aa13a89c398fd93c8b47e9d2f2956234af2882f093ff6193f991924 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from textwrap import dedent
import matplotlib.pyplot as plt
import numpy as np
import pytest
from matplotlib.transforms import Affine2D, IdentityTransform
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.tests.figures import figure_test
from astropy.time import Time
from astropy.units import Quantity
from astropy.utils.data import get_pkg_data_filename
from astropy.visualization.wcsaxes.frame import RectangularFrame, RectangularFrame1D
from astropy.visualization.wcsaxes.wcsapi import (
WCSWorld2PixelTransform,
apply_slices,
transform_coord_meta_from_wcs,
)
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseLowLevelWCS, SlicedLowLevelWCS
@pytest.fixture
def plt_close():
yield
plt.close("all")
WCS2D = WCS(naxis=2)
WCS2D.wcs.ctype = ["x", "y"]
WCS2D.wcs.cunit = ["km", "km"]
WCS2D.wcs.crpix = [614.5, 856.5]
WCS2D.wcs.cdelt = [6.25, 6.25]
WCS2D.wcs.crval = [0.0, 0.0]
WCS3D = WCS(naxis=3)
WCS3D.wcs.ctype = ["x", "y", "z"]
WCS3D.wcs.cunit = ["km", "km", "km"]
WCS3D.wcs.crpix = [614.5, 856.5, 333]
WCS3D.wcs.cdelt = [6.25, 6.25, 23]
WCS3D.wcs.crval = [0.0, 0.0, 1.0]
@pytest.fixture
def wcs_4d():
header = dedent(
"""\
WCSAXES = 4 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CRPIX3 = 0.0 / Pixel coordinate of reference point
CRPIX4 = 5.0 / Pixel coordinate of reference point
CDELT1 = 0.4 / [min] Coordinate increment at reference point
CDELT2 = 2E-11 / [m] Coordinate increment at reference point
CDELT3 = 0.0027777777777778 / [deg] Coordinate increment at reference point
CDELT4 = 0.0013888888888889 / [deg] Coordinate increment at reference point
CUNIT1 = 'min' / Units of coordinate increment and value
CUNIT2 = 'm' / Units of coordinate increment and value
CUNIT3 = 'deg' / Units of coordinate increment and value
CUNIT4 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'TIME' / Coordinate type code
CTYPE2 = 'WAVE' / Vacuum wavelength (linear)
CTYPE3 = 'HPLT-TAN' / Coordinate type codegnomonic projection
CTYPE4 = 'HPLN-TAN' / Coordinate type codegnomonic projection
CRVAL1 = 0.0 / [min] Coordinate value at reference point
CRVAL2 = 0.0 / [m] Coordinate value at reference point
CRVAL3 = 0.0 / [deg] Coordinate value at reference point
CRVAL4 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
"""
)
return WCS(header=fits.Header.fromstring(header, sep="\n"))
@pytest.fixture
def cube_wcs():
cube_header = get_pkg_data_filename("data/cube_header")
header = fits.Header.fromtextfile(cube_header)
return WCS(header=header)
def test_shorthand_inversion():
"""
Test that the Matplotlib subtraction shorthand for composing and inverting
transformations works.
"""
w1 = WCS(naxis=2)
w1.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w1.wcs.crpix = [256.0, 256.0]
w1.wcs.cdelt = [-0.05, 0.05]
w1.wcs.crval = [120.0, -19.0]
w2 = WCS(naxis=2)
w2.wcs.ctype = ["RA---SIN", "DEC--SIN"]
w2.wcs.crpix = [256.0, 256.0]
w2.wcs.cdelt = [-0.05, 0.05]
w2.wcs.crval = [235.0, +23.7]
t1 = WCSWorld2PixelTransform(w1)
t2 = WCSWorld2PixelTransform(w2)
assert t1 - t2 == t1 + t2.inverted()
assert t1 - t2 != t2.inverted() + t1
assert t1 - t1 == IdentityTransform()
# We add Affine2D to catch the fact that in Matplotlib, having a composite
# transform can result in stricter requirements on the dimensionality.
def test_2d():
world = np.ones((10, 2))
w1 = WCSWorld2PixelTransform(WCS2D) + Affine2D()
pixel = w1.transform(world)
world_2 = w1.inverted().transform(pixel)
np.testing.assert_allclose(world, world_2)
def test_3d():
world = np.ones((10, 2))
w1 = WCSWorld2PixelTransform(WCS3D[:, 0, :]) + Affine2D()
pixel = w1.transform(world)
world_2 = w1.inverted().transform(pixel)
np.testing.assert_allclose(world[:, 0], world_2[:, 0])
np.testing.assert_allclose(world[:, 1], world_2[:, 1])
def test_coord_type_from_ctype(cube_wcs):
_, coord_meta = transform_coord_meta_from_wcs(
cube_wcs, RectangularFrame, slices=(50, "y", "x")
)
axislabel_position = coord_meta["default_axislabel_position"]
ticklabel_position = coord_meta["default_ticklabel_position"]
ticks_position = coord_meta["default_ticks_position"]
# These axes are swapped due to the pixel derivatives
assert axislabel_position == ["l", "r", "b"]
assert ticklabel_position == ["l", "r", "b"]
assert ticks_position == ["l", "r", "b"]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["GLON-TAN", "GLAT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.cname = ["Longitude", ""]
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.deg, u.deg]
assert coord_meta["wrap"] == [None, None]
assert coord_meta["default_axis_label"] == ["Longitude", "pos.galactic.lat"]
assert coord_meta["name"] == [
("pos.galactic.lon", "glon-tan", "glon", "Longitude"),
("pos.galactic.lat", "glat-tan", "glat"),
]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["HPLN-TAN", "HPLT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.arcsec, u.arcsec]
assert coord_meta["wrap"] == [180.0 * u.deg, None]
_, coord_meta = transform_coord_meta_from_wcs(
wcs, RectangularFrame, slices=("y", "x")
)
axislabel_position = coord_meta["default_axislabel_position"]
ticklabel_position = coord_meta["default_ticklabel_position"]
ticks_position = coord_meta["default_ticks_position"]
# These axes should be swapped because of slices
assert axislabel_position == ["l", "b"]
assert ticklabel_position == ["l", "b"]
assert ticks_position == ["bltr", "bltr"]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["HGLN-TAN", "HGLT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.deg, u.deg]
assert coord_meta["wrap"] == [180.0 * u.deg, None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["CRLN-TAN", "CRLT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.deg, u.deg]
assert coord_meta["wrap"] == [360.0 * u.deg, None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.hourangle, u.deg]
assert coord_meta["wrap"] == [None, None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["spam", "spam"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["scalar", "scalar"]
assert coord_meta["format_unit"] == [u.one, u.one]
assert coord_meta["wrap"] == [None, None]
def test_coord_type_1d_1d_wcs():
wcs = WCS(naxis=1)
wcs.wcs.ctype = ["WAVE"]
wcs.wcs.crpix = [256.0]
wcs.wcs.cdelt = [-0.05]
wcs.wcs.crval = [50.0]
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame1D)
assert coord_meta["type"] == ["scalar"]
assert coord_meta["format_unit"] == [u.m]
assert coord_meta["wrap"] == [None]
def test_coord_type_1d_2d_wcs_correlated():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["GLON-TAN", "GLAT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(
wcs, RectangularFrame1D, slices=("x", 0)
)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.deg, u.deg]
assert coord_meta["wrap"] == [None, None]
assert coord_meta["visible"] == [True, True]
def test_coord_type_1d_2d_wcs_uncorrelated():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["WAVE", "UTC"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.cunit = ["nm", "s"]
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(
wcs, RectangularFrame1D, slices=("x", 0)
)
assert coord_meta["type"] == ["scalar", "scalar"]
assert coord_meta["format_unit"] == [u.m, u.s]
assert coord_meta["wrap"] == [None, None]
assert coord_meta["visible"] == [True, False]
def test_coord_meta_4d(wcs_4d):
_, coord_meta = transform_coord_meta_from_wcs(
wcs_4d, RectangularFrame, slices=(0, 0, "x", "y")
)
axislabel_position = coord_meta["default_axislabel_position"]
ticklabel_position = coord_meta["default_ticklabel_position"]
ticks_position = coord_meta["default_ticks_position"]
assert axislabel_position == ["", "", "b", "l"]
assert ticklabel_position == ["", "", "b", "l"]
assert ticks_position == ["", "", "bltr", "bltr"]
def test_coord_meta_4d_line_plot(wcs_4d):
_, coord_meta = transform_coord_meta_from_wcs(
wcs_4d, RectangularFrame1D, slices=(0, 0, 0, "x")
)
axislabel_position = coord_meta["default_axislabel_position"]
ticklabel_position = coord_meta["default_ticklabel_position"]
ticks_position = coord_meta["default_ticks_position"]
# These axes are swapped due to the pixel derivatives
assert axislabel_position == ["", "", "t", "b"]
assert ticklabel_position == ["", "", "t", "b"]
assert ticks_position == ["", "", "t", "b"]
@pytest.fixture
def sub_wcs(wcs_4d, wcs_slice):
return SlicedLowLevelWCS(wcs_4d, wcs_slice)
@pytest.mark.parametrize(
("wcs_slice", "wcsaxes_slices", "world_map", "ndim"),
[
(np.s_[...], [0, 0, "x", "y"], (2, 3), 2),
(np.s_[...], [0, "x", 0, "y"], (1, 2, 3), 3),
(np.s_[...], ["x", 0, 0, "y"], (0, 2, 3), 3),
(np.s_[...], ["x", "y", 0, 0], (0, 1), 2),
(np.s_[:, :, 0, :], [0, "x", "y"], (1, 2), 2),
(np.s_[:, :, 0, :], ["x", 0, "y"], (0, 1, 2), 3),
(np.s_[:, :, 0, :], ["x", "y", 0], (0, 1, 2), 3),
(np.s_[:, 0, :, :], ["x", "y", 0], (0, 1), 2),
],
)
def test_apply_slices(sub_wcs, wcs_slice, wcsaxes_slices, world_map, ndim):
transform_wcs, _, out_world_map = apply_slices(sub_wcs, wcsaxes_slices)
assert transform_wcs.world_n_dim == ndim
assert out_world_map == world_map
# parametrize here to pass to the fixture
@pytest.mark.parametrize("wcs_slice", [np.s_[:, :, 0, :]])
def test_sliced_ND_input(wcs_4d, sub_wcs, wcs_slice, plt_close):
slices_wcsaxes = [0, "x", "y"]
for sub_wcs in (sub_wcs, SlicedLowLevelWCS(wcs_4d, wcs_slice)):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
_, coord_meta = transform_coord_meta_from_wcs(
sub_wcs, RectangularFrame, slices=slices_wcsaxes
)
assert all(len(x) == 3 for x in coord_meta.values())
assert coord_meta["name"] == [
"time",
("custom:pos.helioprojective.lat", "hplt-tan", "hplt"),
("custom:pos.helioprojective.lon", "hpln-tan", "hpln"),
]
assert coord_meta["type"] == ["scalar", "latitude", "longitude"]
assert coord_meta["wrap"] == [None, None, 180.0 * u.deg]
assert coord_meta["unit"] == [u.Unit("min"), u.Unit("deg"), u.Unit("deg")]
assert coord_meta["visible"] == [False, True, True]
assert coord_meta["format_unit"] == [
u.Unit("min"),
u.Unit("arcsec"),
u.Unit("arcsec"),
]
assert coord_meta["default_axislabel_position"] == ["", "b", "l"]
assert coord_meta["default_ticklabel_position"] == ["", "b", "l"]
assert coord_meta["default_ticks_position"] == ["", "bltr", "bltr"]
# Validate the axes initialize correctly
plt.clf()
plt.subplot(projection=sub_wcs, slices=slices_wcsaxes)
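# Minimal hand-written APE 14 low-level WCS used by the tests below. By
# default it has 2 pixel dimensions (see ``pixel_dim``) but 5 world
# dimensions, so it exercises handling of WCSes whose pixel and world
# dimensionalities differ.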
class LowLevelWCS5D(BaseLowLevelWCS):
pixel_dim = 2
@property
def pixel_n_dim(self):
return self.pixel_dim
@property
def world_n_dim(self):
return 5
@property
def world_axis_physical_types(self):
return [
"em.freq",
"time",
"pos.eq.ra",
"pos.eq.dec",
"phys.polarization.stokes",
]
@property
def world_axis_units(self):
return ["Hz", "day", "deg", "deg", ""]
@property
def world_axis_names(self):
return ["Frequency", "", "RA", "DEC", ""]
def pixel_to_world_values(self, *pixel_arrays):
pixel_arrays = (list(pixel_arrays) * 3)[:-1] # make list have 5 elements
return [
np.asarray(pix) * scale
for pix, scale in zip(pixel_arrays, [10, 0.2, 0.4, 0.39, 2])
]
def world_to_pixel_values(self, *world_arrays):
world_arrays = world_arrays[:2] # make list have 2 elements
return [
np.asarray(world) / scale for world, scale in zip(world_arrays, [10, 0.2])
]
@property
def world_axis_object_components(self):
return [
("freq", 0, "value"),
("time", 0, "mjd"),
("celestial", 0, "spherical.lon.degree"),
("celestial", 1, "spherical.lat.degree"),
("stokes", 0, "value"),
]
@property
def world_axis_object_classes(self):
return {
"celestial": (SkyCoord, (), {"unit": "deg"}),
"time": (Time, (), {"format": "mjd"}),
"freq": (Quantity, (), {"unit": "Hz"}),
"stokes": (Quantity, (), {"unit": "one"}),
}
def test_edge_axes():
    # Check that axes on the edge of a spherical projection are shown properly
# (see https://github.com/astropy/astropy/issues/10441)
shape = [180, 360]
data = np.random.rand(*shape)
header = {
"wcsaxes": 2,
"crpix1": 180.5,
"crpix2": 90.5,
"cdelt1": 1.0,
"cdelt2": 1.0,
"cunit1": "deg",
"cunit2": "deg",
"ctype1": "CRLN-CAR",
"ctype2": "CRLT-CAR",
"crval1": 0.0,
"crval2": 0.0,
"lonpole": 0.0,
"latpole": 90.0,
}
wcs = WCS(header)
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=wcs)
ax.imshow(data, origin="lower")
# By default the x- and y- axes should be drawn
lon = ax.coords[0]
lat = ax.coords[1]
fig.canvas.draw()
np.testing.assert_equal(
lon.ticks.world["b"], np.array([90.0, 180.0, 180.0, 270.0, 0.0])
)
np.testing.assert_equal(
lat.ticks.world["l"], np.array([-90.0, -60.0, -30.0, 0.0, 30.0, 60.0, 90.0])
)
def test_coord_meta_wcsapi():
wcs = LowLevelWCS5D()
wcs.pixel_dim = 5
_, coord_meta = transform_coord_meta_from_wcs(
wcs, RectangularFrame, slices=[0, 0, "x", "y", 0]
)
assert coord_meta["name"] == [
("em.freq", "Frequency"),
"time",
("pos.eq.ra", "RA"),
("pos.eq.dec", "DEC"),
"phys.polarization.stokes",
]
assert coord_meta["type"] == ["scalar", "scalar", "longitude", "latitude", "scalar"]
assert coord_meta["wrap"] == [None, None, None, None, None]
assert coord_meta["unit"] == [
u.Unit("Hz"),
u.Unit("d"),
u.Unit("deg"),
u.Unit("deg"),
u.one,
]
assert coord_meta["visible"] == [True, True, True, True, True]
assert coord_meta["format_unit"] == [
u.Unit("Hz"),
u.Unit("d"),
u.Unit("hourangle"),
u.Unit("deg"),
u.one,
]
assert coord_meta["default_axislabel_position"] == ["b", "l", "t", "r", ""]
assert coord_meta["default_ticklabel_position"] == ["b", "l", "t", "r", ""]
assert coord_meta["default_ticks_position"] == ["b", "l", "t", "r", ""]
assert coord_meta["default_axis_label"] == [
"Frequency",
"time",
"RA",
"DEC",
"phys.polarization.stokes",
]
@figure_test
def test_wcsapi_5d_with_names(plt_close):
# Test for plotting image and also setting values of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=LowLevelWCS5D())
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
return fig
class LowLevelWCSCelestial2D(BaseLowLevelWCS):
# APE 14 WCS that has celestial coordinates that are deliberately not in degrees
@property
def pixel_n_dim(self):
return 2
@property
def world_n_dim(self):
return 2
@property
def world_axis_physical_types(self):
return [
"pos.eq.ra",
"pos.eq.dec",
]
@property
def world_axis_units(self):
return ["arcsec", "arcsec"]
@property
def world_axis_names(self):
return ["RA", "DEC"]
# Since the units are in arcsec, we can just go for an identity transform
# where 1 pixel = 1" since this is not completely unrealistic
def pixel_to_world_values(self, *pixel_arrays):
return pixel_arrays
def world_to_pixel_values(self, *world_arrays):
return world_arrays
@property
def world_axis_object_components(self):
return [
("celestial", 0, "spherical.lon.arcsec"),
("celestial", 1, "spherical.lat.arcsec"),
]
@property
def world_axis_object_classes(self):
return {
"celestial": (SkyCoord, (), {"unit": "arcsec"}),
}
@figure_test
def test_wcsapi_2d_celestial_arcsec(plt_close):
# Regression test for plot_coord/scatter_coord with celestial WCS that is not in degrees
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.1, 0.8, 0.8], projection=LowLevelWCSCelestial2D())
ax.set_xlim(-0.5, 200.5)
ax.set_ylim(-0.5, 200.5)
ax.coords[0].set_format_unit("arcsec")
ax.plot_coord(SkyCoord([50, 150], [100, 100], unit="arcsec"), "ro")
ax.scatter_coord(
SkyCoord([100, 100], [50, 150], unit="arcsec"), color="green", s=50
)
return fig
|
dd51570a44ff54eca813096aa0bcf0aa3efc6d28a2b9726834c77bfd3a07d712 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the Slicing mixin to the NDData class.
from astropy import log
from astropy.wcs.wcsapi import BaseHighLevelWCS # noqa: F401
from astropy.wcs.wcsapi import BaseLowLevelWCS # noqa: F401
from astropy.wcs.wcsapi import HighLevelWCSWrapper, SlicedLowLevelWCS
__all__ = ["NDSlicingMixin"]
class NDSlicingMixin:
"""Mixin to provide slicing on objects using the `NDData`
interface.
The ``data``, ``mask``, ``uncertainty`` and ``wcs`` will be sliced, if
set and sliceable. The ``unit`` and ``meta`` will be untouched. The return
will be a reference and not a copy, if possible.
Examples
--------
Using this Mixin with `~astropy.nddata.NDData`:
>>> from astropy.nddata import NDData, NDSlicingMixin
>>> class NDDataSliceable(NDSlicingMixin, NDData):
... pass
Slicing an instance containing data::
>>> nd = NDDataSliceable([1,2,3,4,5])
>>> nd[1:3]
NDDataSliceable([2, 3])
Also the other attributes are sliced for example the ``mask``::
>>> import numpy as np
>>> mask = np.array([True, False, True, True, False])
>>> nd2 = NDDataSliceable(nd, mask=mask)
>>> nd2slc = nd2[1:3]
>>> nd2slc[nd2slc.mask]
NDDataSliceable([3])
Be aware that changing values of the sliced instance will change the values
of the original::
>>> nd3 = nd2[1:3]
>>> nd3.data[0] = 100
>>> nd2
NDDataSliceable([ 1, 100, 3, 4, 5])
See Also
--------
NDDataRef
NDDataArray
"""
def __getitem__(self, item):
# Abort slicing if the data is a single scalar.
if self.data.shape == ():
raise TypeError("scalars cannot be sliced.")
# Let the other methods handle slicing.
kwargs = self._slice(item)
return self.__class__(**kwargs)
def _slice(self, item):
"""Collects the sliced attributes and passes them back as `dict`.
It passes uncertainty, mask and wcs to their appropriate ``_slice_*``
method, while ``meta`` and ``unit`` are simply taken from the original.
The data is assumed to be sliceable and is sliced directly.
When possible the return should *not* be a copy of the data but a
reference.
Parameters
----------
item : slice
The slice passed to ``__getitem__``.
Returns
-------
dict :
Containing all the attributes after slicing - ready to
use them to create ``self.__class__.__init__(**kwargs)`` in
``__getitem__``.
"""
kwargs = {}
kwargs["data"] = self.data[item]
# Try to slice some attributes
kwargs["uncertainty"] = self._slice_uncertainty(item)
kwargs["mask"] = self._slice_mask(item)
kwargs["wcs"] = self._slice_wcs(item)
# Attributes which are copied and not intended to be sliced
kwargs["unit"] = self.unit
kwargs["meta"] = self.meta
return kwargs
def _slice_uncertainty(self, item):
if self.uncertainty is None:
return None
try:
return self.uncertainty[item]
except TypeError:
# Catching TypeError in case the object has no __getitem__ method.
# But let IndexError raise.
log.info("uncertainty cannot be sliced.")
return self.uncertainty
def _slice_mask(self, item):
if self.mask is None:
return None
try:
return self.mask[item]
except TypeError:
log.info("mask cannot be sliced.")
return self.mask
def _slice_wcs(self, item):
if self.wcs is None:
return None
try:
llwcs = SlicedLowLevelWCS(self.wcs.low_level_wcs, item)
return HighLevelWCSWrapper(llwcs)
except Exception as err:
self._handle_wcs_slicing_error(err, item)
# Implement this in a method to allow subclasses to customise the error.
def _handle_wcs_slicing_error(self, err, item):
raise ValueError(
f"Slicing the WCS object with the slice '{item}' "
"failed, if you want to slice the NDData object without the WCS, you "
"can remove by setting `NDData.wcs = None` and then retry."
) from err
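# Illustrative sketch (not part of astropy itself): the hook above lets
# subclasses customise what happens when WCS slicing fails. The class name
# below is hypothetical; it logs the problem and drops the WCS instead of
# raising. Because the hook does not raise, ``_slice_wcs`` falls through and
# the sliced instance ends up with ``wcs=None``.
class _NDSlicingDropWCSExample(NDSlicingMixin):
    def _handle_wcs_slicing_error(self, err, item):
        log.info(f"WCS could not be sliced with {item!r} ({err}); dropping it.")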
|
b75dddf7259836f4293dd9fcde7e3bbcd11a30219a444d2a79d09452c6877efa | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the Arithmetic mixin to the NDData class.
import warnings
from copy import deepcopy
import numpy as np
from astropy.nddata.nduncertainty import NDUncertainty
from astropy.units import dimensionless_unscaled
from astropy.utils import format_doc, sharedmethod
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.masked import Masked
__all__ = ["NDArithmeticMixin"]
# Global so it doesn't pollute the class dict unnecessarily:
# Docstring templates for add, subtract, multiply, divide methods.
_arit_doc = """
Performs {name} by evaluating ``self`` {op} ``operand``.
Parameters
----------
operand, operand2 : `NDData`-like instance
If ``operand2`` is ``None`` or not given it will perform the operation
``self`` {op} ``operand``.
If ``operand2`` is given it will perform ``operand`` {op} ``operand2``.
If the method was called on a class rather than on the instance
``operand2`` must be given.
propagate_uncertainties : `bool` or ``None``, optional
If ``None`` the result will have no uncertainty. If ``False`` the
result will have a copied version of the first operand that has an
uncertainty. If ``True`` the result will have a correctly propagated
uncertainty from the uncertainties of the operands but this assumes
that the uncertainties are `NDUncertainty`-like. Default is ``True``.
.. versionchanged:: 1.2
This parameter must be given as keyword-parameter. Using it as
positional parameter is deprecated.
``None`` was added as valid parameter value.
handle_mask : callable, ``'first_found'`` or ``None``, optional
If ``None`` the result will have no mask. If ``'first_found'`` the
result will have a copied version of the first operand that has a
mask). If it is a callable then the specified callable must
create the results ``mask`` and if necessary provide a copy.
Default is `numpy.logical_or`.
.. versionadded:: 1.2
handle_meta : callable, ``'first_found'`` or ``None``, optional
If ``None`` the result will have no meta. If ``'first_found'`` the
result will have a copied version of the first operand that has a
(not empty) meta. If it is a callable then the specified callable must
create the results ``meta`` and if necessary provide a copy.
Default is ``None``.
.. versionadded:: 1.2
compare_wcs : callable, ``'first_found'`` or ``None``, optional
If ``None`` the result will have no wcs and no comparison between
the wcs of the operands is made. If ``'first_found'`` the
result will have a copied version of the first operand that has a
wcs. If it is a callable then the specified callable must
compare the ``wcs``. The resulting ``wcs`` will be like if ``False``
was given otherwise it raises a ``ValueError`` if the comparison was
not successful. Default is ``'first_found'``.
.. versionadded:: 1.2
uncertainty_correlation : number or `~numpy.ndarray`, optional
The correlation between the two operands is used for correct error
propagation for correlated data as given in:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulas
Default is 0.
.. versionadded:: 1.2
kwargs :
Any other parameter that should be passed to the callables used.
Returns
-------
result : `~astropy.nddata.NDData`-like
The resulting dataset
Notes
-----
If a ``callable`` is used for ``mask``, ``wcs`` or ``meta`` the
callable must accept the corresponding attributes as first two
    parameters. If the callable also needs additional parameters, these can be
    defined as ``kwargs`` and must start with ``"wcs_"`` (for the wcs callable) or
    ``"meta_"`` (for the meta callable). This prefix is removed before the
    callable is called.
``"first_found"`` can also be abbreviated with ``"ff"``.
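    As an illustrative sketch (``combine_meta`` and the operands below are
    hypothetical, not part of the API), extra parameters for a ``handle_meta``
    callable are passed with the ``meta_`` prefix::

        def combine_meta(meta_self, meta_operand, keep=None):
            # keep only the requested keys from the first operand's meta
            return dict((k, meta_self[k]) for k in (keep or []) if k in meta_self)

        ndd1.add(ndd2, handle_meta=combine_meta, meta_keep=['exptime'])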
"""
class NDArithmeticMixin:
"""
Mixin class to add arithmetic to an NDData object.
When subclassing, be sure to list the superclasses in the correct order
so that the subclass sees NDData as the main superclass. See
`~astropy.nddata.NDDataArray` for an example.
Notes
-----
This class only aims at covering the most common cases so there are certain
restrictions on the saved attributes::
- ``uncertainty`` : has to be something that has a `NDUncertainty`-like
interface for uncertainty propagation
- ``mask`` : has to be something that can be used by a bitwise ``or``
operation.
- ``wcs`` : has to implement a way of comparing with ``=`` to allow
the operation.
    There is, however, a workaround that allows disabling the handling of a
    specific attribute and simply setting the result's attribute to ``None``
    or copying the existing attribute of one operand (neglecting the other).
    For example, for uncertainties not implementing an `NDUncertainty`-like
    interface you can alter the ``propagate_uncertainties`` parameter in
    :meth:`NDArithmeticMixin.add`. ``None`` means that the result will have no
    uncertainty, ``False`` means it takes the uncertainty of the first operand
    (or, if that does not exist, of the second operand) as the result's
    uncertainty. This behavior is also explained in the docstring for the
    different arithmetic operations.
    Decomposing the units is not attempted, mainly due to the internal
    mechanics of `~astropy.units.Quantity`, so the resulting data might have
    units like ``km/m`` if you divide, for example, 100 km by 5 m. This
    Mixin simply adopts that behavior.
Examples
--------
Using this Mixin with `~astropy.nddata.NDData`:
>>> from astropy.nddata import NDData, NDArithmeticMixin
>>> class NDDataWithMath(NDArithmeticMixin, NDData):
... pass
Using it with one operand on an instance::
>>> ndd = NDDataWithMath(100)
>>> ndd.add(20)
NDDataWithMath(120)
    Using it with two operands on an instance::
>>> ndd = NDDataWithMath(-4)
>>> ndd.divide(1, ndd)
NDDataWithMath(-0.25)
    Using it as a classmethod requires two operands::
>>> NDDataWithMath.subtract(5, 4)
NDDataWithMath(1)
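    A custom ``handle_mask`` callable may also be supplied; as an
    illustrative sketch (the data and masks below are made up)::

        >>> import numpy as np
        >>> ndd1 = NDDataWithMath([1, 2, 3], mask=np.array([True, False, False]))
        >>> ndd2 = NDDataWithMath([4, 5, 6], mask=np.array([False, False, True]))
        >>> result = ndd1.add(ndd2, handle_mask=np.logical_and)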
"""
def _arithmetic(
self,
operation,
operand,
propagate_uncertainties=True,
handle_mask=np.logical_or,
handle_meta=None,
uncertainty_correlation=0,
compare_wcs="first_found",
operation_ignores_mask=False,
axis=None,
**kwds,
):
"""
Base method which calculates the result of the arithmetic operation.
This method determines the result of the arithmetic operation on the
``data`` including their units and then forwards to other methods
to calculate the other properties for the result (like uncertainty).
Parameters
----------
operation : callable
The operation that is performed on the `NDData`. Supported are
`numpy.add`, `numpy.subtract`, `numpy.multiply` and
`numpy.true_divide`.
operand : same type (class) as self
see :meth:`NDArithmeticMixin.add`
propagate_uncertainties : `bool` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
handle_mask : callable, ``'first_found'`` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
handle_meta : callable, ``'first_found'`` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
compare_wcs : callable, ``'first_found'`` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
uncertainty_correlation : ``Number`` or `~numpy.ndarray`, optional
see :meth:`NDArithmeticMixin.add`
operation_ignores_mask : bool, optional
When True, masked values will be excluded from operations;
otherwise the operation will be performed on all values,
including masked ones.
axis : int or tuple of ints, optional
axis or axes over which to perform collapse operations like min, max, sum or mean.
kwargs :
Any other parameter that should be passed to the
different :meth:`NDArithmeticMixin._arithmetic_mask` (or wcs, ...)
methods.
Returns
-------
result : ndarray or `~astropy.units.Quantity`
The resulting data as array (in case both operands were without
unit) or as quantity if at least one had a unit.
kwargs : `dict`
The kwargs should contain all the other attributes (besides data
and unit) needed to create a new instance for the result. Creating
the new instance is up to the calling method, for example
:meth:`NDArithmeticMixin.add`.
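        As a rough sketch of how a calling method typically wraps the return
        value (this mirrors what ``_prepare_then_do_arithmetic`` does)::

            result, init_kwds = self._arithmetic(np.add, operand)
            new_instance = self.__class__(result, **init_kwds)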
"""
# Find the appropriate keywords for the appropriate method (not sure
# if data and uncertainty are ever used ...)
kwds2 = {"mask": {}, "meta": {}, "wcs": {}, "data": {}, "uncertainty": {}}
for i in kwds:
splitted = i.split("_", 1)
try:
kwds2[splitted[0]][splitted[1]] = kwds[i]
except KeyError:
raise KeyError(f"Unknown prefix {splitted[0]} for parameter {i}")
kwargs = {}
# First check that the WCS allows the arithmetic operation
if compare_wcs is None:
kwargs["wcs"] = None
elif compare_wcs in ["ff", "first_found"]:
if self.wcs is None and hasattr(operand, "wcs"):
kwargs["wcs"] = deepcopy(operand.wcs)
else:
kwargs["wcs"] = deepcopy(self.wcs)
else:
kwargs["wcs"] = self._arithmetic_wcs(
operation, operand, compare_wcs, **kwds2["wcs"]
)
# collapse operations on masked quantities/arrays which are supported by
# the astropy.utils.masked or np.ma modules should use those modules to
# do the arithmetic on the data and propagate masks.
use_masked_arith = operand is None and self.mask is not None
if use_masked_arith:
# if we're *including* masked values in the operation,
# use the astropy Masked module:
if not operation_ignores_mask:
# call the numpy operation on a Masked NDDataArray
# representation of the nddata, with units when available:
if self.unit is not None and not hasattr(self.data, "unit"):
masked_input = Masked(self.data << self.unit, mask=self.mask)
else:
masked_input = Masked(self.data, mask=self.mask)
# if we're *excluding* masked values in the operation,
# we use the numpy.ma module:
else:
masked_input = np.ma.masked_array(self.data, self.mask)
result = operation(masked_input, axis=axis)
# since result may be e.g. a float if operation is a sum over all axes,
# let's ensure that result is a masked array, since we'll assume this later:
if not hasattr(result, "mask"):
result = np.ma.masked_array(
result, mask=np.zeros_like(result, dtype=bool)
)
else:
            # Then calculate the resulting data (which can, but does not
            # need to, be a quantity).
result = self._arithmetic_data(
operation, operand, axis=axis, **kwds2["data"]
)
# preserve original units
if not hasattr(result, "unit") and hasattr(self, "unit"):
kwargs["unit"] = self.unit
# Determine the other properties
if propagate_uncertainties is None:
kwargs["uncertainty"] = None
elif not propagate_uncertainties:
if self.uncertainty is None:
kwargs["uncertainty"] = deepcopy(operand.uncertainty)
else:
kwargs["uncertainty"] = deepcopy(self.uncertainty)
else:
kwargs["uncertainty"] = self._arithmetic_uncertainty(
operation,
operand,
result,
uncertainty_correlation,
axis=axis,
**kwds2["uncertainty"],
)
# If both are None, there is nothing to do.
if self.psf is not None or (operand is not None and operand.psf is not None):
warnings.warn(
f"Not setting psf attribute during {operation.__name__}.",
AstropyUserWarning,
)
if handle_mask is None:
pass
elif hasattr(result, "mask"):
# if numpy.ma or astropy.utils.masked is being used, the constructor
# will pick up the mask from the masked object:
kwargs["mask"] = None
elif handle_mask in ["ff", "first_found"]:
if self.mask is None:
kwargs["mask"] = deepcopy(operand.mask)
else:
kwargs["mask"] = deepcopy(self.mask)
else:
kwargs["mask"] = self._arithmetic_mask(
operation, operand, handle_mask, axis=axis, **kwds2["mask"]
)
if handle_meta is None:
kwargs["meta"] = None
elif handle_meta in ["ff", "first_found"]:
if not self.meta:
kwargs["meta"] = deepcopy(operand.meta)
else:
kwargs["meta"] = deepcopy(self.meta)
else:
kwargs["meta"] = self._arithmetic_meta(
operation, operand, handle_meta, **kwds2["meta"]
)
# Wrap the individual results into a new instance of the same class.
return result, kwargs
def _arithmetic_data(self, operation, operand, **kwds):
"""
Calculate the resulting data.
Parameters
----------
operation : callable
see `NDArithmeticMixin._arithmetic` parameter description.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
kwds :
Additional parameters.
Returns
-------
result_data : ndarray or `~astropy.units.Quantity`
If both operands had no unit the resulting data is a simple numpy
array, but if any of the operands had a unit the return is a
Quantity.
"""
# Do the calculation with or without units
if self.unit is None:
if operand.unit is None:
result = operation(self.data, operand.data)
else:
result = operation(
self.data << dimensionless_unscaled, operand.data << operand.unit
)
elif hasattr(operand, "unit"):
if operand.unit is not None:
result = operation(self.data << self.unit, operand.data << operand.unit)
else:
result = operation(
self.data << self.unit, operand.data << dimensionless_unscaled
)
elif operand is not None:
result = operation(self.data << self.unit, operand.data << operand.unit)
else:
result = operation(self.data, axis=kwds["axis"])
return result
def _arithmetic_uncertainty(self, operation, operand, result, correlation, **kwds):
"""
Calculate the resulting uncertainty.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
result : `~astropy.units.Quantity` or `~numpy.ndarray`
The result of :meth:`NDArithmeticMixin._arithmetic_data`.
correlation : number or `~numpy.ndarray`
see :meth:`NDArithmeticMixin.add` parameter description.
kwds :
Additional parameters.
Returns
-------
result_uncertainty : `NDUncertainty` subclass instance or None
The resulting uncertainty already saved in the same `NDUncertainty`
subclass that ``self`` had (or ``operand`` if self had no
uncertainty). ``None`` only if both had no uncertainty.
"""
# Make sure these uncertainties are NDUncertainties so this kind of
# propagation is possible.
if self.uncertainty is not None and not isinstance(
self.uncertainty, NDUncertainty
):
raise TypeError(
"Uncertainty propagation is only defined for "
"subclasses of NDUncertainty."
)
if (
operand is not None
and operand.uncertainty is not None
and not isinstance(operand.uncertainty, NDUncertainty)
):
raise TypeError(
"Uncertainty propagation is only defined for "
"subclasses of NDUncertainty."
)
# Now do the uncertainty propagation
# TODO: There is no enforced requirement that actually forbids the
# uncertainty to have negative entries but with correlation the
# sign of the uncertainty DOES matter.
if self.uncertainty is None and (
not hasattr(operand, "uncertainty") or operand.uncertainty is None
):
# Neither has uncertainties so the result should have none.
return None
elif self.uncertainty is None:
# Create a temporary uncertainty to allow uncertainty propagation
# to yield the correct results. (issue #4152)
self.uncertainty = operand.uncertainty.__class__(None)
result_uncert = self.uncertainty.propagate(
operation, operand, result, correlation
)
# Delete the temporary uncertainty again.
self.uncertainty = None
return result_uncert
elif operand is not None and operand.uncertainty is None:
# As with self.uncertainty is None but the other way around.
operand.uncertainty = self.uncertainty.__class__(None)
result_uncert = self.uncertainty.propagate(
operation, operand, result, correlation
)
operand.uncertainty = None
return result_uncert
else:
# Both have uncertainties so just propagate.
# only supply the axis kwarg if one has been specified for a collapsing operation
axis_kwarg = dict(axis=kwds["axis"]) if "axis" in kwds else dict()
return self.uncertainty.propagate(
operation, operand, result, correlation, **axis_kwarg
)
def _arithmetic_mask(self, operation, operand, handle_mask, axis=None, **kwds):
"""
Calculate the resulting mask.
This is implemented as the piecewise ``or`` operation if both have a
mask.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
By default, the ``operation`` will be ignored.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
handle_mask : callable
see :meth:`NDArithmeticMixin.add`
kwds :
Additional parameters given to ``handle_mask``.
Returns
-------
result_mask : any type
If only one mask was present this mask is returned.
If neither had a mask ``None`` is returned. Otherwise
``handle_mask`` must create (and copy) the returned mask.
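        For instance (purely illustrative), with the default
        ``handle_mask=np.logical_or`` two boolean masks are combined
        element-wise::

            np.logical_or(np.array([True, False]), np.array([False, False]))
            # -> array([ True, False])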
"""
# If only one mask is present we need not bother about any type checks
if (
self.mask is None and operand is not None and operand.mask is None
) or handle_mask is None:
return None
elif self.mask is None and operand is not None:
# Make a copy so there is no reference in the result.
return deepcopy(operand.mask)
elif operand is None:
return deepcopy(self.mask)
else:
            # Now let's calculate the resulting mask (operation enforces copy)
return handle_mask(self.mask, operand.mask, **kwds)
def _arithmetic_wcs(self, operation, operand, compare_wcs, **kwds):
"""
Calculate the resulting wcs.
        There is actually no calculation involved, but it is a good place to
        compare the wcs information of both operands. This is currently not
        working properly with `~astropy.wcs.WCS` (which is the suggested
        class for storing the wcs property) but it will not break it either.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
By default, the ``operation`` will be ignored.
operand : `NDData` instance or subclass
The second operand wrapped in an instance of the same class as
self.
compare_wcs : callable
see :meth:`NDArithmeticMixin.add` parameter description.
kwds :
Additional parameters given to ``compare_wcs``.
Raises
------
ValueError
If ``compare_wcs`` returns ``False``.
Returns
-------
result_wcs : any type
The ``wcs`` of the first operand is returned.
"""
# ok, not really arithmetic but we need to check which wcs makes sense
# for the result and this is an ideal place to compare the two WCS,
# too.
# I'll assume that the comparison returned None or False in case they
# are not equal.
if not compare_wcs(self.wcs, operand.wcs, **kwds):
raise ValueError("WCS are not equal.")
return deepcopy(self.wcs)
def _arithmetic_meta(self, operation, operand, handle_meta, **kwds):
"""
Calculate the resulting meta.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
By default, the ``operation`` will be ignored.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
handle_meta : callable
see :meth:`NDArithmeticMixin.add`
kwds :
Additional parameters given to ``handle_meta``.
Returns
-------
result_meta : any type
The result of ``handle_meta``.
"""
# Just return what handle_meta does with both of the metas.
return handle_meta(self.meta, operand.meta, **kwds)
@sharedmethod
@format_doc(_arit_doc, name="addition", op="+")
def add(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(np.add, operand, operand2, **kwargs)
@sharedmethod
@format_doc(_arit_doc, name="subtraction", op="-")
def subtract(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(
np.subtract, operand, operand2, **kwargs
)
@sharedmethod
@format_doc(_arit_doc, name="multiplication", op="*")
def multiply(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(
np.multiply, operand, operand2, **kwargs
)
@sharedmethod
@format_doc(_arit_doc, name="division", op="/")
def divide(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(
np.true_divide, operand, operand2, **kwargs
)
@sharedmethod
def sum(self, **kwargs):
return self._prepare_then_do_arithmetic(np.sum, **kwargs)
@sharedmethod
def mean(self, **kwargs):
return self._prepare_then_do_arithmetic(np.mean, **kwargs)
@sharedmethod
def min(self, **kwargs):
        # use the provided propagate_uncertainties if available, otherwise the
        # default is None (no uncertainty in the result):
propagate_uncertainties = kwargs.pop("propagate_uncertainties", None)
return self._prepare_then_do_arithmetic(
np.min, propagate_uncertainties=propagate_uncertainties, **kwargs
)
@sharedmethod
def max(self, **kwargs):
        # use the provided propagate_uncertainties if available, otherwise the
        # default is None (no uncertainty in the result):
propagate_uncertainties = kwargs.pop("propagate_uncertainties", None)
return self._prepare_then_do_arithmetic(
np.max, propagate_uncertainties=propagate_uncertainties, **kwargs
)
@sharedmethod
def _prepare_then_do_arithmetic(
self_or_cls, operation, operand=None, operand2=None, **kwargs
):
"""Intermediate method called by public arithmetic (i.e. ``add``)
before the processing method (``_arithmetic``) is invoked.
.. warning::
Do not override this method in subclasses.
This method checks if it was called as instance or as class method and
then wraps the operands and the result from ``_arithmetic`` in the
appropriate subclass.
Parameters
----------
self_or_cls : instance or class
``sharedmethod`` behaves like a normal method if called on the
instance (then this parameter is ``self``) but like a classmethod
when called on the class (then this parameter is ``cls``).
        operation : callable
The operation (normally a numpy-ufunc) that represents the
appropriate action.
operand, operand2, kwargs :
See for example ``add``.
        Returns
        -------
result : `~astropy.nddata.NDData`-like
Depending how this method was called either ``self_or_cls``
(called on class) or ``self_or_cls.__class__`` (called on instance)
is the NDData-subclass that is used as wrapper for the result.
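        As a rough illustration (using the ``NDDataWithMath`` example class
        from the class docstring), the two call styles handled here are::

            ndd.add(other)                  # instance call; operand2 may be omitted
            NDDataWithMath.add(ndd, other)  # class call; both operands are required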
"""
# DO NOT OVERRIDE THIS METHOD IN SUBCLASSES.
if isinstance(self_or_cls, NDArithmeticMixin):
# True means it was called on the instance, so self_or_cls is
# a reference to self
cls = self_or_cls.__class__
if operand2 is None:
# Only one operand was given. Set operand2 to operand and
# operand to self so that we call the appropriate method of the
# operand.
operand2 = operand
operand = self_or_cls
else:
                # Convert the first operand to the class of this method.
                # This is important so that the correct _arithmetic method
                # of that class is always called later on.
operand = cls(operand)
else:
# It was used as classmethod so self_or_cls represents the cls
cls = self_or_cls
# It was called on the class so we expect two operands!
if operand2 is None:
raise TypeError(
"operand2 must be given when the method isn't "
"called on an instance."
)
# Convert to this class. See above comment why.
operand = cls(operand)
# At this point operand, operand2, kwargs and cls are determined.
if operand2 is not None and not issubclass(
operand2.__class__, NDArithmeticMixin
):
# Let's try to convert operand2 to the class of operand to allow for
# arithmetic operations with numbers, lists, numpy arrays, numpy masked
# arrays, astropy quantities, masked quantities and of other subclasses
# of NDData.
operand2 = cls(operand2)
# Now call the _arithmetics method to do the arithmetic.
result, init_kwds = operand._arithmetic(operation, operand2, **kwargs)
elif issubclass(operand2.__class__, NDArithmeticMixin):
# calling as class method:
result, init_kwds = cls._arithmetic(
operand,
operation,
operand2,
**kwargs,
)
else:
# otherwise call the _arithmetic method on self for a collapse operation:
# for collapse operations, use astropy.utils.masked rather than handle_mask
result, init_kwds = self_or_cls._arithmetic(
operation,
operand2,
**kwargs,
)
# Return a new class based on the result
return cls(result, **init_kwds)
|
2da7ae81613ab2d812961ec7baf83f4c34c468502b85ccd64625b657b836854b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the I/O mixin to the NDData class.
from astropy.io import registry
__all__ = ["NDIOMixin"]
__doctest_skip__ = ["NDDataRead", "NDDataWrite"]
class NDDataRead(registry.UnifiedReadWrite):
"""Read and parse gridded N-dimensional data and return as an NDData-derived
object.
This function provides the NDDataBase interface to the astropy unified I/O
layer. This allows easily reading a file in the supported data formats,
for example::
>>> from astropy.nddata import CCDData
>>> dat = CCDData.read('image.fits')
    Get help on the available readers for ``CCDData`` using the ``help()`` method::
>>> CCDData.read.help() # Get help reading CCDData and list supported formats
>>> CCDData.read.help('fits') # Get detailed help on CCDData FITS reader
>>> CCDData.read.list_formats() # Print list of available formats
For more information see:
- https://docs.astropy.org/en/stable/nddata
- https://docs.astropy.org/en/stable/io/unified.html
Parameters
----------
*args : tuple, optional
Positional arguments passed through to data reader. If supplied the
first argument is the input filename.
format : str, optional
File format specifier.
cache : bool, optional
Caching behavior if file is a URL.
**kwargs : dict, optional
Keyword arguments passed through to data reader.
Returns
-------
    out : `NDData` subclass
        NDData-based object corresponding to file contents.
Notes
-----
"""
def __init__(self, instance, cls):
super().__init__(instance, cls, "read", registry=None)
# uses default global registry
def __call__(self, *args, **kwargs):
return self.registry.read(self._cls, *args, **kwargs)
class NDDataWrite(registry.UnifiedReadWrite):
"""Write this CCDData object out in the specified format.
This function provides the NDData interface to the astropy unified I/O
layer. This allows easily writing a file in many supported data formats
using syntax such as::
>>> from astropy.nddata import CCDData
>>> dat = CCDData(np.zeros((12, 12)), unit='adu') # 12x12 image of zeros
>>> dat.write('zeros.fits')
    Get help on the available writers for ``CCDData`` using the ``help()`` method::
>>> CCDData.write.help() # Get help writing CCDData and list supported formats
>>> CCDData.write.help('fits') # Get detailed help on CCDData FITS writer
>>> CCDData.write.list_formats() # Print list of available formats
For more information see:
- https://docs.astropy.org/en/stable/nddata
- https://docs.astropy.org/en/stable/io/unified.html
Parameters
----------
*args : tuple, optional
Positional arguments passed through to data writer. If supplied the
first argument is the output filename.
format : str, optional
File format specifier.
**kwargs : dict, optional
Keyword arguments passed through to data writer.
Notes
-----
"""
def __init__(self, instance, cls):
super().__init__(instance, cls, "write", registry=None)
# uses default global registry
def __call__(self, *args, **kwargs):
self.registry.write(self._instance, *args, **kwargs)
class NDIOMixin:
"""
Mixin class to connect NDData to the astropy input/output registry.
This mixin adds two methods to its subclasses, ``read`` and ``write``.
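    As a minimal sketch (the subclass name is hypothetical and the import
    path is assumed), connecting a class to the unified I/O registry looks
    like::

        from astropy.nddata import NDData, NDIOMixin

        class NDDataWithIO(NDIOMixin, NDData):
            pass

    ``NDDataWithIO.read`` and the instance method ``write`` then dispatch
    to whatever readers and writers are registered for that class.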
"""
read = registry.UnifiedReadWriteMethod(NDDataRead)
write = registry.UnifiedReadWriteMethod(NDDataWrite)
|
55c87594b88874565c83a9cf01e465361bc0737bac2aa8ef7546d444ef1b4578 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from packaging.version import Version
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.nddata import (
CCDData,
Cutout2D,
NoOverlapError,
PartialOverlapError,
add_array,
extract_array,
overlap_slices,
subpixel_indices,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.wcs import WCS, Sip
from astropy.wcs.utils import proj_plane_pixel_area
test_positions = [
(10.52, 3.12),
(5.62, 12.97),
(31.33, 31.77),
(0.46, 0.94),
(20.45, 12.12),
(42.24, 24.42),
]
test_position_indices = [(0, 3), (0, 2), (4, 1), (4, 2), (4, 3), (3, 4)]
test_slices = [
slice(10.52, 3.12),
slice(5.62, 12.97),
slice(31.33, 31.77),
slice(0.46, 0.94),
slice(20.45, 12.12),
slice(42.24, 24.42),
]
subsampling = 5
test_pos_bad = [(-1, -4), (-2, 0), (6, 2), (6, 6)]
test_nonfinite_positions = [
(np.nan, np.nan),
(np.inf, np.inf),
(1, np.nan),
(np.nan, 2),
(2, -np.inf),
(-np.inf, 3),
]
def test_slices_different_dim():
"""Overlap from arrays with different number of dim is undefined."""
with pytest.raises(ValueError, match=".*the same number of dimensions.*"):
overlap_slices((4, 5, 6), (1, 2), (0, 0))
def test_slices_pos_different_dim():
"""Position must have same dim as arrays."""
with pytest.raises(ValueError, match=".*the same number of dimensions.*"):
overlap_slices((4, 5), (1, 2), (0, 0, 3))
@pytest.mark.parametrize("pos", test_pos_bad)
def test_slices_no_overlap(pos):
"""If there is no overlap between arrays, an error should be raised."""
with pytest.raises(NoOverlapError):
overlap_slices((5, 5), (2, 2), pos)
def test_slices_partial_overlap():
"""Compute a slice for partially overlapping arrays."""
temp = overlap_slices((5,), (3,), (0,))
assert temp == ((slice(0, 2, None),), (slice(1, 3, None),))
temp = overlap_slices((5,), (3,), (0,), mode="partial")
assert temp == ((slice(0, 2, None),), (slice(1, 3, None),))
for pos in [0, 4]:
with pytest.raises(
PartialOverlapError, match=".*Arrays overlap only partially.*"
):
temp = overlap_slices((5,), (3,), (pos,), mode="strict")
def test_slices_edges():
"""
Test overlap_slices when extracting along edges.
"""
slc_lg, slc_sm = overlap_slices((10, 10), (3, 3), (1, 1), mode="strict")
assert slc_lg[0].start == slc_lg[1].start == 0
assert slc_lg[0].stop == slc_lg[1].stop == 3
assert slc_sm[0].start == slc_sm[1].start == 0
assert slc_sm[0].stop == slc_sm[1].stop == 3
slc_lg, slc_sm = overlap_slices((10, 10), (3, 3), (8, 8), mode="strict")
assert slc_lg[0].start == slc_lg[1].start == 7
assert slc_lg[0].stop == slc_lg[1].stop == 10
assert slc_sm[0].start == slc_sm[1].start == 0
assert slc_sm[0].stop == slc_sm[1].stop == 3
# test (0, 0) shape
slc_lg, slc_sm = overlap_slices((10, 10), (0, 0), (0, 0))
assert slc_lg[0].start == slc_lg[0].stop == 0
assert slc_lg[1].start == slc_lg[1].stop == 0
assert slc_sm[0].start == slc_sm[0].stop == 0
assert slc_sm[1].start == slc_sm[1].stop == 0
slc_lg, slc_sm = overlap_slices((10, 10), (0, 0), (5, 5))
assert slc_lg[0].start == slc_lg[0].stop == 5
assert slc_lg[1].start == slc_lg[1].stop == 5
assert slc_sm[0].start == slc_sm[0].stop == 0
assert slc_sm[1].start == slc_sm[1].stop == 0
def test_slices_overlap_wrong_mode():
"""Call overlap_slices with non-existing mode."""
with pytest.raises(ValueError, match="^Mode can be only.*"):
overlap_slices((5,), (3,), (0,), mode="full")
@pytest.mark.parametrize("position", test_nonfinite_positions)
def test_slices_nonfinite_position(position):
"""
A ValueError should be raised if position contains a non-finite
value.
"""
with pytest.raises(ValueError):
overlap_slices((7, 7), (3, 3), position)
def test_extract_array_even_shape_rounding():
"""
Test overlap_slices (via extract_array) for rounding with an
even-shaped extraction.
"""
data = np.arange(10)
shape = (2,)
positions_expected = [
(1.49, (1, 2)),
(1.5, (1, 2)),
(1.501, (1, 2)),
(1.99, (1, 2)),
(2.0, (1, 2)),
(2.01, (2, 3)),
(2.49, (2, 3)),
(2.5, (2, 3)),
(2.501, (2, 3)),
(2.99, (2, 3)),
(3.0, (2, 3)),
(3.01, (3, 4)),
]
for pos, exp in positions_expected:
out = extract_array(data, shape, (pos,), mode="partial")
assert_array_equal(out, exp)
# test negative positions
positions = (-0.99, -0.51, -0.5, -0.49, -0.01, 0)
exp1 = (-99, 0)
exp2 = (0, 1)
expected = [exp1] * 6 + [exp2]
for pos, exp in zip(positions, expected):
out = extract_array(data, shape, (pos,), mode="partial", fill_value=-99)
assert_array_equal(out, exp)
def test_extract_array_odd_shape_rounding():
"""
Test overlap_slices (via extract_array) for rounding with an
    odd-shaped extraction.
"""
data = np.arange(10)
shape = (3,)
positions_expected = [
(1.49, (0, 1, 2)),
(1.5, (0, 1, 2)),
(1.501, (1, 2, 3)),
(1.99, (1, 2, 3)),
(2.0, (1, 2, 3)),
(2.01, (1, 2, 3)),
(2.49, (1, 2, 3)),
(2.5, (1, 2, 3)),
(2.501, (2, 3, 4)),
(2.99, (2, 3, 4)),
(3.0, (2, 3, 4)),
(3.01, (2, 3, 4)),
]
for pos, exp in positions_expected:
out = extract_array(data, shape, (pos,), mode="partial")
assert_array_equal(out, exp)
# test negative positions
positions = (-0.99, -0.51, -0.5, -0.49, -0.01, 0)
exp1 = (-99, -99, 0)
exp2 = (-99, 0, 1)
expected = [exp1] * 3 + [exp2] * 4
for pos, exp in zip(positions, expected):
out = extract_array(data, shape, (pos,), mode="partial", fill_value=-99)
assert_array_equal(out, exp)
def test_extract_array_wrong_mode():
"""Call extract_array with non-existing mode."""
with pytest.raises(
ValueError, match="Valid modes are 'partial', 'trim', and 'strict'."
):
extract_array(np.arange(4), (2,), (0,), mode="full")
def test_extract_array_1d_even():
"""Extract 1 d arrays.
All dimensions are treated the same, so we can test in 1 dim.
"""
assert np.all(
extract_array(np.arange(4), (2,), (0,), fill_value=-99) == np.array([-99, 0])
)
for i in [1, 2, 3]:
assert np.all(extract_array(np.arange(4), (2,), (i,)) == np.array([i - 1, i]))
assert np.all(
extract_array(np.arange(4.0), (2,), (4,), fill_value=np.inf)
== np.array([3, np.inf])
)
def test_extract_array_1d_odd():
"""Extract 1 d arrays.
All dimensions are treated the same, so we can test in 1 dim.
The first few lines test the most error-prone part: Extraction of an
array on the boundaries.
Additional tests (e.g. dtype of return array) are done for the last
case only.
"""
assert np.all(
extract_array(np.arange(4), (3,), (-1,), fill_value=-99)
== np.array([-99, -99, 0])
)
assert np.all(
extract_array(np.arange(4), (3,), (0,), fill_value=-99) == np.array([-99, 0, 1])
)
for i in [1, 2]:
assert np.all(
extract_array(np.arange(4), (3,), (i,)) == np.array([i - 1, i, i + 1])
)
assert np.all(
extract_array(np.arange(4), (3,), (3,), fill_value=-99) == np.array([2, 3, -99])
)
arrayin = np.arange(4.0)
extracted = extract_array(arrayin, (3,), (4,))
assert extracted[0] == 3
assert np.isnan(extracted[1]) # since I cannot use `==` to test for nan
assert extracted.dtype == arrayin.dtype
def test_extract_array_1d():
"""In 1d, shape can be int instead of tuple"""
assert np.all(
extract_array(np.arange(4), 3, (-1,), fill_value=-99) == np.array([-99, -99, 0])
)
assert np.all(
extract_array(np.arange(4), 3, -1, fill_value=-99) == np.array([-99, -99, 0])
)
def test_extract_Array_float():
"""integer is at bin center"""
for a in np.arange(2.51, 3.49, 0.1):
assert np.all(extract_array(np.arange(5), 3, a) == np.array([2, 3, 4]))
def test_extract_array_1d_trim():
"""Extract 1 d arrays.
All dimensions are treated the same, so we can test in 1 dim.
"""
assert np.all(extract_array(np.arange(4), (2,), (0,), mode="trim") == np.array([0]))
for i in [1, 2, 3]:
assert np.all(
extract_array(np.arange(4), (2,), (i,), mode="trim") == np.array([i - 1, i])
)
assert np.all(
extract_array(np.arange(4.0), (2,), (4,), mode="trim") == np.array([3])
)
@pytest.mark.parametrize("mode", ["partial", "trim", "strict"])
def test_extract_array_easy(mode):
"""
Test extract_array utility function.
Test by extracting an array of ones out of an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((5, 5))
large_test_array[3:8, 3:8] = small_test_array
extracted_array = extract_array(large_test_array, (5, 5), (5, 5), mode=mode)
assert np.all(extracted_array == small_test_array)
def test_extract_array_return_pos():
"""Check that the return position is calculated correctly.
    The result will differ by mode. All tests here are done in 1d because it's
easier to construct correct test cases.
"""
large_test_array = np.arange(5, dtype=float)
for i in np.arange(-1, 6):
extracted, new_pos = extract_array(
large_test_array, 3, i, mode="partial", return_position=True
)
assert new_pos == (1,)
# Now check an array with an even number
for i, expected in zip([1.49, 1.51, 3], [0.49, 0.51, 1]):
extracted, new_pos = extract_array(
large_test_array, (2,), (i,), mode="strict", return_position=True
)
assert new_pos == (expected,)
    # For mode='trim' the answer actually depends on the position
for i, expected in zip(np.arange(-1, 6), (-1, 0, 1, 1, 1, 1, 1)):
extracted, new_pos = extract_array(
large_test_array, (3,), (i,), mode="trim", return_position=True
)
assert new_pos == (expected,)
def test_extract_array_nan_fillvalue():
if Version(np.__version__) >= Version("1.20"):
msg = "fill_value cannot be set to np.nan if the input array has"
with pytest.raises(ValueError, match=msg):
extract_array(
np.ones((10, 10), dtype=int), (5, 5), (1, 1), fill_value=np.nan
)
def test_add_array_odd_shape():
"""
Test add_array utility function.
    Test by adding an array of ones into an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((5, 5))
large_test_array_ref = large_test_array.copy()
large_test_array_ref[3:8, 3:8] += small_test_array
added_array = add_array(large_test_array, small_test_array, (5, 5))
assert np.all(added_array == large_test_array_ref)
def test_add_array_even_shape():
"""
    Test add_array utility function with an even-shaped small array.
    Test by adding an array of ones into an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((4, 4))
large_test_array_ref = large_test_array.copy()
large_test_array_ref[0:2, 0:2] += small_test_array[2:4, 2:4]
added_array = add_array(large_test_array, small_test_array, (0, 0))
assert np.all(added_array == large_test_array_ref)
def test_add_array_equal_shape():
"""
    Test add_array utility function with an equal-shaped small array.
    Test by adding an array of ones into an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((11, 11))
large_test_array_ref = large_test_array.copy()
large_test_array_ref += small_test_array
added_array = add_array(large_test_array, small_test_array, (5, 5))
assert np.all(added_array == large_test_array_ref)
@pytest.mark.parametrize(
("position", "subpixel_index"), zip(test_positions, test_position_indices)
)
def test_subpixel_indices(position, subpixel_index):
"""
Test subpixel_indices utility function.
Test by asserting that the function returns correct results for
given test values.
"""
assert np.all(subpixel_indices(position, subsampling) == subpixel_index)
class TestCutout2D:
def setup_class(self):
self.data = np.arange(20.0).reshape(5, 4)
self.position = SkyCoord("13h11m29.96s -01d19m18.7s", frame="icrs")
wcs = WCS(naxis=2)
rho = np.pi / 3.0
scale = 0.05 / 3600.0
wcs.wcs.cd = [
[scale * np.cos(rho), -scale * np.sin(rho)],
[scale * np.sin(rho), scale * np.cos(rho)],
]
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.crval = [
self.position.ra.to_value(u.deg),
self.position.dec.to_value(u.deg),
]
wcs.wcs.crpix = [3, 3]
self.wcs = wcs
# add SIP
sipwcs = wcs.deepcopy()
sipwcs.wcs.ctype = ["RA---TAN-SIP", "DEC--TAN-SIP"]
a = np.array(
[
[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
[-2.81029767e-13, 0.0, 0.0, 0.0, 0.0],
]
)
b = np.array(
[
[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0],
]
)
sipwcs.sip = Sip(a, b, None, None, wcs.wcs.crpix)
sipwcs.wcs.set()
self.sipwcs = sipwcs
def test_cutout(self):
sizes = [
3,
3 * u.pixel,
(3, 3),
(3 * u.pixel, 3 * u.pix),
(3.0, 3 * u.pixel),
(2.9, 3.3),
]
for size in sizes:
position = (2.1, 1.9)
c = Cutout2D(self.data, position, size)
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 10
assert c.origin_original == (1, 1)
assert c.origin_cutout == (0, 0)
assert c.input_position_original == position
assert_allclose(c.input_position_cutout, (1.1, 0.9))
assert c.position_original == (2.0, 2.0)
assert c.position_cutout == (1.0, 1.0)
assert c.center_original == (2.0, 2.0)
assert c.center_cutout == (1.0, 1.0)
assert c.bbox_original == ((1, 3), (1, 3))
assert c.bbox_cutout == ((0, 2), (0, 2))
assert c.slices_original == (slice(1, 4), slice(1, 4))
assert c.slices_cutout == (slice(0, 3), slice(0, 3))
def test_size_length(self):
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), (1, 1, 1))
def test_size_units(self):
for size in [3 * u.cm, (3, 3 * u.K)]:
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), size)
def test_size_pixel(self):
"""
Check size in derived pixel units.
"""
size = 0.3 * u.arcsec / (0.1 * u.arcsec / u.pixel)
c = Cutout2D(self.data, (2, 2), size)
assert c.data.shape == (3, 3)
assert c.data[0, 0] == 5
assert c.slices_original == (slice(1, 4), slice(1, 4))
assert c.slices_cutout == (slice(0, 3), slice(0, 3))
def test_size_angle(self):
c = Cutout2D(self.data, (2, 2), (0.1 * u.arcsec), wcs=self.wcs)
assert c.data.shape == (2, 2)
assert c.data[0, 0] == 5
assert c.slices_original == (slice(1, 3), slice(1, 3))
assert c.slices_cutout == (slice(0, 2), slice(0, 2))
def test_size_angle_without_wcs(self):
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), (3, 3 * u.arcsec))
def test_cutout_trim_overlap(self):
c = Cutout2D(self.data, (0, 0), (3, 3), mode="trim")
assert c.data.shape == (2, 2)
assert c.data[0, 0] == 0
assert c.slices_original == (slice(0, 2), slice(0, 2))
assert c.slices_cutout == (slice(0, 2), slice(0, 2))
def test_cutout_partial_overlap(self):
c = Cutout2D(self.data, (0, 0), (3, 3), mode="partial")
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 0
assert c.slices_original == (slice(0, 2), slice(0, 2))
assert c.slices_cutout == (slice(1, 3), slice(1, 3))
def test_cutout_partial_overlap_fill_value(self):
fill_value = -99
c = Cutout2D(self.data, (0, 0), (3, 3), mode="partial", fill_value=fill_value)
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 0
assert c.data[0, 0] == fill_value
def test_copy(self):
data = np.copy(self.data)
c = Cutout2D(data, (2, 3), (3, 3))
xy = (0, 0)
value = 100.0
c.data[xy] = value
xy_orig = c.to_original_position(xy)
yx = xy_orig[::-1]
assert data[yx] == value
data = np.copy(self.data)
c2 = Cutout2D(self.data, (2, 3), (3, 3), copy=True)
c2.data[xy] = value
assert data[yx] != value
def test_to_from_large(self):
position = (2, 2)
c = Cutout2D(self.data, position, (3, 3))
xy = (0, 0)
result = c.to_cutout_position(c.to_original_position(xy))
assert_allclose(result, xy)
def test_skycoord_without_wcs(self):
with pytest.raises(ValueError):
Cutout2D(self.data, self.position, (3, 3))
def test_skycoord(self):
c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs)
skycoord_original = self.position.from_pixel(
c.center_original[1], c.center_original[0], self.wcs
)
skycoord_cutout = self.position.from_pixel(
c.center_cutout[1], c.center_cutout[0], c.wcs
)
assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
def test_skycoord_partial(self):
c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs, mode="partial")
skycoord_original = self.position.from_pixel(
c.center_original[1], c.center_original[0], self.wcs
)
skycoord_cutout = self.position.from_pixel(
c.center_cutout[1], c.center_cutout[0], c.wcs
)
assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
def test_naxis_update(self):
xsize = 2
ysize = 3
c = Cutout2D(self.data, self.position, (ysize, xsize), wcs=self.wcs)
assert c.wcs.array_shape == (ysize, xsize)
def test_crpix_maps_to_crval(self):
w = Cutout2D(self.data, (0, 0), (3, 3), wcs=self.sipwcs, mode="partial").wcs
pscale = np.sqrt(proj_plane_pixel_area(w))
assert_allclose(
w.wcs_pix2world(*w.wcs.crpix, 1), w.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
assert_allclose(
w.all_pix2world(*w.wcs.crpix, 1), w.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
def test_cutout_with_nddata_as_input(self):
# This is essentially a copy/paste of test_skycoord with the
# input a ccd with wcs attribute instead of passing the
# wcs separately.
ccd = CCDData(data=self.data, wcs=self.wcs, unit="adu")
c = Cutout2D(ccd, self.position, (3, 3))
skycoord_original = self.position.from_pixel(
c.center_original[1], c.center_original[0], self.wcs
)
skycoord_cutout = self.position.from_pixel(
c.center_cutout[1], c.center_cutout[0], c.wcs
)
assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
|
bdc257a75e12ef1272f02fdd0e83e3e6aa0dcceb7e287de84416d20918b09cd9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pickle
import textwrap
from collections import OrderedDict
from itertools import chain, permutations
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.nddata import NDDataArray
from astropy.nddata import _testing as nd_testing
from astropy.nddata.nddata import NDData
from astropy.nddata.nduncertainty import StdDevUncertainty
from astropy.utils import NumpyRNGContext
from astropy.utils.masked import Masked
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS, HighLevelWCSWrapper, SlicedLowLevelWCS
from .test_nduncertainty import FakeUncertainty
class FakeNumpyArray:
"""
Class that has a few of the attributes of a numpy array.
These attributes are checked for by NDData.
"""
def __init__(self):
super().__init__()
def shape(self):
pass
def __getitem__(self):
pass
def __array__(self):
pass
@property
def dtype(self):
return "fake"
class MinimalUncertainty:
"""
Define the minimum attributes acceptable as an uncertainty object.
"""
def __init__(self, value):
self._uncertainty = value
@property
def uncertainty_type(self):
return "totally and completely fake"
class BadNDDataSubclass(NDData):
def __init__(
self,
data,
uncertainty=None,
mask=None,
wcs=None,
meta=None,
unit=None,
psf=None,
):
self._data = data
self._uncertainty = uncertainty
self._mask = mask
self._wcs = wcs
self._psf = psf
self._unit = unit
self._meta = meta
# Setter tests
def test_uncertainty_setter():
nd = NDData([1, 2, 3])
good_uncertainty = MinimalUncertainty(5)
nd.uncertainty = good_uncertainty
assert nd.uncertainty is good_uncertainty
# Check the fake uncertainty (minimal does not work since it has no
# parent_nddata attribute from NDUncertainty)
nd.uncertainty = FakeUncertainty(5)
assert nd.uncertainty.parent_nddata is nd
# Check that it works if the uncertainty was set during init
nd = NDData(nd)
assert isinstance(nd.uncertainty, FakeUncertainty)
nd.uncertainty = 10
assert not isinstance(nd.uncertainty, FakeUncertainty)
assert nd.uncertainty.array == 10
def test_mask_setter():
# Since it just changes the _mask attribute everything should work
nd = NDData([1, 2, 3])
nd.mask = True
assert nd.mask
nd.mask = False
assert not nd.mask
# Check that it replaces a mask from init
nd = NDData(nd, mask=True)
assert nd.mask
nd.mask = False
assert not nd.mask
# Init tests
def test_nddata_empty():
with pytest.raises(TypeError):
NDData() # empty initializer should fail
def test_nddata_init_data_nonarray():
inp = [1, 2, 3]
nd = NDData(inp)
assert (np.array(inp) == nd.data).all()
def test_nddata_init_data_ndarray():
# random floats
with NumpyRNGContext(123):
nd = NDData(np.random.random((10, 10)))
assert nd.data.shape == (10, 10)
assert nd.data.size == 100
assert nd.data.dtype == np.dtype(float)
# specific integers
nd = NDData(np.array([[1, 2, 3], [4, 5, 6]]))
assert nd.data.size == 6
assert nd.data.dtype == np.dtype(int)
# Tests to ensure that creating a new NDData object copies by *reference*.
a = np.ones((10, 10))
nd_ref = NDData(a)
a[0, 0] = 0
assert nd_ref.data[0, 0] == 0
    # Except when we choose copy=True
a = np.ones((10, 10))
nd_ref = NDData(a, copy=True)
a[0, 0] = 0
assert nd_ref.data[0, 0] != 0
def test_nddata_init_data_maskedarray():
with NumpyRNGContext(456):
NDData(np.random.random((10, 10)), mask=np.random.random((10, 10)) > 0.5)
# Another test (just copied here)
with NumpyRNGContext(12345):
a = np.random.randn(100)
marr = np.ma.masked_where(a > 0, a)
nd = NDData(marr)
# check that masks and data match
assert_array_equal(nd.mask, marr.mask)
assert_array_equal(nd.data, marr.data)
# check that they are both by reference
marr.mask[10] = ~marr.mask[10]
marr.data[11] = 123456789
assert_array_equal(nd.mask, marr.mask)
assert_array_equal(nd.data, marr.data)
# or not if we choose copy=True
nd = NDData(marr, copy=True)
marr.mask[10] = ~marr.mask[10]
marr.data[11] = 0
assert nd.mask[10] != marr.mask[10]
assert nd.data[11] != marr.data[11]
@pytest.mark.parametrize("data", [np.array([1, 2, 3]), 5])
def test_nddata_init_data_quantity(data):
# Test an array and a scalar because a scalar Quantity does not always
# behave the same way as an array.
quantity = data * u.adu
ndd = NDData(quantity)
assert ndd.unit == quantity.unit
assert_array_equal(ndd.data, np.array(quantity))
if ndd.data.size > 1:
# check that if it is an array it is not copied
quantity.value[1] = 100
assert ndd.data[1] == quantity.value[1]
# or is copied if we choose copy=True
ndd = NDData(quantity, copy=True)
quantity.value[1] = 5
assert ndd.data[1] != quantity.value[1]
# provide a quantity and override the unit
ndd_unit = NDData(data * u.erg, unit=u.J)
assert ndd_unit.unit == u.J
np.testing.assert_allclose((ndd_unit.data * ndd_unit.unit).to_value(u.erg), data)
def test_nddata_init_data_masked_quantity():
a = np.array([2, 3])
q = a * u.m
m = False
mq = Masked(q, mask=m)
nd = NDData(mq)
assert_array_equal(nd.data, a)
    # This test failed before the change in the nddata init because the
    # masked array's data (which was in fact a quantity) was saved directly.
assert nd.unit == u.m
assert not isinstance(nd.data, u.Quantity)
np.testing.assert_array_equal(nd.mask, np.array(m))
def test_nddata_init_data_nddata():
nd1 = NDData(np.array([1]))
nd2 = NDData(nd1)
assert nd2.wcs == nd1.wcs
assert nd2.uncertainty == nd1.uncertainty
assert nd2.mask == nd1.mask
assert nd2.unit == nd1.unit
assert nd2.meta == nd1.meta
assert nd2.psf == nd1.psf
# Check that it is copied by reference
nd1 = NDData(np.ones((5, 5)))
nd2 = NDData(nd1)
assert nd1.data is nd2.data
# Check that it is really copied if copy=True
nd2 = NDData(nd1, copy=True)
nd1.data[2, 3] = 10
assert nd1.data[2, 3] != nd2.data[2, 3]
# Now let's see what happens if we have all explicitly set
nd1 = NDData(
np.array([1]),
mask=False,
uncertainty=StdDevUncertainty(10),
unit=u.s,
meta={"dest": "mordor"},
wcs=WCS(naxis=1),
psf=np.array([10]),
)
nd2 = NDData(nd1)
assert nd2.data is nd1.data
assert nd2.wcs is nd1.wcs
assert nd2.uncertainty.array == nd1.uncertainty.array
assert nd2.mask == nd1.mask
assert nd2.unit == nd1.unit
assert nd2.meta == nd1.meta
assert nd2.psf == nd1.psf
# now what happens if we overwrite them all too
nd3 = NDData(
nd1,
mask=True,
uncertainty=StdDevUncertainty(200),
unit=u.km,
meta={"observer": "ME"},
wcs=WCS(naxis=1),
psf=np.array([20]),
)
assert nd3.data is nd1.data
assert nd3.wcs is not nd1.wcs
assert nd3.uncertainty.array != nd1.uncertainty.array
assert nd3.mask != nd1.mask
assert nd3.unit != nd1.unit
assert nd3.meta != nd1.meta
assert nd3.psf != nd1.psf
def test_nddata_init_data_nddata_subclass():
uncert = StdDevUncertainty(3)
# There might be some incompatible subclasses of NDData around.
bnd = BadNDDataSubclass(False, True, 3, 2, "gollum", 100, 12)
    # Before changing the NDData init this would not have raised an error
    # but would have led to a compromised nddata instance.
with pytest.raises(TypeError):
NDData(bnd)
# but if it has no actual incompatible attributes it passes
bnd_good = BadNDDataSubclass(
np.array([1, 2]),
uncert,
3,
HighLevelWCSWrapper(WCS(naxis=1)),
{"enemy": "black knight"},
u.km,
)
nd = NDData(bnd_good)
assert nd.unit == bnd_good.unit
assert nd.meta == bnd_good.meta
assert nd.uncertainty == bnd_good.uncertainty
assert nd.mask == bnd_good.mask
assert nd.wcs is bnd_good.wcs
assert nd.data is bnd_good.data
def test_nddata_init_data_fail():
# First one is sliceable but has no shape, so should fail.
with pytest.raises(TypeError):
NDData({"a": "dict"})
# This has a shape but is not sliceable
class Shape:
def __init__(self):
self.shape = 5
def __repr__(self):
return "7"
with pytest.raises(TypeError):
NDData(Shape())
def test_nddata_init_data_fakes():
ndd1 = NDData(FakeNumpyArray())
# First make sure that NDData isn't converting its data to a numpy array.
assert isinstance(ndd1.data, FakeNumpyArray)
# Make a new NDData initialized from an NDData
ndd2 = NDData(ndd1)
# Check that the data wasn't converted to numpy
assert isinstance(ndd2.data, FakeNumpyArray)
# Specific parameters
def test_param_uncertainty():
u = StdDevUncertainty(array=np.ones((5, 5)))
d = NDData(np.ones((5, 5)), uncertainty=u)
# Test that the parent_nddata is set.
assert d.uncertainty.parent_nddata is d
# Test conflicting uncertainties (other NDData)
u2 = StdDevUncertainty(array=np.ones((5, 5)) * 2)
d2 = NDData(d, uncertainty=u2)
assert d2.uncertainty is u2
assert d2.uncertainty.parent_nddata is d2
def test_param_wcs():
# Since everything is allowed we only need to test something
nd = NDData([1], wcs=WCS(naxis=1))
assert nd.wcs is not None
# Test conflicting wcs (other NDData)
nd2 = NDData(nd, wcs=WCS(naxis=1))
assert nd2.wcs is not None and nd2.wcs is not nd.wcs
def test_param_meta():
# everything dict-like is allowed
with pytest.raises(TypeError):
NDData([1], meta=3)
nd = NDData([1, 2, 3], meta={})
assert len(nd.meta) == 0
nd = NDData([1, 2, 3])
assert isinstance(nd.meta, OrderedDict)
assert len(nd.meta) == 0
# Test conflicting meta (other NDData)
nd2 = NDData(nd, meta={"image": "sun"})
assert len(nd2.meta) == 1
nd3 = NDData(nd2, meta={"image": "moon"})
assert len(nd3.meta) == 1
assert nd3.meta["image"] == "moon"
def test_param_mask():
# Since everything is allowed we only need to test something
nd = NDData([1], mask=False)
assert not nd.mask
# Test conflicting mask (other NDData)
nd2 = NDData(nd, mask=True)
assert nd2.mask
# (masked array)
nd3 = NDData(np.ma.array([1], mask=False), mask=True)
assert nd3.mask
# (masked quantity)
mq = np.ma.array(np.array([2, 3]) * u.m, mask=False)
nd4 = NDData(mq, mask=True)
assert nd4.mask
def test_param_unit():
with pytest.raises(ValueError):
NDData(np.ones((5, 5)), unit="NotAValidUnit")
NDData([1, 2, 3], unit="meter")
# Test conflicting units (quantity as data)
q = np.array([1, 2, 3]) * u.m
nd = NDData(q, unit="cm")
assert nd.unit != q.unit
assert nd.unit == u.cm
# (masked quantity)
mq = np.ma.array(np.array([2, 3]) * u.m, mask=False)
nd2 = NDData(mq, unit=u.pc)
assert nd2.unit == u.pc
# (another NDData as data)
nd3 = NDData(nd, unit="km")
assert nd3.unit == u.km
# (MaskedQuantity given to NDData)
mq_astropy = Masked.from_unmasked(q, False)
nd4 = NDData(mq_astropy, unit="km")
assert nd4.unit == u.km
def test_pickle_nddata_with_uncertainty():
ndd = NDData(
np.ones(3), uncertainty=StdDevUncertainty(np.ones(5), unit=u.m), unit=u.m
)
ndd_dumped = pickle.dumps(ndd)
ndd_restored = pickle.loads(ndd_dumped)
assert type(ndd_restored.uncertainty) is StdDevUncertainty
assert ndd_restored.uncertainty.parent_nddata is ndd_restored
assert ndd_restored.uncertainty.unit == u.m
def test_pickle_uncertainty_only():
ndd = NDData(
np.ones(3), uncertainty=StdDevUncertainty(np.ones(5), unit=u.m), unit=u.m
)
uncertainty_dumped = pickle.dumps(ndd.uncertainty)
uncertainty_restored = pickle.loads(uncertainty_dumped)
np.testing.assert_array_equal(ndd.uncertainty.array, uncertainty_restored.array)
assert ndd.uncertainty.unit == uncertainty_restored.unit
# Even though it has a parent there is no one that references the parent
# after unpickling so the weakref "dies" immediately after unpickling
# finishes.
assert uncertainty_restored.parent_nddata is None
def test_pickle_nddata_without_uncertainty():
ndd = NDData(np.ones(3), unit=u.m)
dumped = pickle.dumps(ndd)
ndd_restored = pickle.loads(dumped)
np.testing.assert_array_equal(ndd.data, ndd_restored.data)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
from astropy.utils.tests.test_metadata import MetaBaseTest
class TestMetaNDData(MetaBaseTest):
test_class = NDData
args = np.array([[1.0]])
# Representation tests
def test_nddata_str():
arr1d = NDData(np.array([1, 2, 3]))
assert str(arr1d) == "[1 2 3]"
arr2d = NDData(np.array([[1, 2], [3, 4]]))
assert str(arr2d) == textwrap.dedent(
"""
[[1 2]
[3 4]]"""[
1:
]
)
arr3d = NDData(np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
assert str(arr3d) == textwrap.dedent(
"""
[[[1 2]
[3 4]]
[[5 6]
[7 8]]]"""[
1:
]
)
# let's add units!
arr = NDData(np.array([1, 2, 3]), unit="km")
assert str(arr) == "[1 2 3] km"
# what if it had these units?
arr = NDData(np.array([1, 2, 3]), unit="erg cm^-2 s^-1 A^-1")
assert str(arr) == "[1 2 3] erg / (A s cm2)"
def test_nddata_repr():
# The big test is eval(repr()) should be equal to the original!
arr1d = NDData(np.array([1, 2, 3]))
s = repr(arr1d)
assert s == "NDData([1, 2, 3])"
got = eval(s)
assert np.all(got.data == arr1d.data)
assert got.unit == arr1d.unit
arr2d = NDData(np.array([[1, 2], [3, 4]]))
s = repr(arr2d)
assert s == textwrap.dedent(
"""
NDData([[1, 2],
[3, 4]])"""[
1:
]
)
got = eval(s)
assert np.all(got.data == arr2d.data)
assert got.unit == arr2d.unit
arr3d = NDData(np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
s = repr(arr3d)
assert s == textwrap.dedent(
"""
NDData([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])"""[
1:
]
)
got = eval(s)
assert np.all(got.data == arr3d.data)
assert got.unit == arr3d.unit
# let's add units!
arr = NDData(np.array([1, 2, 3]), unit="km")
s = repr(arr)
assert s == "NDData([1, 2, 3], unit='km')"
got = eval(s)
assert np.all(got.data == arr.data)
assert got.unit == arr.unit
# Not supported features
def test_slicing_not_supported():
ndd = NDData(np.ones((5, 5)))
with pytest.raises(TypeError):
ndd[0]
def test_arithmetic_not_supported():
ndd = NDData(np.ones((5, 5)))
with pytest.raises(TypeError):
ndd + ndd
def test_nddata_wcs_setter_error_cases():
ndd = NDData(np.ones((5, 5)))
# Setting with a non-WCS should raise an error
with pytest.raises(TypeError):
ndd.wcs = "I am not a WCS"
naxis = 2
# This should succeed since the WCS is currently None
ndd.wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[0] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
with pytest.raises(ValueError):
# This should fail since the WCS is not None
ndd.wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[0] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
def test_nddata_wcs_setter_with_low_level_wcs():
ndd = NDData(np.ones((5, 5)))
wcs = WCS()
# If the wcs property is set with a low level WCS it should get
# wrapped to high level.
low_level = SlicedLowLevelWCS(wcs, 5)
assert not isinstance(low_level, BaseHighLevelWCS)
ndd.wcs = low_level
assert isinstance(ndd.wcs, BaseHighLevelWCS)
def test_nddata_init_with_low_level_wcs():
wcs = WCS()
low_level = SlicedLowLevelWCS(wcs, 5)
ndd = NDData(np.ones((5, 5)), wcs=low_level)
assert isinstance(ndd.wcs, BaseHighLevelWCS)
class NDDataCustomWCS(NDData):
@property
def wcs(self):
return WCS()
def test_overriden_wcs():
# Check that a sub-class that overrides `.wcs` without providing a setter
# works
NDDataCustomWCS(np.ones((5, 5)))
# set up parameters for test_collapse:
np.random.seed(42)
collapse_units = [None, u.Jy]
collapse_propagate = [True, False]
collapse_data_shapes = [
# 3D example:
(4, 3, 2),
# 5D example
(6, 5, 4, 3, 2),
]
collapse_ignore_masked = [True, False]
collapse_masks = list(
chain.from_iterable(
[
# try the operations without a mask (all False):
np.zeros(collapse_data_shape).astype(bool)
]
+ [
# assemble a bunch of random masks:
np.random.randint(0, 2, size=collapse_data_shape).astype(bool)
for _ in range(10)
]
for collapse_data_shape in collapse_data_shapes
)
)
# the following provides pytest.mark.parametrize with every
# permutation of (1) the units, (2) propagating/not propagating
# uncertainties, and (3) the data shapes of different ndim.
permute = (
len(collapse_masks)
* len(collapse_propagate)
* len(collapse_units)
* len(collapse_ignore_masked)
)
collapse_units = permute // len(collapse_units) * collapse_units
collapse_propagate = permute // len(collapse_propagate) * collapse_propagate
collapse_masks = permute // len(collapse_masks) * collapse_masks
collapse_ignore_masked = permute // len(collapse_ignore_masked) * collapse_ignore_masked
@pytest.mark.parametrize(
"mask, unit, propagate_uncertainties, operation_ignores_mask",
zip(collapse_masks, collapse_units, collapse_propagate, collapse_ignore_masked),
)
def test_collapse(mask, unit, propagate_uncertainties, operation_ignores_mask):
# unique set of combinations of each of the N-1 axes for an N-D cube:
axes_permutations = {tuple(axes[:2]) for axes in permutations(range(mask.ndim))}
# each of the single axis slices:
axes_permutations.update({axis for axis in range(mask.ndim)})
axes_permutations.update({None})
cube = np.arange(np.prod(mask.shape)).reshape(mask.shape)
numpy_cube = np.ma.masked_array(cube, mask=mask)
ma_cube = Masked(cube, mask=mask)
ndarr = NDDataArray(cube, uncertainty=StdDevUncertainty(cube), unit=unit, mask=mask)
# By construction, the minimum value along each axis is always the zeroth index and
# the maximum is always the last along that axis. We verify that here, so we can
# test that the correct uncertainties are extracted during the
# `NDDataArray.min` and `NDDataArray.max` methods later:
for axis in range(cube.ndim):
assert np.all(np.equal(cube.argmin(axis=axis), 0))
assert np.all(np.equal(cube.argmax(axis=axis), cube.shape[axis] - 1))
# confirm that supported nddata methods agree with corresponding numpy methods
# for the masked data array:
sum_methods = ["sum", "mean"]
ext_methods = ["min", "max"]
all_methods = sum_methods + ext_methods
# for all supported methods, ensure the masking is propagated:
for method in all_methods:
for axes in axes_permutations:
astropy_method = getattr(ma_cube, method)(axis=axes)
numpy_method = getattr(numpy_cube, method)(axis=axes)
nddata_method = getattr(ndarr, method)(
axis=axes,
propagate_uncertainties=propagate_uncertainties,
operation_ignores_mask=operation_ignores_mask,
)
astropy_unmasked = astropy_method.base[~astropy_method.mask]
nddata_unmasked = nddata_method.data[~nddata_method.mask]
# check if the units are passed through correctly:
assert unit == nddata_method.unit
# check if the numpy and astropy.utils.masked results agree when
# the result is not fully masked:
if len(astropy_unmasked) > 0:
if not operation_ignores_mask:
# compare with astropy
assert np.all(np.equal(astropy_unmasked, nddata_unmasked))
assert np.all(np.equal(astropy_method.mask, nddata_method.mask))
else:
# compare with numpy
assert np.ma.all(
np.ma.equal(numpy_method, np.asanyarray(nddata_method))
)
# For extremum methods, ensure the uncertainty returned corresponds to the
# min/max data value. We've created the uncertainties to have the same value
# as the data array, so we can just check for equality:
if method in ext_methods and propagate_uncertainties:
assert np.ma.all(np.ma.equal(astropy_method, nddata_method))
|
1f6f6b6f537ca7cda07e9532b8c8ac1bdcd77262496abdb735c350b44110b63e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pickle
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.nddata.ccddata import CCDData
from astropy.nddata.compat import NDDataArray
from astropy.nddata.nddata import NDData
from astropy.nddata.nduncertainty import (
IncompatibleUncertaintiesException,
InverseVariance,
MissingDataAssociationException,
NDUncertainty,
StdDevUncertainty,
UnknownUncertainty,
VarianceUncertainty,
_move_preserved_axes_first,
_unravel_preserved_axes,
)
# Regarding setter tests:
# No need to test setters since the uncertainty is considered immutable after
# creation, except for the parent_nddata attribute, which accepts just about
# anything.
# Additionally they should be covered by NDData and NDArithmeticMixin, which
# rely on it.
# Regarding propagate, _convert_uncert, _propagate_* tests:
# They should be covered by NDArithmeticMixin since there is generally no need
# to test them without this mixin.
# Regarding __getitem__ tests:
# Should be covered by NDSlicingMixin.
# Regarding StdDevUncertainty tests:
# This subclass only overrides the methods for propagation, so those should
# likewise be covered by NDArithmeticMixin.
# Not really a fake, but the minimum an uncertainty subclass has to override
# in order not to be abstract.
class FakeUncertainty(NDUncertainty):
@property
def uncertainty_type(self):
return "fake"
def _data_unit_to_uncertainty_unit(self, value):
return None
def _propagate_add(self, data, final_data):
pass
def _propagate_subtract(self, data, final_data):
pass
def _propagate_multiply(self, data, final_data):
pass
def _propagate_divide(self, data, final_data):
pass
# Test the fake uncertainty (StdDevUncertainty is also added and should behave
# identically).
# The list of classes used for parametrization in the tests below:
uncertainty_types_to_be_tested = [
FakeUncertainty,
StdDevUncertainty,
VarianceUncertainty,
InverseVariance,
UnknownUncertainty,
]
uncertainty_types_with_conversion_support = (
StdDevUncertainty,
VarianceUncertainty,
InverseVariance,
)
uncertainty_types_without_conversion_support = (FakeUncertainty, UnknownUncertainty)
@pytest.mark.parametrize("UncertClass", uncertainty_types_to_be_tested)
def test_init_fake_with_list(UncertClass):
fake_uncert = UncertClass([1, 2, 3])
assert_array_equal(fake_uncert.array, np.array([1, 2, 3]))
# Copy makes no difference since casting a list to an np.ndarray always
# makes a copy.
# But let's give the uncertainty a unit too
fake_uncert = UncertClass([1, 2, 3], unit=u.adu)
assert_array_equal(fake_uncert.array, np.array([1, 2, 3]))
assert fake_uncert.unit is u.adu
@pytest.mark.parametrize("UncertClass", uncertainty_types_to_be_tested)
def test_init_fake_with_ndarray(UncertClass):
uncert = np.arange(100).reshape(10, 10)
fake_uncert = UncertClass(uncert)
# Numpy Arrays are copied by default
assert_array_equal(fake_uncert.array, uncert)
assert fake_uncert.array is not uncert
# Now try it without copy
fake_uncert = UncertClass(uncert, copy=False)
assert fake_uncert.array is uncert
# let's provide a unit
fake_uncert = UncertClass(uncert, unit=u.adu)
assert_array_equal(fake_uncert.array, uncert)
assert fake_uncert.array is not uncert
assert fake_uncert.unit is u.adu
@pytest.mark.parametrize("UncertClass", uncertainty_types_to_be_tested)
def test_init_fake_with_quantity(UncertClass):
uncert = np.arange(10).reshape(2, 5) * u.adu
fake_uncert = UncertClass(uncert)
# Numpy Arrays are copied by default
assert_array_equal(fake_uncert.array, uncert.value)
assert fake_uncert.array is not uncert.value
assert fake_uncert.unit is u.adu
# Try without copy (should not work, quantity.value always returns a copy)
fake_uncert = UncertClass(uncert, copy=False)
assert fake_uncert.array is not uncert.value
assert fake_uncert.unit is u.adu
# Now try with an explicit unit parameter too
fake_uncert = UncertClass(uncert, unit=u.m)
assert_array_equal(fake_uncert.array, uncert.value) # No conversion done
assert fake_uncert.array is not uncert.value
assert fake_uncert.unit is u.m # It took the explicit one
@pytest.mark.parametrize("UncertClass", uncertainty_types_to_be_tested)
def test_init_fake_with_fake(UncertClass):
uncert = np.arange(5).reshape(5, 1)
fake_uncert1 = UncertClass(uncert)
fake_uncert2 = UncertClass(fake_uncert1)
assert_array_equal(fake_uncert2.array, uncert)
assert fake_uncert2.array is not uncert
# Without making copies
fake_uncert1 = UncertClass(uncert, copy=False)
fake_uncert2 = UncertClass(fake_uncert1, copy=False)
assert_array_equal(fake_uncert2.array, fake_uncert1.array)
assert fake_uncert2.array is fake_uncert1.array
# With a unit
uncert = np.arange(5).reshape(5, 1) * u.adu
fake_uncert1 = UncertClass(uncert)
fake_uncert2 = UncertClass(fake_uncert1)
assert_array_equal(fake_uncert2.array, uncert.value)
assert fake_uncert2.array is not uncert.value
assert fake_uncert2.unit is u.adu
# With a unit and an explicit unit-parameter
fake_uncert2 = UncertClass(fake_uncert1, unit=u.cm)
assert_array_equal(fake_uncert2.array, uncert.value)
assert fake_uncert2.array is not uncert.value
assert fake_uncert2.unit is u.cm
@pytest.mark.parametrize("UncertClass", uncertainty_types_to_be_tested)
def test_init_fake_with_somethingElse(UncertClass):
# What about a dict?
uncert = {"rdnoise": 2.9, "gain": 0.6}
fake_uncert = UncertClass(uncert)
assert fake_uncert.array == uncert
# We can pass a unit too but since we cannot do uncertainty propagation
# the interpretation is up to the user
fake_uncert = UncertClass(uncert, unit=u.s)
assert fake_uncert.array == uncert
assert fake_uncert.unit is u.s
# So, now check what happens if copy is False
fake_uncert = UncertClass(uncert, copy=False)
assert fake_uncert.array == uncert
assert id(fake_uncert) != id(uncert)
# dicts cannot be referenced without copy
# TODO : Find something that can be referenced without copy :-)
def test_init_fake_with_StdDevUncertainty():
# Different instances of uncertainties are not directly convertible so this
# should fail
uncert = np.arange(5).reshape(5, 1)
std_uncert = StdDevUncertainty(uncert)
with pytest.raises(IncompatibleUncertaintiesException):
FakeUncertainty(std_uncert)
# Ok try it the other way around
fake_uncert = FakeUncertainty(uncert)
with pytest.raises(IncompatibleUncertaintiesException):
StdDevUncertainty(fake_uncert)
def test_uncertainty_type():
fake_uncert = FakeUncertainty([10, 2])
assert fake_uncert.uncertainty_type == "fake"
std_uncert = StdDevUncertainty([10, 2])
assert std_uncert.uncertainty_type == "std"
var_uncert = VarianceUncertainty([10, 2])
assert var_uncert.uncertainty_type == "var"
ivar_uncert = InverseVariance([10, 2])
assert ivar_uncert.uncertainty_type == "ivar"
def test_uncertainty_correlated():
fake_uncert = FakeUncertainty([10, 2])
assert not fake_uncert.supports_correlated
std_uncert = StdDevUncertainty([10, 2])
assert std_uncert.supports_correlated
def test_for_leak_with_uncertainty():
# Regression test for memory leak because of cyclic references between
# NDData and uncertainty
from collections import defaultdict
from gc import get_objects
def test_leak(func, specific_objects=None):
"""Function based on gc.get_objects to determine if any object or
a specific object leaks.
It requires a function to be given and if any objects survive the
function scope it's considered a leak (so don't return anything).
"""
before = defaultdict(int)
for i in get_objects():
before[type(i)] += 1
func()
after = defaultdict(int)
for i in get_objects():
after[type(i)] += 1
if specific_objects is None:
assert all(after[k] - before[k] == 0 for k in after)
else:
assert after[specific_objects] - before[specific_objects] == 0
def non_leaker_nddata():
# Without uncertainty there is no reason to assume that there is a
# memory leak but test it nevertheless.
NDData(np.ones(100))
def leaker_nddata():
# With uncertainty there was a memory leak!
NDData(np.ones(100), uncertainty=StdDevUncertainty(np.ones(100)))
test_leak(non_leaker_nddata, NDData)
test_leak(leaker_nddata, NDData)
# Same for NDDataArray:
from astropy.nddata.compat import NDDataArray
def non_leaker_nddataarray():
NDDataArray(np.ones(100))
def leaker_nddataarray():
NDDataArray(np.ones(100), uncertainty=StdDevUncertainty(np.ones(100)))
test_leak(non_leaker_nddataarray, NDDataArray)
test_leak(leaker_nddataarray, NDDataArray)
def test_for_stolen_uncertainty():
# Sharing uncertainties should not overwrite the parent_nddata attribute
ndd1 = NDData(1, uncertainty=1)
ndd2 = NDData(2, uncertainty=ndd1.uncertainty)
# uncertainty.parent_nddata.data should be the original data!
assert ndd1.uncertainty.parent_nddata.data == ndd1.data
assert ndd2.uncertainty.parent_nddata.data == ndd2.data
def test_stddevuncertainty_pickle():
uncertainty = StdDevUncertainty(np.ones(3), unit=u.m)
uncertainty_restored = pickle.loads(pickle.dumps(uncertainty))
np.testing.assert_array_equal(uncertainty.array, uncertainty_restored.array)
assert uncertainty.unit == uncertainty_restored.unit
with pytest.raises(MissingDataAssociationException):
uncertainty_restored.parent_nddata
@pytest.mark.parametrize("UncertClass", uncertainty_types_to_be_tested)
def test_quantity(UncertClass):
fake_uncert = UncertClass([1, 2, 3], unit=u.adu)
assert isinstance(fake_uncert.quantity, u.Quantity)
assert fake_uncert.quantity.unit.is_equivalent(u.adu)
fake_uncert_nounit = UncertClass([1, 2, 3])
assert isinstance(fake_uncert_nounit.quantity, u.Quantity)
assert fake_uncert_nounit.quantity.unit.is_equivalent(u.dimensionless_unscaled)
@pytest.mark.parametrize(
"UncertClass", [VarianceUncertainty, StdDevUncertainty, InverseVariance]
)
def test_setting_uncertainty_unit_results_in_unit_object(UncertClass):
v = UncertClass([1, 1])
v.unit = "electron"
assert isinstance(v.unit, u.UnitBase)
@pytest.mark.parametrize("NDClass", [NDData, NDDataArray, CCDData])
@pytest.mark.parametrize(
"UncertClass", [VarianceUncertainty, StdDevUncertainty, InverseVariance]
)
def test_changing_unit_to_value_inconsistent_with_parent_fails(NDClass, UncertClass):
ndd1 = NDClass(1, unit="adu")
v = UncertClass(1)
# Sets the uncertainty unit to whatever makes sense with this data.
ndd1.uncertainty = v
with pytest.raises(u.UnitConversionError):
# Nothing special about 15 except no one would ever use that unit
v.unit = ndd1.unit**15
@pytest.mark.parametrize("NDClass", [NDData, NDDataArray, CCDData])
@pytest.mark.parametrize(
"UncertClass, expected_unit",
[
(VarianceUncertainty, u.adu**2),
(StdDevUncertainty, u.adu),
(InverseVariance, 1 / u.adu**2),
],
)
def test_assigning_uncertainty_to_parent_gives_correct_unit(
NDClass, UncertClass, expected_unit
):
# Does assigning a unitless uncertainty to an NDData result in the
# expected unit?
ndd = NDClass([1, 1], unit=u.adu)
v = UncertClass([1, 1])
ndd.uncertainty = v
assert v.unit == expected_unit
@pytest.mark.parametrize("NDClass", [NDData, NDDataArray, CCDData])
@pytest.mark.parametrize(
"UncertClass, expected_unit",
[
(VarianceUncertainty, u.adu**2),
(StdDevUncertainty, u.adu),
(InverseVariance, 1 / u.adu**2),
],
)
def test_assigning_uncertainty_with_unit_to_parent_with_unit(
NDClass, UncertClass, expected_unit
):
# Does assigning an uncertainty with an appropriate unit to an NDData
# with a unit work?
ndd = NDClass([1, 1], unit=u.adu)
v = UncertClass([1, 1], unit=expected_unit)
ndd.uncertainty = v
assert v.unit == expected_unit
@pytest.mark.parametrize("NDClass", [NDData, NDDataArray, CCDData])
@pytest.mark.parametrize(
"UncertClass", [(VarianceUncertainty), (StdDevUncertainty), (InverseVariance)]
)
def test_assigning_uncertainty_with_bad_unit_to_parent_fails(NDClass, UncertClass):
# Does assigning an uncertainty with a non-matching unit to an NDData
# with a unit work?
ndd = NDClass([1, 1], unit=u.adu)
# Set the unit to something inconsistent with ndd's unit
v = UncertClass([1, 1], unit=u.second)
with pytest.raises(u.UnitConversionError):
ndd.uncertainty = v
@pytest.mark.parametrize("UncertClass", uncertainty_types_with_conversion_support)
def test_self_conversion_via_variance_supported(UncertClass):
uncert = np.arange(1, 11).reshape(2, 5) * u.adu
start_uncert = UncertClass(uncert)
final_uncert = start_uncert.represent_as(UncertClass)
assert_array_equal(start_uncert.array, final_uncert.array)
assert start_uncert.unit == final_uncert.unit
@pytest.mark.parametrize(
"UncertClass,to_variance_func",
zip(
uncertainty_types_with_conversion_support,
(lambda x: x**2, lambda x: x, lambda x: 1 / x),
),
)
def test_conversion_to_from_variance_supported(UncertClass, to_variance_func):
uncert = np.arange(1, 11).reshape(2, 5) * u.adu
start_uncert = UncertClass(uncert)
var_uncert = start_uncert.represent_as(VarianceUncertainty)
final_uncert = var_uncert.represent_as(UncertClass)
assert_allclose(to_variance_func(start_uncert.array), var_uncert.array)
assert_array_equal(start_uncert.array, final_uncert.array)
assert start_uncert.unit == final_uncert.unit
@pytest.mark.parametrize("UncertClass", uncertainty_types_without_conversion_support)
def test_self_conversion_via_variance_not_supported(UncertClass):
uncert = np.arange(1, 11).reshape(2, 5) * u.adu
start_uncert = UncertClass(uncert)
with pytest.raises(TypeError):
final_uncert = start_uncert.represent_as(UncertClass)
def test_reshape_ndarray_methods():
shape = (6, 5, 4, 3, 2)
preserve_axes = (1, 2)
arr = np.arange(np.prod(shape)).reshape(shape)
reshaped_arr = _move_preserved_axes_first(arr, preserve_axes)
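    # With the values above this moves axes (1, 2) to the front and flattens them:
    # (6, 5, 4, 3, 2) -> (5 * 4, 6, 3, 2) == (20, 6, 3, 2)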
new_shape = reshaped_arr.shape
# first entry will be product of two preserved axes:
assert new_shape[0] == np.prod(np.array(shape)[np.array(preserve_axes)])
# remaining entries unchanged:
shape_unchanged_axes = np.array(shape)[
np.array([i for i in range(len(shape)) if i not in preserve_axes])
]
assert np.all(np.equal(new_shape[1:], shape_unchanged_axes))
# now confirm that after collapsing along first axis, what's left
# can be unraveled to the shape of the preserved axes:
summed = np.sum(reshaped_arr, axis=tuple(range(1, len(shape) - 1)))
assert summed.shape[0] == new_shape[0]
unravelled = _unravel_preserved_axes(arr, summed, preserve_axes)
shape_preserved = np.array(shape)[np.array(preserve_axes)]
assert np.all(np.equal(unravelled.shape, shape_preserved))
|
057b91f3fba6e126ba10724c74dca0f2ca4c6609b96a368509f1664a8ec5b1a8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from astropy import units as u
from astropy.nddata import NDDataRef
from astropy.nddata import _testing as nd_testing
from astropy.nddata.nduncertainty import (
IncompatibleUncertaintiesException,
InverseVariance,
StdDevUncertainty,
UnknownUncertainty,
VarianceUncertainty,
)
from astropy.units import Quantity, UnitsError
from astropy.utils.exceptions import AstropyUserWarning
from astropy.wcs import WCS
# Alias NDDataAllMixins in case this will be renamed ... :-)
NDDataArithmetic = NDDataRef
class StdDevUncertaintyUncorrelated(StdDevUncertainty):
@property
def supports_correlated(self):
return False
# Test with Data covers:
# scalars, 1D, 2D and 3D
# broadcasting between them
@pytest.mark.filterwarnings("ignore:divide by zero encountered.*")
@pytest.mark.parametrize(
("data1", "data2"),
[
(np.array(5), np.array(10)),
(np.array(5), np.arange(10)),
(np.array(5), np.arange(10).reshape(2, 5)),
(np.arange(10), np.ones(10) * 2),
(np.arange(10), np.ones((10, 10)) * 2),
(np.arange(10).reshape(2, 5), np.ones((2, 5)) * 3),
(np.arange(1000).reshape(20, 5, 10), np.ones((20, 5, 10)) * 3),
],
)
def test_arithmetics_data(data1, data2):
nd1 = NDDataArithmetic(data1)
nd2 = NDDataArithmetic(data2)
# Addition
nd3 = nd1.add(nd2)
assert_array_equal(data1 + data2, nd3.data)
# Subtraction
nd4 = nd1.subtract(nd2)
assert_array_equal(data1 - data2, nd4.data)
# Multiplication
nd5 = nd1.multiply(nd2)
assert_array_equal(data1 * data2, nd5.data)
# Division
nd6 = nd1.divide(nd2)
assert_array_equal(data1 / data2, nd6.data)
for nd in [nd3, nd4, nd5, nd6]:
# Check that broadcasting worked as expected
if data1.ndim > data2.ndim:
assert data1.shape == nd.data.shape
else:
assert data2.shape == nd.data.shape
# Check all other attributes are not set
assert nd.unit is None
assert nd.uncertainty is None
assert nd.mask is None
assert len(nd.meta) == 0
assert nd.wcs is None
# Invalid arithmetic operations for data covering:
# not broadcastable data
def test_arithmetics_data_invalid():
nd1 = NDDataArithmetic([1, 2, 3])
nd2 = NDDataArithmetic([1, 2])
with pytest.raises(ValueError):
nd1.add(nd2)
# Test with Data and unit and covers:
# identical units (even dimensionless unscaled vs. no unit),
# equivalent units (such as meter and kilometer)
# equivalent composite units (such as m/s and km/h)
@pytest.mark.filterwarnings("ignore:divide by zero encountered.*")
@pytest.mark.parametrize(
("data1", "data2"),
[
(np.array(5) * u.s, np.array(10) * u.s),
(np.array(5) * u.s, np.arange(10) * u.h),
(np.array(5) * u.s, np.arange(10).reshape(2, 5) * u.min),
(np.arange(10) * u.m / u.s, np.ones(10) * 2 * u.km / u.s),
(np.arange(10) * u.m / u.s, np.ones((10, 10)) * 2 * u.m / u.h),
(np.arange(10).reshape(2, 5) * u.m / u.s, np.ones((2, 5)) * 3 * u.km / u.h),
(
np.arange(1000).reshape(20, 5, 10),
np.ones((20, 5, 10)) * 3 * u.dimensionless_unscaled,
),
(np.array(5), np.array(10) * u.s / u.h),
],
)
def test_arithmetics_data_unit_identical(data1, data2):
nd1 = NDDataArithmetic(data1)
nd2 = NDDataArithmetic(data2)
# Addition
nd3 = nd1.add(nd2)
ref = data1 + data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd3.data)
assert nd3.unit == ref_unit
# Subtraction
nd4 = nd1.subtract(nd2)
ref = data1 - data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd4.data)
assert nd4.unit == ref_unit
# Multiplication
nd5 = nd1.multiply(nd2)
ref = data1 * data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd5.data)
assert nd5.unit == ref_unit
# Division
nd6 = nd1.divide(nd2)
ref = data1 / data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd6.data)
assert nd6.unit == ref_unit
for nd in [nd3, nd4, nd5, nd6]:
# Check that broadcasting worked as expected
if data1.ndim > data2.ndim:
assert data1.shape == nd.data.shape
else:
assert data2.shape == nd.data.shape
# Check all other attributes are not set
assert nd.uncertainty is None
assert nd.mask is None
assert len(nd.meta) == 0
assert nd.wcs is None
# Test with Data and unit and covers:
# not identical not convertible units
# one with unit (which is not dimensionless) and one without
@pytest.mark.parametrize(
("data1", "data2"),
[
(np.array(5) * u.s, np.array(10) * u.m),
(np.array(5) * u.Mpc, np.array(10) * u.km / u.s),
(np.array(5) * u.Mpc, np.array(10)),
(np.array(5), np.array(10) * u.s),
],
)
def test_arithmetics_data_unit_not_identical(data1, data2):
nd1 = NDDataArithmetic(data1)
nd2 = NDDataArithmetic(data2)
# Addition should not be possible
with pytest.raises(UnitsError):
nd1.add(nd2)
# Subtraction should not be possible
with pytest.raises(UnitsError):
nd1.subtract(nd2)
# Multiplication is possible
nd3 = nd1.multiply(nd2)
ref = data1 * data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd3.data)
assert nd3.unit == ref_unit
# Division is possible
nd4 = nd1.divide(nd2)
ref = data1 / data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd4.data)
assert nd4.unit == ref_unit
for nd in [nd3, nd4]:
# Check all other attributes are not set
assert nd.uncertainty is None
assert nd.mask is None
assert len(nd.meta) == 0
assert nd.wcs is None
# Tests with wcs (not very sensible because there is no operation between them),
# covering:
# both set and identical/not identical
# one set
# None set
@pytest.mark.parametrize(
("wcs1", "wcs2"),
[
(None, None),
(None, WCS(naxis=2)),
(WCS(naxis=2), None),
nd_testing.create_two_equal_wcs(naxis=2),
nd_testing.create_two_unequal_wcs(naxis=2),
],
)
def test_arithmetics_data_wcs(wcs1, wcs2):
nd1 = NDDataArithmetic(1, wcs=wcs1)
nd2 = NDDataArithmetic(1, wcs=wcs2)
if wcs1 is None and wcs2 is None:
ref_wcs = None
elif wcs1 is None:
ref_wcs = wcs2
elif wcs2 is None:
ref_wcs = wcs1
else:
ref_wcs = wcs1
# Addition
nd3 = nd1.add(nd2)
nd_testing.assert_wcs_seem_equal(ref_wcs, nd3.wcs)
# Subtraction
nd4 = nd1.subtract(nd2)
nd_testing.assert_wcs_seem_equal(ref_wcs, nd4.wcs)
# Multiplication
nd5 = nd1.multiply(nd2)
nd_testing.assert_wcs_seem_equal(ref_wcs, nd5.wcs)
# Division
nd6 = nd1.divide(nd2)
nd_testing.assert_wcs_seem_equal(ref_wcs, nd6.wcs)
for nd in [nd3, nd4, nd5, nd6]:
# Check all other attributes are not set
assert nd.unit is None
assert nd.uncertainty is None
assert len(nd.meta) == 0
assert nd.mask is None
# Masks are completely separated in the NDArithmetics from the data so we need
# no correlated tests but covering:
# masks 1D, 2D and mixed cases with broadcasting
@pytest.mark.parametrize(
("mask1", "mask2"),
[
(None, None),
(None, False),
(True, None),
(False, False),
(True, False),
(False, True),
(True, True),
(np.array(False), np.array(True)),
(np.array(False), np.array([0, 1, 0, 1, 1], dtype=np.bool_)),
(np.array(True), np.array([[0, 1, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=np.bool_)),
(
np.array([0, 1, 0, 1, 1], dtype=np.bool_),
np.array([1, 1, 0, 0, 1], dtype=np.bool_),
),
(
np.array([0, 1, 0, 1, 1], dtype=np.bool_),
np.array([[0, 1, 0, 1, 1], [1, 0, 0, 1, 1]], dtype=np.bool_),
),
(
np.array([[0, 1, 0, 1, 1], [1, 0, 0, 1, 1]], dtype=np.bool_),
np.array([[0, 1, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=np.bool_),
),
],
)
def test_arithmetics_data_masks(mask1, mask2):
nd1 = NDDataArithmetic(1, mask=mask1)
nd2 = NDDataArithmetic(1, mask=mask2)
if mask1 is None and mask2 is None:
ref_mask = None
elif mask1 is None:
ref_mask = mask2
elif mask2 is None:
ref_mask = mask1
else:
ref_mask = mask1 | mask2
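        # The reference assumes the default mask handler, which combines masks
        # element-wise with a logical OR (np.logical_or).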
# Addition
nd3 = nd1.add(nd2)
assert_array_equal(ref_mask, nd3.mask)
# Subtraction
nd4 = nd1.subtract(nd2)
assert_array_equal(ref_mask, nd4.mask)
# Multiplication
nd5 = nd1.multiply(nd2)
assert_array_equal(ref_mask, nd5.mask)
# Division
nd6 = nd1.divide(nd2)
assert_array_equal(ref_mask, nd6.mask)
for nd in [nd3, nd4, nd5, nd6]:
# Check all other attributes are not set
assert nd.unit is None
assert nd.uncertainty is None
assert len(nd.meta) == 0
assert nd.wcs is None
# One additional case which cannot easily be incorporated in the test above:
# what happens if the masks are numpy ndarrays that are not broadcastable.
def test_arithmetics_data_masks_invalid():
nd1 = NDDataArithmetic(1, mask=np.array([1, 0], dtype=np.bool_))
nd2 = NDDataArithmetic(1, mask=np.array([1, 0, 1], dtype=np.bool_))
with pytest.raises(ValueError):
nd1.add(nd2)
with pytest.raises(ValueError):
nd1.multiply(nd2)
with pytest.raises(ValueError):
nd1.subtract(nd2)
with pytest.raises(ValueError):
nd1.divide(nd2)
# Covering:
# both have uncertainties (data and uncertainty without unit)
# tested against manually determined resulting uncertainties to verify the
# implemented formulas
# this test only works as long as data1 and data2 do not contain any 0
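# For reference, the uncorrelated first-order propagation formulas checked below are:
#   addition / subtraction: sigma_c = sqrt(sigma_a**2 + sigma_b**2)
#   multiplication / division: sigma_c = |c| * sqrt((sigma_a / a)**2 + (sigma_b / b)**2)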
def test_arithmetics_stddevuncertainty_basic():
nd1 = NDDataArithmetic([1, 2, 3], uncertainty=StdDevUncertainty([1, 1, 3]))
nd2 = NDDataArithmetic([2, 2, 2], uncertainty=StdDevUncertainty([2, 2, 2]))
nd3 = nd1.add(nd2)
nd4 = nd2.add(nd1)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = np.sqrt(np.array([1, 1, 3]) ** 2 + np.array([2, 2, 2]) ** 2)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.subtract(nd2)
nd4 = nd2.subtract(nd1)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty (same as for add)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
# Multiplication and Division only work with almost equal array comparisons
# since the formula implemented and the formula used as reference are
# slightly different.
nd3 = nd1.multiply(nd2)
nd4 = nd2.multiply(nd1)
# Inverse operation should result in the same uncertainty
assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = np.abs(np.array([2, 4, 6])) * np.sqrt(
(np.array([1, 1, 3]) / np.array([1, 2, 3])) ** 2
+ (np.array([2, 2, 2]) / np.array([2, 2, 2])) ** 2
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.divide(nd2)
nd4 = nd2.divide(nd1)
# Inverse operation gives a different uncertainty!
# Compare it to the theoretical uncertainty
ref_uncertainty_1 = np.abs(np.array([1 / 2, 2 / 2, 3 / 2])) * np.sqrt(
(np.array([1, 1, 3]) / np.array([1, 2, 3])) ** 2
+ (np.array([2, 2, 2]) / np.array([2, 2, 2])) ** 2
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
ref_uncertainty_2 = np.abs(np.array([2, 1, 2 / 3])) * np.sqrt(
(np.array([1, 1, 3]) / np.array([1, 2, 3])) ** 2
+ (np.array([2, 2, 2]) / np.array([2, 2, 2])) ** 2
)
assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Tests for correlation, covering
# correlation between -1 and 1 with correlation term being positive / negative
# also with one of the inputs being either completely positive or completely negative
# The point of this test is to compare the used formula to the theoretical one.
# TODO: Maybe covering units too but I think that should work because of
# the next tests. Also this may be reduced somehow.
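# For reference, with correlation rho the propagated standard deviations checked below are:
#   c = a +/- b: sigma_c = sqrt(sigma_a**2 + sigma_b**2 +/- 2 * rho * |sigma_a * sigma_b|)
#   c = a * b (or a / b): sigma_c = |c| * sqrt((sigma_a / a)**2 + (sigma_b / b)**2
#                                              +/- 2 * rho * |sigma_a * sigma_b| / (a * b))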
@pytest.mark.parametrize(
("cor", "uncert1", "data2"),
[
(-1, [1, 1, 3], [2, 2, 7]),
(-0.5, [1, 1, 3], [2, 2, 7]),
(-0.25, [1, 1, 3], [2, 2, 7]),
(0, [1, 1, 3], [2, 2, 7]),
(0.25, [1, 1, 3], [2, 2, 7]),
(0.5, [1, 1, 3], [2, 2, 7]),
(1, [1, 1, 3], [2, 2, 7]),
(-1, [-1, -1, -3], [2, 2, 7]),
(-0.5, [-1, -1, -3], [2, 2, 7]),
(-0.25, [-1, -1, -3], [2, 2, 7]),
(0, [-1, -1, -3], [2, 2, 7]),
(0.25, [-1, -1, -3], [2, 2, 7]),
(0.5, [-1, -1, -3], [2, 2, 7]),
(1, [-1, -1, -3], [2, 2, 7]),
(-1, [1, 1, 3], [-2, -3, -2]),
(-0.5, [1, 1, 3], [-2, -3, -2]),
(-0.25, [1, 1, 3], [-2, -3, -2]),
(0, [1, 1, 3], [-2, -3, -2]),
(0.25, [1, 1, 3], [-2, -3, -2]),
(0.5, [1, 1, 3], [-2, -3, -2]),
(1, [1, 1, 3], [-2, -3, -2]),
(-1, [-1, -1, -3], [-2, -3, -2]),
(-0.5, [-1, -1, -3], [-2, -3, -2]),
(-0.25, [-1, -1, -3], [-2, -3, -2]),
(0, [-1, -1, -3], [-2, -3, -2]),
(0.25, [-1, -1, -3], [-2, -3, -2]),
(0.5, [-1, -1, -3], [-2, -3, -2]),
(1, [-1, -1, -3], [-2, -3, -2]),
],
)
def test_arithmetics_stddevuncertainty_basic_with_correlation(cor, uncert1, data2):
data1 = np.array([1, 2, 3])
data2 = np.array(data2)
uncert1 = np.array(uncert1)
uncert2 = np.array([2, 2, 2])
nd1 = NDDataArithmetic(data1, uncertainty=StdDevUncertainty(uncert1))
nd2 = NDDataArithmetic(data2, uncertainty=StdDevUncertainty(uncert2))
nd3 = nd1.add(nd2, uncertainty_correlation=cor)
nd4 = nd2.add(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = np.sqrt(
uncert1**2 + uncert2**2 + 2 * cor * np.abs(uncert1 * uncert2)
)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.subtract(nd2, uncertainty_correlation=cor)
nd4 = nd2.subtract(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = np.sqrt(
uncert1**2 + uncert2**2 - 2 * cor * np.abs(uncert1 * uncert2)
)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
# Multiplication and Division only work with almost equal array comparisons
# since the formula implemented and the formula used as reference are
# slightly different.
nd3 = nd1.multiply(nd2, uncertainty_correlation=cor)
nd4 = nd2.multiply(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = (np.abs(data1 * data2)) * np.sqrt(
(uncert1 / data1) ** 2
+ (uncert2 / data2) ** 2
+ (2 * cor * np.abs(uncert1 * uncert2) / (data1 * data2))
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.divide(nd2, uncertainty_correlation=cor)
nd4 = nd2.divide(nd1, uncertainty_correlation=cor)
# Inverse operation gives a different uncertainty!
# Compare it to the theoretical uncertainty
ref_uncertainty_1 = (np.abs(data1 / data2)) * np.sqrt(
(uncert1 / data1) ** 2
+ (uncert2 / data2) ** 2
- (2 * cor * np.abs(uncert1 * uncert2) / (data1 * data2))
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
ref_uncertainty_2 = (np.abs(data2 / data1)) * np.sqrt(
(uncert1 / data1) ** 2
+ (uncert2 / data2) ** 2
- (2 * cor * np.abs(uncert1 * uncert2) / (data1 * data2))
)
assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Tests for correlation, covering
# correlation between -1 and 1 with correlation term being positive / negative
# also with one of the inputs being either completely positive or completely negative
# The point of this test is to compare the used formula to the theoretical one.
# TODO: Maybe covering units too but I think that should work because of
# the next tests. Also this may be reduced somehow.
@pytest.mark.parametrize(
("cor", "uncert1", "data2"),
[
(-1, [1, 1, 3], [2, 2, 7]),
(-0.5, [1, 1, 3], [2, 2, 7]),
(-0.25, [1, 1, 3], [2, 2, 7]),
(0, [1, 1, 3], [2, 2, 7]),
(0.25, [1, 1, 3], [2, 2, 7]),
(0.5, [1, 1, 3], [2, 2, 7]),
(1, [1, 1, 3], [2, 2, 7]),
(-1, [-1, -1, -3], [2, 2, 7]),
(-0.5, [-1, -1, -3], [2, 2, 7]),
(-0.25, [-1, -1, -3], [2, 2, 7]),
(0, [-1, -1, -3], [2, 2, 7]),
(0.25, [-1, -1, -3], [2, 2, 7]),
(0.5, [-1, -1, -3], [2, 2, 7]),
(1, [-1, -1, -3], [2, 2, 7]),
(-1, [1, 1, 3], [-2, -3, -2]),
(-0.5, [1, 1, 3], [-2, -3, -2]),
(-0.25, [1, 1, 3], [-2, -3, -2]),
(0, [1, 1, 3], [-2, -3, -2]),
(0.25, [1, 1, 3], [-2, -3, -2]),
(0.5, [1, 1, 3], [-2, -3, -2]),
(1, [1, 1, 3], [-2, -3, -2]),
(-1, [-1, -1, -3], [-2, -3, -2]),
(-0.5, [-1, -1, -3], [-2, -3, -2]),
(-0.25, [-1, -1, -3], [-2, -3, -2]),
(0, [-1, -1, -3], [-2, -3, -2]),
(0.25, [-1, -1, -3], [-2, -3, -2]),
(0.5, [-1, -1, -3], [-2, -3, -2]),
(1, [-1, -1, -3], [-2, -3, -2]),
],
)
def test_arithmetics_varianceuncertainty_basic_with_correlation(cor, uncert1, data2):
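    # VarianceUncertainty stores sigma**2, so the reference expressions below are
    # the squared (variance) versions of the std-dev propagation formulas used above.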
data1 = np.array([1, 2, 3])
data2 = np.array(data2)
uncert1 = np.array(uncert1) ** 2
uncert2 = np.array([2, 2, 2]) ** 2
nd1 = NDDataArithmetic(data1, uncertainty=VarianceUncertainty(uncert1))
nd2 = NDDataArithmetic(data2, uncertainty=VarianceUncertainty(uncert2))
nd3 = nd1.add(nd2, uncertainty_correlation=cor)
nd4 = nd2.add(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = uncert1 + uncert2 + 2 * cor * np.sqrt(uncert1 * uncert2)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.subtract(nd2, uncertainty_correlation=cor)
nd4 = nd2.subtract(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = uncert1 + uncert2 - 2 * cor * np.sqrt(uncert1 * uncert2)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
# Multiplication and Division only work with almost equal array comparisons
# since the formula implemented and the formula used as reference are
# slightly different.
nd3 = nd1.multiply(nd2, uncertainty_correlation=cor)
nd4 = nd2.multiply(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = (data1 * data2) ** 2 * (
uncert1 / data1**2
+ uncert2 / data2**2
+ (2 * cor * np.sqrt(uncert1 * uncert2) / (data1 * data2))
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.divide(nd2, uncertainty_correlation=cor)
nd4 = nd2.divide(nd1, uncertainty_correlation=cor)
# Inverse operation gives a different uncertainty because of the
# prefactor nd1/nd2 vs nd2/nd1. However, a large chunk is the same.
ref_common = (
uncert1 / data1**2
+ uncert2 / data2**2
- (2 * cor * np.sqrt(uncert1 * uncert2) / (data1 * data2))
)
# Compare it to the theoretical uncertainty
ref_uncertainty_1 = (data1 / data2) ** 2 * ref_common
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
ref_uncertainty_2 = (data2 / data1) ** 2 * ref_common
assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Tests for correlation, covering
# correlation between -1 and 1 with correlation term being positive / negative
# also with one of the inputs being either completely positive or completely negative
# The point of this test is to compare the used formula to the theoretical one.
# TODO: Maybe covering units too but I think that should work because of
# the next tests. Also this may be reduced somehow.
@pytest.mark.filterwarnings("ignore:divide by zero encountered.*")
@pytest.mark.parametrize(
("cor", "uncert1", "data2"),
[
(-1, [1, 1, 3], [2, 2, 7]),
(-0.5, [1, 1, 3], [2, 2, 7]),
(-0.25, [1, 1, 3], [2, 2, 7]),
(0, [1, 1, 3], [2, 2, 7]),
(0.25, [1, 1, 3], [2, 2, 7]),
(0.5, [1, 1, 3], [2, 2, 7]),
(1, [1, 1, 3], [2, 2, 7]),
(-1, [-1, -1, -3], [2, 2, 7]),
(-0.5, [-1, -1, -3], [2, 2, 7]),
(-0.25, [-1, -1, -3], [2, 2, 7]),
(0, [-1, -1, -3], [2, 2, 7]),
(0.25, [-1, -1, -3], [2, 2, 7]),
(0.5, [-1, -1, -3], [2, 2, 7]),
(1, [-1, -1, -3], [2, 2, 7]),
(-1, [1, 1, 3], [-2, -3, -2]),
(-0.5, [1, 1, 3], [-2, -3, -2]),
(-0.25, [1, 1, 3], [-2, -3, -2]),
(0, [1, 1, 3], [-2, -3, -2]),
(0.25, [1, 1, 3], [-2, -3, -2]),
(0.5, [1, 1, 3], [-2, -3, -2]),
(1, [1, 1, 3], [-2, -3, -2]),
(-1, [-1, -1, -3], [-2, -3, -2]),
(-0.5, [-1, -1, -3], [-2, -3, -2]),
(-0.25, [-1, -1, -3], [-2, -3, -2]),
(0, [-1, -1, -3], [-2, -3, -2]),
(0.25, [-1, -1, -3], [-2, -3, -2]),
(0.5, [-1, -1, -3], [-2, -3, -2]),
(1, [-1, -1, -3], [-2, -3, -2]),
],
)
def test_arithmetics_inversevarianceuncertainty_basic_with_correlation(
cor, uncert1, data2
):
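    # InverseVariance stores 1 / sigma**2, so each reference below is the inverse of
    # the corresponding variance expression, with 1 / uncert substituted for the variance.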
data1 = np.array([1, 2, 3])
data2 = np.array(data2)
uncert1 = 1 / np.array(uncert1) ** 2
uncert2 = 1 / np.array([2, 2, 2]) ** 2
nd1 = NDDataArithmetic(data1, uncertainty=InverseVariance(uncert1))
nd2 = NDDataArithmetic(data2, uncertainty=InverseVariance(uncert2))
nd3 = nd1.add(nd2, uncertainty_correlation=cor)
nd4 = nd2.add(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = 1 / (
1 / uncert1 + 1 / uncert2 + 2 * cor / np.sqrt(uncert1 * uncert2)
)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.subtract(nd2, uncertainty_correlation=cor)
nd4 = nd2.subtract(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = 1 / (
1 / uncert1 + 1 / uncert2 - 2 * cor / np.sqrt(uncert1 * uncert2)
)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
# Multiplication and Division only work with almost equal array comparisons
# since the formula implemented and the formula used as reference are
# slightly different.
nd3 = nd1.multiply(nd2, uncertainty_correlation=cor)
nd4 = nd2.multiply(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = 1 / (
(data1 * data2) ** 2
* (
1 / uncert1 / data1**2
+ 1 / uncert2 / data2**2
+ (2 * cor / np.sqrt(uncert1 * uncert2) / (data1 * data2))
)
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.divide(nd2, uncertainty_correlation=cor)
nd4 = nd2.divide(nd1, uncertainty_correlation=cor)
# Inverse operation gives a different uncertainty because of the
# prefactor nd1/nd2 vs nd2/nd1. However, a large chunk is the same.
ref_common = (
1 / uncert1 / data1**2
+ 1 / uncert2 / data2**2
- (2 * cor / np.sqrt(uncert1 * uncert2) / (data1 * data2))
)
# Compare it to the theoretical uncertainty
ref_uncertainty_1 = 1 / ((data1 / data2) ** 2 * ref_common)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
ref_uncertainty_2 = 1 / ((data2 / data1) ** 2 * ref_common)
assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Covering:
# just an example that a np.ndarray works as correlation, no checks for
# the right result since these were basically done in the function above.
def test_arithmetics_stddevuncertainty_basic_with_correlation_array():
data1 = np.array([1, 2, 3])
data2 = np.array([1, 1, 1])
uncert1 = np.array([1, 1, 1])
uncert2 = np.array([2, 2, 2])
cor = np.array([0, 0.25, 0])
nd1 = NDDataArithmetic(data1, uncertainty=StdDevUncertainty(uncert1))
nd2 = NDDataArithmetic(data2, uncertainty=StdDevUncertainty(uncert2))
nd1.add(nd2, uncertainty_correlation=cor)
# Covering:
# That propagate throws an exception when correlation is given but the
# uncertainty does not support correlation.
def test_arithmetics_with_correlation_unsupported():
data1 = np.array([1, 2, 3])
data2 = np.array([1, 1, 1])
uncert1 = np.array([1, 1, 1])
uncert2 = np.array([2, 2, 2])
cor = 3
nd1 = NDDataArithmetic(data1, uncertainty=StdDevUncertaintyUncorrelated(uncert1))
nd2 = NDDataArithmetic(data2, uncertainty=StdDevUncertaintyUncorrelated(uncert2))
with pytest.raises(ValueError):
nd1.add(nd2, uncertainty_correlation=cor)
# Covering:
# only one has an uncertainty (data and uncertainty without unit)
# tested against the case where the other one has zero uncertainty. (this case
# must be correct because we tested it in the last case)
# Also verify that if the result of the data has negative values the resulting
# uncertainty has no negative values.
def test_arithmetics_stddevuncertainty_one_missing():
nd1 = NDDataArithmetic([1, -2, 3])
nd1_ref = NDDataArithmetic([1, -2, 3], uncertainty=StdDevUncertainty([0, 0, 0]))
nd2 = NDDataArithmetic([2, 2, -2], uncertainty=StdDevUncertainty([2, 2, 2]))
# Addition
nd3 = nd1.add(nd2)
nd3_ref = nd1_ref.add(nd2)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
nd3 = nd2.add(nd1)
nd3_ref = nd2.add(nd1_ref)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
# Subtraction
nd3 = nd1.subtract(nd2)
nd3_ref = nd1_ref.subtract(nd2)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
nd3 = nd2.subtract(nd1)
nd3_ref = nd2.subtract(nd1_ref)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
# Multiplication
nd3 = nd1.multiply(nd2)
nd3_ref = nd1_ref.multiply(nd2)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
nd3 = nd2.multiply(nd1)
nd3_ref = nd2.multiply(nd1_ref)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
# Division
nd3 = nd1.divide(nd2)
nd3_ref = nd1_ref.divide(nd2)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
nd3 = nd2.divide(nd1)
nd3_ref = nd2.divide(nd1_ref)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
# Covering:
# data with unit and uncertainty with unit (but equivalent units)
# compared against correctly scaled NDDatas
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide.*")
@pytest.mark.parametrize(
("uncert1", "uncert2"),
[
(np.array([1, 2, 3]) * u.m, None),
(np.array([1, 2, 3]) * u.cm, None),
(None, np.array([1, 2, 3]) * u.m),
(None, np.array([1, 2, 3]) * u.cm),
(np.array([1, 2, 3]), np.array([2, 3, 4])),
(np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4]) * u.m),
        (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4]) * u.m),
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4]) * u.cm),
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4]) * u.cm),
        (np.array([1, 2, 3]) * u.km, np.array([2, 3, 4]) * u.cm),
],
)
def test_arithmetics_stddevuncertainty_with_units(uncert1, uncert2):
# Data has same units
data1 = np.array([1, 2, 3]) * u.m
data2 = np.array([-4, 7, 0]) * u.m
if uncert1 is not None:
uncert1 = StdDevUncertainty(uncert1)
if isinstance(uncert1, Quantity):
uncert1_ref = uncert1.to_value(data1.unit)
else:
uncert1_ref = uncert1
uncert_ref1 = StdDevUncertainty(uncert1_ref, copy=True)
else:
uncert1 = None
uncert_ref1 = None
if uncert2 is not None:
uncert2 = StdDevUncertainty(uncert2)
if isinstance(uncert2, Quantity):
uncert2_ref = uncert2.to_value(data2.unit)
else:
uncert2_ref = uncert2
uncert_ref2 = StdDevUncertainty(uncert2_ref, copy=True)
else:
uncert2 = None
uncert_ref2 = None
nd1 = NDDataArithmetic(data1, uncertainty=uncert1)
nd2 = NDDataArithmetic(data2, uncertainty=uncert2)
nd1_ref = NDDataArithmetic(data1, uncertainty=uncert_ref1)
nd2_ref = NDDataArithmetic(data2, uncertainty=uncert_ref2)
# Let's start the tests
# Addition
nd3 = nd1.add(nd2)
nd3_ref = nd1_ref.add(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.add(nd1)
nd3_ref = nd2_ref.add(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Subtraction
nd3 = nd1.subtract(nd2)
nd3_ref = nd1_ref.subtract(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.subtract(nd1)
nd3_ref = nd2_ref.subtract(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Multiplication
nd3 = nd1.multiply(nd2)
nd3_ref = nd1_ref.multiply(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.multiply(nd1)
nd3_ref = nd2_ref.multiply(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Division
nd3 = nd1.divide(nd2)
nd3_ref = nd1_ref.divide(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.divide(nd1)
nd3_ref = nd2_ref.divide(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Covering:
# data with unit and uncertainty with unit (but equivalent units)
# compared against correctly scaled NDDatas
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide.*")
@pytest.mark.parametrize(
("uncert1", "uncert2"),
[
(np.array([1, 2, 3]) * u.m, None),
(np.array([1, 2, 3]) * u.cm, None),
(None, np.array([1, 2, 3]) * u.m),
(None, np.array([1, 2, 3]) * u.cm),
(np.array([1, 2, 3]), np.array([2, 3, 4])),
(np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4]) * u.m),
        (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4]) * u.m),
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4]) * u.cm),
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4]) * u.cm),
        (np.array([1, 2, 3]) * u.km, np.array([2, 3, 4]) * u.cm),
],
)
def test_arithmetics_varianceuncertainty_with_units(uncert1, uncert2):
# Data has same units
data1 = np.array([1, 2, 3]) * u.m
data2 = np.array([-4, 7, 0]) * u.m
if uncert1 is not None:
uncert1 = VarianceUncertainty(uncert1**2)
if isinstance(uncert1, Quantity):
uncert1_ref = uncert1.to_value(data1.unit**2)
else:
uncert1_ref = uncert1
uncert_ref1 = VarianceUncertainty(uncert1_ref, copy=True)
else:
uncert1 = None
uncert_ref1 = None
if uncert2 is not None:
uncert2 = VarianceUncertainty(uncert2**2)
if isinstance(uncert2, Quantity):
uncert2_ref = uncert2.to_value(data2.unit**2)
else:
uncert2_ref = uncert2
uncert_ref2 = VarianceUncertainty(uncert2_ref, copy=True)
else:
uncert2 = None
uncert_ref2 = None
nd1 = NDDataArithmetic(data1, uncertainty=uncert1)
nd2 = NDDataArithmetic(data2, uncertainty=uncert2)
nd1_ref = NDDataArithmetic(data1, uncertainty=uncert_ref1)
nd2_ref = NDDataArithmetic(data2, uncertainty=uncert_ref2)
# Let's start the tests
# Addition
nd3 = nd1.add(nd2)
nd3_ref = nd1_ref.add(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.add(nd1)
nd3_ref = nd2_ref.add(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Subtraction
nd3 = nd1.subtract(nd2)
nd3_ref = nd1_ref.subtract(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.subtract(nd1)
nd3_ref = nd2_ref.subtract(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Multiplication
nd3 = nd1.multiply(nd2)
nd3_ref = nd1_ref.multiply(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.multiply(nd1)
nd3_ref = nd2_ref.multiply(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Division
nd3 = nd1.divide(nd2)
nd3_ref = nd1_ref.divide(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.divide(nd1)
nd3_ref = nd2_ref.divide(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Covering:
# data with unit and uncertainty with unit (but equivalent units)
# compared against correctly scaled NDDatas
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide.*")
@pytest.mark.parametrize(
("uncert1", "uncert2"),
[
(np.array([1, 2, 3]) * u.m, None),
(np.array([1, 2, 3]) * u.cm, None),
(None, np.array([1, 2, 3]) * u.m),
(None, np.array([1, 2, 3]) * u.cm),
(np.array([1, 2, 3]), np.array([2, 3, 4])),
(np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4]) * u.m),
        (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4]) * u.m),
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4]) * u.cm),
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4]) * u.cm),
        (np.array([1, 2, 3]) * u.km, np.array([2, 3, 4]) * u.cm),
],
)
def test_arithmetics_inversevarianceuncertainty_with_units(uncert1, uncert2):
# Data has same units
data1 = np.array([1, 2, 3]) * u.m
data2 = np.array([-4, 7, 0]) * u.m
if uncert1 is not None:
uncert1 = InverseVariance(1 / uncert1**2)
if isinstance(uncert1, Quantity):
uncert1_ref = uncert1.to_value(1 / data1.unit**2)
else:
uncert1_ref = uncert1
uncert_ref1 = InverseVariance(uncert1_ref, copy=True)
else:
uncert1 = None
uncert_ref1 = None
if uncert2 is not None:
uncert2 = InverseVariance(1 / uncert2**2)
if isinstance(uncert2, Quantity):
uncert2_ref = uncert2.to_value(1 / data2.unit**2)
else:
uncert2_ref = uncert2
uncert_ref2 = InverseVariance(uncert2_ref, copy=True)
else:
uncert2 = None
uncert_ref2 = None
nd1 = NDDataArithmetic(data1, uncertainty=uncert1)
nd2 = NDDataArithmetic(data2, uncertainty=uncert2)
nd1_ref = NDDataArithmetic(data1, uncertainty=uncert_ref1)
nd2_ref = NDDataArithmetic(data2, uncertainty=uncert_ref2)
# Let's start the tests
# Addition
nd3 = nd1.add(nd2)
nd3_ref = nd1_ref.add(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.add(nd1)
nd3_ref = nd2_ref.add(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Subtraction
nd3 = nd1.subtract(nd2)
nd3_ref = nd1_ref.subtract(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.subtract(nd1)
nd3_ref = nd2_ref.subtract(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Multiplication
nd3 = nd1.multiply(nd2)
nd3_ref = nd1_ref.multiply(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.multiply(nd1)
nd3_ref = nd2_ref.multiply(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Division
nd3 = nd1.divide(nd2)
nd3_ref = nd1_ref.divide(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.divide(nd1)
nd3_ref = nd2_ref.divide(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Test abbreviation and long name for taking the first found meta, mask, wcs
@pytest.mark.parametrize("use_abbreviation", ["ff", "first_found"])
def test_arithmetics_handle_switches(use_abbreviation):
meta1 = {"a": 1}
meta2 = {"b": 2}
mask1 = True
mask2 = False
uncertainty1 = StdDevUncertainty([1, 2, 3])
uncertainty2 = StdDevUncertainty([1, 2, 3])
wcs1, wcs2 = nd_testing.create_two_unequal_wcs(naxis=1)
data1 = [1, 1, 1]
data2 = [1, 1, 1]
nd1 = NDDataArithmetic(
data1, meta=meta1, mask=mask1, wcs=wcs1, uncertainty=uncertainty1
)
nd2 = NDDataArithmetic(
data2, meta=meta2, mask=mask2, wcs=wcs2, uncertainty=uncertainty2
)
nd3 = NDDataArithmetic(data1)
# Both have the attributes but option None is chosen
nd_ = nd1.add(
nd2,
propagate_uncertainties=None,
handle_meta=None,
handle_mask=None,
compare_wcs=None,
)
assert nd_.wcs is None
assert len(nd_.meta) == 0
assert nd_.mask is None
assert nd_.uncertainty is None
# Only second has attributes and False is chosen
nd_ = nd3.add(
nd2,
propagate_uncertainties=False,
handle_meta=use_abbreviation,
handle_mask=use_abbreviation,
compare_wcs=use_abbreviation,
)
nd_testing.assert_wcs_seem_equal(nd_.wcs, wcs2)
assert nd_.meta == meta2
assert nd_.mask == mask2
assert_array_equal(nd_.uncertainty.array, uncertainty2.array)
# Only first has attributes and False is chosen
nd_ = nd1.add(
nd3,
propagate_uncertainties=False,
handle_meta=use_abbreviation,
handle_mask=use_abbreviation,
compare_wcs=use_abbreviation,
)
nd_testing.assert_wcs_seem_equal(nd_.wcs, wcs1)
assert nd_.meta == meta1
assert nd_.mask == mask1
assert_array_equal(nd_.uncertainty.array, uncertainty1.array)
def test_arithmetics_meta_func():
def meta_fun_func(meta1, meta2, take="first"):
if take == "first":
return meta1
else:
return meta2
meta1 = {"a": 1}
meta2 = {"a": 3, "b": 2}
mask1 = True
mask2 = False
uncertainty1 = StdDevUncertainty([1, 2, 3])
uncertainty2 = StdDevUncertainty([1, 2, 3])
data1 = [1, 1, 1]
data2 = [1, 1, 1]
nd1 = NDDataArithmetic(data1, meta=meta1, mask=mask1, uncertainty=uncertainty1)
nd2 = NDDataArithmetic(data2, meta=meta2, mask=mask2, uncertainty=uncertainty2)
nd3 = nd1.add(nd2, handle_meta=meta_fun_func)
assert nd3.meta["a"] == 1
assert "b" not in nd3.meta
nd4 = nd1.add(nd2, handle_meta=meta_fun_func, meta_take="second")
assert nd4.meta["a"] == 3
assert nd4.meta["b"] == 2
with pytest.raises(KeyError):
nd1.add(nd2, handle_meta=meta_fun_func, take="second")
def test_arithmetics_wcs_func():
def wcs_comp_func(wcs1, wcs2, tolerance=0.1):
if tolerance < 0.01:
return False
return True
meta1 = {"a": 1}
meta2 = {"a": 3, "b": 2}
mask1 = True
mask2 = False
uncertainty1 = StdDevUncertainty([1, 2, 3])
uncertainty2 = StdDevUncertainty([1, 2, 3])
wcs1, wcs2 = nd_testing.create_two_equal_wcs(naxis=1)
data1 = [1, 1, 1]
data2 = [1, 1, 1]
nd1 = NDDataArithmetic(
data1, meta=meta1, mask=mask1, wcs=wcs1, uncertainty=uncertainty1
)
nd2 = NDDataArithmetic(
data2, meta=meta2, mask=mask2, wcs=wcs2, uncertainty=uncertainty2
)
nd3 = nd1.add(nd2, compare_wcs=wcs_comp_func)
nd_testing.assert_wcs_seem_equal(nd3.wcs, wcs1)
# Fails because the function fails
with pytest.raises(ValueError):
nd1.add(nd2, compare_wcs=wcs_comp_func, wcs_tolerance=0.00001)
# Fails because for a parameter to be passed correctly to the function it
# needs the wcs_ prefix
with pytest.raises(KeyError):
nd1.add(nd2, compare_wcs=wcs_comp_func, tolerance=1)
def test_arithmetics_mask_func():
def mask_sad_func(mask1, mask2, fun=0):
if fun > 0.5:
return mask2
else:
return mask1
meta1 = {"a": 1}
meta2 = {"a": 3, "b": 2}
mask1 = [True, False, True]
mask2 = [True, False, False]
uncertainty1 = StdDevUncertainty([1, 2, 3])
uncertainty2 = StdDevUncertainty([1, 2, 3])
data1 = [1, 1, 1]
data2 = [1, 1, 1]
nd1 = NDDataArithmetic(data1, meta=meta1, mask=mask1, uncertainty=uncertainty1)
nd2 = NDDataArithmetic(data2, meta=meta2, mask=mask2, uncertainty=uncertainty2)
nd3 = nd1.add(nd2, handle_mask=mask_sad_func)
assert_array_equal(nd3.mask, nd1.mask)
nd4 = nd1.add(nd2, handle_mask=mask_sad_func, mask_fun=1)
assert_array_equal(nd4.mask, nd2.mask)
with pytest.raises(KeyError):
nd1.add(nd2, handle_mask=mask_sad_func, fun=1)
@pytest.mark.parametrize("meth", ["add", "subtract", "divide", "multiply"])
def test_two_argument_useage(meth):
ndd1 = NDDataArithmetic(np.ones((3, 3)))
ndd2 = NDDataArithmetic(np.ones((3, 3)))
# Call add on the class (not the instance) and compare it with already
# tested usage:
ndd3 = getattr(NDDataArithmetic, meth)(ndd1, ndd2)
ndd4 = getattr(ndd1, meth)(ndd2)
np.testing.assert_array_equal(ndd3.data, ndd4.data)
# And the same done on an unrelated instance...
ndd3 = getattr(NDDataArithmetic(-100), meth)(ndd1, ndd2)
ndd4 = getattr(ndd1, meth)(ndd2)
np.testing.assert_array_equal(ndd3.data, ndd4.data)
@pytest.mark.parametrize("meth", ["add", "subtract", "divide", "multiply"])
def test_two_argument_usage_non_nddata_first_arg(meth):
data1 = 50
data2 = 100
# Call add on the class (not the instance)
ndd3 = getattr(NDDataArithmetic, meth)(data1, data2)
    # Compare it with the instance usage and two identical NDData-like
# classes:
ndd1 = NDDataArithmetic(data1)
ndd2 = NDDataArithmetic(data2)
ndd4 = getattr(ndd1, meth)(ndd2)
np.testing.assert_array_equal(ndd3.data, ndd4.data)
# and check it's also working when called on an instance
ndd3 = getattr(NDDataArithmetic(-100), meth)(data1, data2)
ndd4 = getattr(ndd1, meth)(ndd2)
np.testing.assert_array_equal(ndd3.data, ndd4.data)
def test_arithmetics_unknown_uncertainties():
# Not giving any uncertainty class means it is saved as UnknownUncertainty
ndd1 = NDDataArithmetic(
np.ones((3, 3)), uncertainty=UnknownUncertainty(np.ones((3, 3)))
)
ndd2 = NDDataArithmetic(
np.ones((3, 3)), uncertainty=UnknownUncertainty(np.ones((3, 3)) * 2)
)
# There is no way to propagate uncertainties:
with pytest.raises(IncompatibleUncertaintiesException):
ndd1.add(ndd2)
# But it should be possible without propagation
ndd3 = ndd1.add(ndd2, propagate_uncertainties=False)
np.testing.assert_array_equal(ndd1.uncertainty.array, ndd3.uncertainty.array)
ndd4 = ndd1.add(ndd2, propagate_uncertainties=None)
assert ndd4.uncertainty is None
def test_psf_warning():
"""Test that math on objects with a psf warn."""
ndd1 = NDDataArithmetic(np.ones((3, 3)), psf=np.zeros(3))
ndd2 = NDDataArithmetic(np.ones((3, 3)), psf=None)
# no warning if both are None
ndd2.add(ndd2)
with pytest.warns(AstropyUserWarning, match="Not setting psf attribute during add"):
ndd1.add(ndd2)
with pytest.warns(AstropyUserWarning, match="Not setting psf attribute during add"):
ndd2.add(ndd1)
with pytest.warns(AstropyUserWarning, match="Not setting psf attribute during add"):
ndd1.add(ndd1)
def test_raise_method_not_supported():
ndd1 = NDDataArithmetic(np.zeros(3), uncertainty=StdDevUncertainty(np.zeros(3)))
ndd2 = NDDataArithmetic(np.ones(3), uncertainty=StdDevUncertainty(np.ones(3)))
result = np.zeros(3)
correlation = 0
# no error should be raised for supported operations:
ndd1.uncertainty.propagate(np.add, ndd2, result, correlation)
# raise error for unsupported propagation operations:
with pytest.raises(ValueError):
ndd1.uncertainty.propagate(np.mod, ndd2, result, correlation)
|
4eb717dde0a52b265843e0690d4572bdaf6777f5158045b233176484d3bf7d50 | import abc
from collections import OrderedDict, defaultdict
import numpy as np
from .utils import deserialize_class
__all__ = ["BaseHighLevelWCS", "HighLevelWCSMixin"]
def rec_getattr(obj, att):
for a in att.split("."):
obj = getattr(obj, a)
return obj
def default_order(components):
order = []
for key, _, _ in components:
if key not in order:
order.append(key)
return order
def _toindex(value):
"""Convert value to an int or an int array.
    Input coordinates are converted to integers
    corresponding to the center of the pixel.
The convention is that the center of the pixel is
(0, 0), while the lower left corner is (-0.5, -0.5).
The outputs are used to index the mask.
Examples
--------
>>> _toindex(np.array([-0.5, 0.49999]))
array([0, 0])
>>> _toindex(np.array([0.5, 1.49999]))
array([1, 1])
>>> _toindex(np.array([1.5, 2.49999]))
array([2, 2])
"""
indx = np.asarray(np.floor(np.asarray(value) + 0.5), dtype=int)
return indx
class BaseHighLevelWCS(metaclass=abc.ABCMeta):
"""
Abstract base class for the high-level WCS interface.
This is described in `APE 14: A shared Python interface for World Coordinate
Systems <https://doi.org/10.5281/zenodo.1188875>`_.
"""
@property
@abc.abstractmethod
def low_level_wcs(self):
"""
Returns a reference to the underlying low-level WCS object.
"""
@abc.abstractmethod
def pixel_to_world(self, *pixel_arrays):
"""
Convert pixel coordinates to world coordinates (represented by
high-level objects).
If a single high-level object is used to represent the world coordinates
(i.e., if ``len(wcs.world_axis_object_classes) == 1``), it is returned
as-is (not in a tuple/list), otherwise a tuple of high-level objects is
returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` for pixel
indexing and ordering conventions.
"""
def array_index_to_world(self, *index_arrays):
"""
Convert array indices to world coordinates (represented by Astropy
objects).
If a single high-level object is used to represent the world coordinates
(i.e., if ``len(wcs.world_axis_object_classes) == 1``), it is returned
as-is (not in a tuple/list), otherwise a tuple of high-level objects is
returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.array_index_to_world_values` for
pixel indexing and ordering conventions.
"""
return self.pixel_to_world(*index_arrays[::-1])
@abc.abstractmethod
def world_to_pixel(self, *world_objects):
"""
Convert world coordinates (represented by Astropy objects) to pixel
coordinates.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` for pixel
indexing and ordering conventions.
"""
def world_to_array_index(self, *world_objects):
"""
Convert world coordinates (represented by Astropy objects) to array
indices.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_array_index_values` for
pixel indexing and ordering conventions. The indices should be returned
as rounded integers.
"""
if self.low_level_wcs.pixel_n_dim == 1:
return _toindex(self.world_to_pixel(*world_objects))
else:
return tuple(_toindex(self.world_to_pixel(*world_objects)[::-1]).tolist())
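# A minimal, hedged usage sketch of the high-level API defined above (kept as
# a comment so nothing executes on import; it assumes a concrete 2-dimensional
# celestial implementation such as ``astropy.wcs.WCS``):
#
#     from astropy.wcs import WCS
#     w = WCS(naxis=2)
#     w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
#     sky = w.pixel_to_world(10, 20)            # a single SkyCoord
#     px, py = w.world_to_pixel(sky)            # float pixel coordinates
#     row, col = w.world_to_array_index(sky)    # rounded ints, reversed order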
def high_level_objects_to_values(*world_objects, low_level_wcs):
"""
Convert the input high level object to low level values.
This function uses the information in ``wcs.world_axis_object_classes`` and
``wcs.world_axis_object_components`` to convert the high level objects
(such as `~.SkyCoord`) to low level "values" `~.Quantity` objects.
This is used in `.HighLevelWCSMixin.world_to_pixel`, but provided as a
separate function for use in other places where needed.
Parameters
----------
*world_objects: object
High level coordinate objects.
low_level_wcs: `.BaseLowLevelWCS`
The WCS object to use to interpret the coordinates.
"""
# Cache the classes and components since this may be expensive
serialized_classes = low_level_wcs.world_axis_object_classes
components = low_level_wcs.world_axis_object_components
# Deserialize world_axis_object_classes using the default order
classes = OrderedDict()
for key in default_order(components):
if low_level_wcs.serialized_classes:
classes[key] = deserialize_class(serialized_classes[key], construct=False)
else:
classes[key] = serialized_classes[key]
# Check that the number of classes matches the number of inputs
if len(world_objects) != len(classes):
raise ValueError(
f"Number of world inputs ({len(world_objects)}) does not match expected"
f" ({len(classes)})"
)
# Determine whether the classes are uniquely matched, that is we check
# whether there is only one of each class.
world_by_key = {}
unique_match = True
for w in world_objects:
matches = []
for key, (klass, *_) in classes.items():
if isinstance(w, klass):
matches.append(key)
if len(matches) == 1:
world_by_key[matches[0]] = w
else:
unique_match = False
break
# If the match is not unique, the order of the classes needs to match,
# whereas if all classes are unique, we can still intelligently match
# them even if the order is wrong.
objects = {}
if unique_match:
for key, (klass, args, kwargs, *rest) in classes.items():
if len(rest) == 0:
klass_gen = klass
elif len(rest) == 1:
klass_gen = rest[0]
else:
raise ValueError(
"Tuples in world_axis_object_classes should have length 3 or 4"
)
# FIXME: For now SkyCoord won't auto-convert upon initialization
# https://github.com/astropy/astropy/issues/7689
from astropy.coordinates import SkyCoord
if isinstance(world_by_key[key], SkyCoord):
if "frame" in kwargs:
objects[key] = world_by_key[key].transform_to(kwargs["frame"])
else:
objects[key] = world_by_key[key]
else:
objects[key] = klass_gen(world_by_key[key], *args, **kwargs)
else:
for ikey, key in enumerate(classes):
klass, args, kwargs, *rest = classes[key]
if len(rest) == 0:
klass_gen = klass
elif len(rest) == 1:
klass_gen = rest[0]
else:
raise ValueError(
"Tuples in world_axis_object_classes should have length 3 or 4"
)
w = world_objects[ikey]
if not isinstance(w, klass):
raise ValueError(
"Expected the following order of world arguments:"
f" {', '.join([k.__name__ for (k, _, _) in classes.values()])}"
)
# FIXME: For now SkyCoord won't auto-convert upon initialization
# https://github.com/astropy/astropy/issues/7689
from astropy.coordinates import SkyCoord
if isinstance(w, SkyCoord):
if "frame" in kwargs:
objects[key] = w.transform_to(kwargs["frame"])
else:
objects[key] = w
else:
objects[key] = klass_gen(w, *args, **kwargs)
# We now extract the attributes needed for the world values
world = []
for key, _, attr in components:
if callable(attr):
world.append(attr(objects[key]))
else:
world.append(rec_getattr(objects[key], attr))
return world
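# Hedged sketch (comment only) of what the function above does, assuming a
# 2-dimensional celestial ``low_level_wcs`` named ``wcs`` whose high-level
# class is the usual SkyCoord:
#
#     from astropy.coordinates import SkyCoord
#     import astropy.units as u
#     sky = SkyCoord(10 * u.deg, 20 * u.deg)
#     lon, lat = high_level_objects_to_values(sky, low_level_wcs=wcs)
#     # lon, lat are bare numbers (degrees for a celestial WCS), suitable for
#     # wcs.world_to_pixel_values(lon, lat)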
def values_to_high_level_objects(*world_values, low_level_wcs):
"""
Convert low level values into high level objects.
This function uses the information in ``wcs.world_axis_object_classes`` and
``wcs.world_axis_object_components`` to convert low level "values"
    `~.Quantity` objects to high level objects (such as `~.SkyCoord`).
This is used in `.HighLevelWCSMixin.pixel_to_world`, but provided as a
separate function for use in other places where needed.
Parameters
----------
*world_values: object
Low level, "values" representations of the world coordinates.
low_level_wcs: `.BaseLowLevelWCS`
The WCS object to use to interpret the coordinates.
"""
# Cache the classes and components since this may be expensive
components = low_level_wcs.world_axis_object_components
classes = low_level_wcs.world_axis_object_classes
# Deserialize classes
if low_level_wcs.serialized_classes:
classes_new = {}
for key, value in classes.items():
classes_new[key] = deserialize_class(value, construct=False)
classes = classes_new
args = defaultdict(list)
kwargs = defaultdict(dict)
for i, (key, attr, _) in enumerate(components):
if isinstance(attr, str):
kwargs[key][attr] = world_values[i]
else:
while attr > len(args[key]) - 1:
args[key].append(None)
args[key][attr] = world_values[i]
result = []
for key in default_order(components):
klass, ar, kw, *rest = classes[key]
if len(rest) == 0:
klass_gen = klass
elif len(rest) == 1:
klass_gen = rest[0]
else:
raise ValueError(
"Tuples in world_axis_object_classes should have length 3 or 4"
)
result.append(klass_gen(*args[key], *ar, **kwargs[key], **kw))
return result
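# And the inverse direction, under the same assumptions as the sketch above
# (comment only):
#
#     (sky_roundtrip,) = values_to_high_level_objects(lon, lat, low_level_wcs=wcs)
#     # sky_roundtrip is a SkyCoord equivalent to the original ``sky``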
class HighLevelWCSMixin(BaseHighLevelWCS):
"""
Mix-in class that automatically provides the high-level WCS API for the
low-level WCS object given by the `~HighLevelWCSMixin.low_level_wcs`
property.
"""
@property
def low_level_wcs(self):
return self
def world_to_pixel(self, *world_objects):
world_values = high_level_objects_to_values(
*world_objects, low_level_wcs=self.low_level_wcs
)
# Finally we convert to pixel coordinates
pixel_values = self.low_level_wcs.world_to_pixel_values(*world_values)
return pixel_values
def pixel_to_world(self, *pixel_arrays):
# Compute the world coordinate values
world_values = self.low_level_wcs.pixel_to_world_values(*pixel_arrays)
if self.low_level_wcs.world_n_dim == 1:
world_values = (world_values,)
        world_objects = values_to_high_level_objects(
            *world_values, low_level_wcs=self.low_level_wcs
        )
        if len(world_objects) == 1:
            return world_objects[0]
        else:
            return world_objects
|
88d39aada0e95659c922cea2c44fa00bacee429f0364af2d7435359045f41119 | # This file includes the definition of a mix-in class that provides the low-
# and high-level WCS API to the astropy.wcs.WCS object. We keep this code
# isolated in this mix-in class to avoid making the main wcs.py file too
# long.
import warnings
import numpy as np
from astropy import units as u
from astropy.constants import c
from astropy.coordinates import ICRS, Galactic, SpectralCoord
from astropy.coordinates.spectral_coordinate import (
attach_zero_velocities,
update_differentials_to_match,
)
from astropy.utils.exceptions import AstropyUserWarning
from .high_level_api import HighLevelWCSMixin
from .low_level_api import BaseLowLevelWCS
from .wrappers import SlicedLowLevelWCS
__all__ = ["custom_ctype_to_ucd_mapping", "SlicedFITSWCS", "FITSWCSAPIMixin"]
C_SI = c.si.value
VELOCITY_FRAMES = {
"GEOCENT": "gcrs",
"BARYCENT": "icrs",
"HELIOCENT": "hcrs",
"LSRK": "lsrk",
"LSRD": "lsrd",
}
# The spectra velocity frames below are needed for FITS spectral WCS
# (see Greisen 06 table 12) but aren't yet defined as real
# astropy.coordinates frames, so we instead define them here as instances
# of existing coordinate frames with offset velocities. In future we should
# make these real frames so that users can more easily recognize these
# velocity frames when used in SpectralCoord.
# This frame is defined as a velocity of 220 km/s in the
# direction of l=90, b=0. The rotation velocity is defined
# in:
#
# Kerr and Lynden-Bell 1986, Review of galactic constants.
#
# NOTE: this may differ from the assumptions of galcen_v_sun
# in the Galactocentric frame - the value used here is
# the one adopted by the WCS standard for spectral
# transformations.
VELOCITY_FRAMES["GALACTOC"] = Galactic(
u=0 * u.km,
v=0 * u.km,
w=0 * u.km,
U=0 * u.km / u.s,
V=-220 * u.km / u.s,
W=0 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
# This frame is defined as a velocity of 300 km/s in the
# direction of l=90, b=0. This is defined in:
#
# Transactions of the IAU Vol. XVI B Proceedings of the
# 16th General Assembly, Reports of Meetings of Commissions:
# Comptes Rendus Des Séances Des Commissions, Commission 28,
# p201.
#
# Note that these values differ from those used by CASA
# (308 km/s towards l=105, b=-7) but we use the above values
# since these are the ones defined in Greisen et al (2006).
VELOCITY_FRAMES["LOCALGRP"] = Galactic(
u=0 * u.km,
v=0 * u.km,
w=0 * u.km,
U=0 * u.km / u.s,
V=-300 * u.km / u.s,
W=0 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
# This frame is defined as a velocity of 368 km/s in the
# direction of l=263.85, b=48.25. This is defined in:
#
# Bennett et al. (2003), First-Year Wilkinson Microwave
# Anisotropy Probe (WMAP) Observations: Preliminary Maps
# and Basic Results
#
# Note that in that paper, the dipole is expressed as a
# temperature (T=3.346 +/- 0.017mK)
VELOCITY_FRAMES["CMBDIPOL"] = Galactic(
l=263.85 * u.deg,
b=48.25 * u.deg,
distance=0 * u.km,
radial_velocity=-(3.346e-3 / 2.725 * c).to(u.km / u.s),
)
# Mapping from CTYPE axis name to UCD1
CTYPE_TO_UCD1 = {
# Celestial coordinates
"RA": "pos.eq.ra",
"DEC": "pos.eq.dec",
"GLON": "pos.galactic.lon",
"GLAT": "pos.galactic.lat",
"ELON": "pos.ecliptic.lon",
"ELAT": "pos.ecliptic.lat",
"TLON": "pos.bodyrc.lon",
"TLAT": "pos.bodyrc.lat",
"HPLT": "custom:pos.helioprojective.lat",
"HPLN": "custom:pos.helioprojective.lon",
"HPRZ": "custom:pos.helioprojective.z",
"HGLN": "custom:pos.heliographic.stonyhurst.lon",
"HGLT": "custom:pos.heliographic.stonyhurst.lat",
"CRLN": "custom:pos.heliographic.carrington.lon",
"CRLT": "custom:pos.heliographic.carrington.lat",
"SOLX": "custom:pos.heliocentric.x",
"SOLY": "custom:pos.heliocentric.y",
"SOLZ": "custom:pos.heliocentric.z",
# Spectral coordinates (WCS paper 3)
"FREQ": "em.freq", # Frequency
"ENER": "em.energy", # Energy
"WAVN": "em.wavenumber", # Wavenumber
"WAVE": "em.wl", # Vacuum wavelength
"VRAD": "spect.dopplerVeloc.radio", # Radio velocity
"VOPT": "spect.dopplerVeloc.opt", # Optical velocity
"ZOPT": "src.redshift", # Redshift
"AWAV": "em.wl", # Air wavelength
"VELO": "spect.dopplerVeloc", # Apparent radial velocity
"BETA": "custom:spect.doplerVeloc.beta", # Beta factor (v/c)
"STOKES": "phys.polarization.stokes", # STOKES parameters
# Time coordinates (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)
"TIME": "time",
"TAI": "time",
"TT": "time",
"TDT": "time",
"ET": "time",
"IAT": "time",
"UT1": "time",
"UTC": "time",
"GMT": "time",
"GPS": "time",
"TCG": "time",
"TCB": "time",
"TDB": "time",
"LOCAL": "time",
# Distance coordinates
"DIST": "pos.distance",
"DSUN": "custom:pos.distance.sunToObserver"
# UT() and TT() are handled separately in world_axis_physical_types
}
# Keep a list of additional custom mappings that have been registered. This
# is kept as a list in case nested context managers are used
CTYPE_TO_UCD1_CUSTOM = []
class custom_ctype_to_ucd_mapping:
"""
A context manager that makes it possible to temporarily add new CTYPE to
UCD1+ mapping used by :attr:`FITSWCSAPIMixin.world_axis_physical_types`.
Parameters
----------
mapping : dict
A dictionary mapping a CTYPE value to a UCD1+ value
Examples
--------
Consider a WCS with the following CTYPE::
>>> from astropy.wcs import WCS
>>> wcs = WCS(naxis=1)
>>> wcs.wcs.ctype = ['SPAM']
By default, :attr:`FITSWCSAPIMixin.world_axis_physical_types` returns `None`,
but this can be overridden::
>>> wcs.world_axis_physical_types
[None]
>>> with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
... wcs.world_axis_physical_types
['food.spam']
"""
def __init__(self, mapping):
CTYPE_TO_UCD1_CUSTOM.insert(0, mapping)
self.mapping = mapping
def __enter__(self):
pass
def __exit__(self, type, value, tb):
CTYPE_TO_UCD1_CUSTOM.remove(self.mapping)
class SlicedFITSWCS(SlicedLowLevelWCS, HighLevelWCSMixin):
pass
class FITSWCSAPIMixin(BaseLowLevelWCS, HighLevelWCSMixin):
"""
A mix-in class that is intended to be inherited by the
:class:`~astropy.wcs.WCS` class and provides the low- and high-level WCS API.
"""
@property
def pixel_n_dim(self):
return self.naxis
@property
def world_n_dim(self):
return len(self.wcs.ctype)
@property
def array_shape(self):
if self.pixel_shape is None:
return None
else:
return self.pixel_shape[::-1]
@array_shape.setter
def array_shape(self, value):
if value is None:
self.pixel_shape = None
else:
self.pixel_shape = value[::-1]
@property
def pixel_shape(self):
if self._naxis == [0, 0]:
return None
else:
return tuple(self._naxis)
@pixel_shape.setter
def pixel_shape(self, value):
if value is None:
self._naxis = [0, 0]
else:
if len(value) != self.naxis:
raise ValueError(
f"The number of data axes, {self.naxis}, does not equal the shape"
f" {len(value)}."
)
self._naxis = list(value)
@property
def pixel_bounds(self):
return self._pixel_bounds
@pixel_bounds.setter
def pixel_bounds(self, value):
if value is None:
self._pixel_bounds = value
else:
if len(value) != self.naxis:
raise ValueError(
"The number of data axes, "
f"{self.naxis}, does not equal the number of "
f"pixel bounds {len(value)}."
)
self._pixel_bounds = list(value)
@property
def world_axis_physical_types(self):
types = []
# TODO: need to support e.g. TT(TAI)
for ctype in self.wcs.ctype:
if ctype.upper().startswith(("UT(", "TT(")):
types.append("time")
else:
ctype_name = ctype.split("-")[0]
for custom_mapping in CTYPE_TO_UCD1_CUSTOM:
if ctype_name in custom_mapping:
types.append(custom_mapping[ctype_name])
break
else:
types.append(CTYPE_TO_UCD1.get(ctype_name.upper(), None))
return types
@property
def world_axis_units(self):
units = []
for unit in self.wcs.cunit:
if unit is None:
unit = ""
elif isinstance(unit, u.Unit):
unit = unit.to_string(format="vounit")
else:
try:
unit = u.Unit(unit).to_string(format="vounit")
except u.UnitsError:
unit = ""
units.append(unit)
return units
@property
def world_axis_names(self):
return list(self.wcs.cname)
@property
def axis_correlation_matrix(self):
# If there are any distortions present, we assume that there may be
# correlations between all axes. Maybe if some distortions only apply
# to the image plane we can improve this?
if self.has_distortion:
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
# Assuming linear world coordinates along each axis, the correlation
# matrix would be given by whether or not the PC matrix is zero
matrix = self.wcs.get_pc() != 0
# We now need to check specifically for celestial coordinates since
# these can assume correlations because of spherical distortions. For
# each celestial coordinate we copy over the pixel dependencies from
# the other celestial coordinates.
celestial = (self.wcs.axis_types // 1000) % 10 == 2
celestial_indices = np.nonzero(celestial)[0]
for world1 in celestial_indices:
for world2 in celestial_indices:
if world1 != world2:
matrix[world1] |= matrix[world2]
matrix[world2] |= matrix[world1]
return matrix
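    # Hedged illustration (comment only): for a default 2-axis WCS with an
    # identity PC matrix, no distortion and no celestial axes, the property
    # above reduces to a diagonal matrix,
    #
    #     WCS(naxis=2).axis_correlation_matrix
    #     # -> [[ True, False],
    #     #     [False,  True]]
    #
    # while a pair of celestial axes is always reported as mutually coupled,
    # because the loop above ORs their pixel dependencies together.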
def pixel_to_world_values(self, *pixel_arrays):
world = self.all_pix2world(*pixel_arrays, 0)
return world[0] if self.world_n_dim == 1 else tuple(world)
def world_to_pixel_values(self, *world_arrays):
# avoid circular import
from astropy.wcs.wcs import NoConvergence
try:
pixel = self.all_world2pix(*world_arrays, 0)
except NoConvergence as e:
warnings.warn(str(e))
# use best_solution contained in the exception and format the same
# way as all_world2pix does (using _array_converter)
pixel = self._array_converter(
lambda *args: e.best_solution, "input", *world_arrays, 0
)
return pixel[0] if self.pixel_n_dim == 1 else tuple(pixel)
@property
def world_axis_object_components(self):
return self._get_components_and_classes()[0]
@property
def world_axis_object_classes(self):
return self._get_components_and_classes()[1]
@property
def serialized_classes(self):
return False
def _get_components_and_classes(self):
# The aim of this function is to return whatever is needed for
# world_axis_object_components and world_axis_object_classes. It's easier
# to figure it out in one go and then return the values and let the
# properties return part of it.
# Since this method might get called quite a few times, we need to cache
# it. We start off by defining a hash based on the attributes of the
# WCS that matter here (we can't just use the WCS object as a hash since
# it is mutable)
wcs_hash = (
self.naxis,
list(self.wcs.ctype),
list(self.wcs.cunit),
self.wcs.radesys,
self.wcs.specsys,
self.wcs.equinox,
self.wcs.dateobs,
self.wcs.lng,
self.wcs.lat,
)
# If the cache is present, we need to check that the 'hash' matches.
if getattr(self, "_components_and_classes_cache", None) is not None:
cache = self._components_and_classes_cache
if cache[0] == wcs_hash:
return cache[1]
else:
self._components_and_classes_cache = None
# Avoid circular imports by importing here
from astropy.coordinates import EarthLocation, SkyCoord
from astropy.time import Time, TimeDelta
from astropy.time.formats import FITS_DEPRECATED_SCALES
from astropy.wcs.utils import wcs_to_celestial_frame
components = [None] * self.naxis
classes = {}
# Let's start off by checking whether the WCS has a pair of celestial
# components
if self.has_celestial:
try:
celestial_frame = wcs_to_celestial_frame(self)
except ValueError:
# Some WCSes, e.g. solar, can be recognized by WCSLIB as being
# celestial but we don't necessarily have frames for them.
celestial_frame = None
else:
kwargs = {}
kwargs["frame"] = celestial_frame
kwargs["unit"] = u.deg
classes["celestial"] = (SkyCoord, (), kwargs)
components[self.wcs.lng] = ("celestial", 0, "spherical.lon.degree")
components[self.wcs.lat] = ("celestial", 1, "spherical.lat.degree")
# Next, we check for spectral components
if self.has_spectral:
# Find index of spectral coordinate
ispec = self.wcs.spec
ctype = self.wcs.ctype[ispec][:4]
ctype = ctype.upper()
kwargs = {}
# Determine observer location and velocity
# TODO: determine how WCS standard would deal with observer on a
# spacecraft far from earth. For now assume the obsgeo parameters,
# if present, give the geocentric observer location.
if np.isnan(self.wcs.obsgeo[0]):
observer = None
else:
earth_location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)
# Get the time scale from TIMESYS or fall back to 'utc'
tscale = self.wcs.timesys or "utc"
if np.isnan(self.wcs.mjdavg):
obstime = Time(
self.wcs.mjdobs,
format="mjd",
scale=tscale,
location=earth_location,
)
else:
obstime = Time(
self.wcs.mjdavg,
format="mjd",
scale=tscale,
location=earth_location,
)
observer_location = SkyCoord(earth_location.get_itrs(obstime=obstime))
if self.wcs.specsys in VELOCITY_FRAMES:
frame = VELOCITY_FRAMES[self.wcs.specsys]
observer = observer_location.transform_to(frame)
if isinstance(frame, str):
observer = attach_zero_velocities(observer)
else:
observer = update_differentials_to_match(
observer_location,
VELOCITY_FRAMES[self.wcs.specsys],
preserve_observer_frame=True,
)
elif self.wcs.specsys == "TOPOCENT":
observer = attach_zero_velocities(observer_location)
else:
raise NotImplementedError(
f"SPECSYS={self.wcs.specsys} not yet supported"
)
# Determine target
            # This is trickier. In principle the target for each pixel is the
# celestial coordinates of the pixel, but we then need to be very
# careful about SSYSOBS which is tricky. For now, we set the
# target using the reference celestial coordinate in the WCS (if
# any).
if self.has_celestial and celestial_frame is not None:
# NOTE: celestial_frame was defined higher up
# NOTE: we set the distance explicitly to avoid warnings in SpectralCoord
target = SkyCoord(
self.wcs.crval[self.wcs.lng] * self.wcs.cunit[self.wcs.lng],
self.wcs.crval[self.wcs.lat] * self.wcs.cunit[self.wcs.lat],
frame=celestial_frame,
distance=1000 * u.kpc,
)
target = attach_zero_velocities(target)
else:
target = None
# SpectralCoord does not work properly if either observer or target
# are not convertible to ICRS, so if this is the case, we (for now)
# drop the observer and target from the SpectralCoord and warn the
# user.
if observer is not None:
try:
observer.transform_to(ICRS())
except Exception:
warnings.warn(
"observer cannot be converted to ICRS, so will "
"not be set on SpectralCoord",
AstropyUserWarning,
)
observer = None
if target is not None:
try:
target.transform_to(ICRS())
except Exception:
warnings.warn(
"target cannot be converted to ICRS, so will "
"not be set on SpectralCoord",
AstropyUserWarning,
)
target = None
# NOTE: below we include Quantity in classes['spectral'] instead
# of SpectralCoord - this is because we want to also be able to
# accept plain quantities.
if ctype == "ZOPT":
def spectralcoord_from_redshift(redshift):
if isinstance(redshift, SpectralCoord):
return redshift
return SpectralCoord(
(redshift + 1) * self.wcs.restwav,
unit=u.m,
observer=observer,
target=target,
)
def redshift_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
if (
observer is None
or spectralcoord.observer is None
or spectralcoord.target is None
):
if observer is None:
msg = "No observer defined on WCS"
elif spectralcoord.observer is None:
msg = "No observer defined on SpectralCoord"
else:
msg = "No target defined on SpectralCoord"
warnings.warn(
f"{msg}, SpectralCoord "
"will be converted without any velocity "
"frame change",
AstropyUserWarning,
)
return spectralcoord.to_value(u.m) / self.wcs.restwav - 1.0
else:
return (
spectralcoord.with_observer_stationary_relative_to(
observer
).to_value(u.m)
/ self.wcs.restwav
- 1.0
)
classes["spectral"] = (u.Quantity, (), {}, spectralcoord_from_redshift)
components[self.wcs.spec] = ("spectral", 0, redshift_from_spectralcoord)
elif ctype == "BETA":
def spectralcoord_from_beta(beta):
if isinstance(beta, SpectralCoord):
return beta
return SpectralCoord(
beta * C_SI,
unit=u.m / u.s,
doppler_convention="relativistic",
doppler_rest=self.wcs.restwav * u.m,
observer=observer,
target=target,
)
def beta_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
doppler_equiv = u.doppler_relativistic(self.wcs.restwav * u.m)
if (
observer is None
or spectralcoord.observer is None
or spectralcoord.target is None
):
if observer is None:
msg = "No observer defined on WCS"
elif spectralcoord.observer is None:
msg = "No observer defined on SpectralCoord"
else:
msg = "No target defined on SpectralCoord"
warnings.warn(
f"{msg}, SpectralCoord "
"will be converted without any velocity "
"frame change",
AstropyUserWarning,
)
return spectralcoord.to_value(u.m / u.s, doppler_equiv) / C_SI
else:
return (
spectralcoord.with_observer_stationary_relative_to(
observer
).to_value(u.m / u.s, doppler_equiv)
/ C_SI
)
classes["spectral"] = (u.Quantity, (), {}, spectralcoord_from_beta)
components[self.wcs.spec] = ("spectral", 0, beta_from_spectralcoord)
else:
kwargs["unit"] = self.wcs.cunit[ispec]
if self.wcs.restfrq > 0:
if ctype == "VELO":
kwargs["doppler_convention"] = "relativistic"
kwargs["doppler_rest"] = self.wcs.restfrq * u.Hz
elif ctype == "VRAD":
kwargs["doppler_convention"] = "radio"
kwargs["doppler_rest"] = self.wcs.restfrq * u.Hz
elif ctype == "VOPT":
kwargs["doppler_convention"] = "optical"
kwargs["doppler_rest"] = self.wcs.restwav * u.m
def spectralcoord_from_value(value):
if isinstance(value, SpectralCoord):
return value
return SpectralCoord(
value, observer=observer, target=target, **kwargs
)
def value_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
if (
observer is None
or spectralcoord.observer is None
or spectralcoord.target is None
):
if observer is None:
msg = "No observer defined on WCS"
elif spectralcoord.observer is None:
msg = "No observer defined on SpectralCoord"
else:
msg = "No target defined on SpectralCoord"
warnings.warn(
f"{msg}, SpectralCoord "
"will be converted without any velocity "
"frame change",
AstropyUserWarning,
)
return spectralcoord.to_value(**kwargs)
else:
return spectralcoord.with_observer_stationary_relative_to(
observer
).to_value(**kwargs)
classes["spectral"] = (u.Quantity, (), {}, spectralcoord_from_value)
components[self.wcs.spec] = ("spectral", 0, value_from_spectralcoord)
# We can then make sure we correctly return Time objects where appropriate
# (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)
if "time" in self.world_axis_physical_types:
multiple_time = self.world_axis_physical_types.count("time") > 1
for i in range(self.naxis):
if self.world_axis_physical_types[i] == "time":
if multiple_time:
name = f"time.{i}"
else:
name = "time"
# Initialize delta
reference_time_delta = None
# Extract time scale
scale = self.wcs.ctype[i].lower()
if scale == "time":
if self.wcs.timesys:
scale = self.wcs.timesys.lower()
else:
scale = "utc"
# Drop sub-scales
if "(" in scale:
pos = scale.index("(")
scale, subscale = scale[:pos], scale[pos + 1 : -1]
warnings.warn(
"Dropping unsupported sub-scale "
f"{subscale.upper()} from scale {scale.upper()}",
UserWarning,
)
# TODO: consider having GPS as a scale in Time
# For now GPS is not a scale, we approximate this by TAI - 19s
if scale == "gps":
reference_time_delta = TimeDelta(19, format="sec")
scale = "tai"
elif scale.upper() in FITS_DEPRECATED_SCALES:
scale = FITS_DEPRECATED_SCALES[scale.upper()]
elif scale not in Time.SCALES:
raise ValueError(f"Unrecognized time CTYPE={self.wcs.ctype[i]}")
# Determine location
trefpos = self.wcs.trefpos.lower()
if trefpos.startswith("topocent"):
# Note that some headers use TOPOCENT instead of TOPOCENTER
if np.any(np.isnan(self.wcs.obsgeo[:3])):
warnings.warn(
"Missing or incomplete observer location "
"information, setting location in Time to None",
UserWarning,
)
location = None
else:
location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)
elif trefpos == "geocenter":
location = EarthLocation(0, 0, 0, unit=u.m)
elif trefpos == "":
location = None
else:
# TODO: implement support for more locations when Time supports it
warnings.warn(
f"Observation location '{trefpos}' is not "
"supported, setting location in Time to None",
UserWarning,
)
location = None
reference_time = Time(
np.nan_to_num(self.wcs.mjdref[0]),
np.nan_to_num(self.wcs.mjdref[1]),
format="mjd",
scale=scale,
location=location,
)
if reference_time_delta is not None:
reference_time = reference_time + reference_time_delta
def time_from_reference_and_offset(offset):
if isinstance(offset, Time):
return offset
return reference_time + TimeDelta(offset, format="sec")
def offset_from_time_and_reference(time):
return (time - reference_time).sec
classes[name] = (Time, (), {}, time_from_reference_and_offset)
components[i] = (name, 0, offset_from_time_and_reference)
# Fallback: for any remaining components that haven't been identified, just
# return Quantity as the class to use
for i in range(self.naxis):
if components[i] is None:
name = self.wcs.ctype[i].split("-")[0].lower()
if name == "":
name = "world"
while name in classes:
name += "_"
classes[name] = (u.Quantity, (), {"unit": self.wcs.cunit[i]})
components[i] = (name, 0, "value")
# Keep a cached version of result
self._components_and_classes_cache = wcs_hash, (components, classes)
return components, classes
|
1acc1034612d79a5ca13ecbf4bb336071a2a66936eea2b7b9b88751735c7dc4b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
from contextlib import nullcontext
from datetime import datetime
import numpy as np
import pytest
from numpy.testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_almost_equal_nulp,
assert_array_equal,
)
from packaging.version import Version
from astropy import units as u
from astropy import wcs
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.nddata import Cutout2D
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.data import (
get_pkg_data_contents,
get_pkg_data_filename,
get_pkg_data_filenames,
)
from astropy.utils.exceptions import (
AstropyDeprecationWarning,
AstropyUserWarning,
AstropyWarning,
)
from astropy.utils.misc import NumpyRNGContext
from astropy.wcs import _wcs
_WCSLIB_VER = Version(_wcs.__version__)
# NOTE: User can choose to use system wcslib instead of bundled.
def ctx_for_v71_dateref_warnings():
if _WCSLIB_VER >= Version("7.1") and _WCSLIB_VER < Version("7.3"):
ctx = pytest.warns(
wcs.FITSFixedWarning,
match=(
r"'datfix' made the change 'Set DATE-REF to '1858-11-17' from"
r" MJD-REF'\."
),
)
else:
ctx = nullcontext()
return ctx
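# Hedged usage note (comment only): tests below wrap WCS construction in this
# helper so that the wcslib-version-dependent 'datfix' DATE-REF warning is
# asserted only where it is actually emitted, e.g.
#
#     with ctx_for_v71_dateref_warnings():
#         w = wcs.WCS({})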
class TestMaps:
def setup_method(self):
# get the list of the hdr files that we want to test
self._file_list = list(get_pkg_data_filenames("data/maps", pattern="*.hdr"))
def test_consistency(self):
# Check to see that we actually have the list we expect, so that we
# do not get in a situation where the list is empty or incomplete and
# the tests still seem to pass correctly.
# how many do we expect to see?
n_data_files = 28
assert len(self._file_list) == n_data_files, (
f"test_spectra has wrong number data files: found {len(self._file_list)},"
f" expected {n_data_files}"
)
def test_maps(self):
for filename in self._file_list:
# use the base name of the file, so we get more useful messages
# for failing tests.
filename = os.path.basename(filename)
# Now find the associated file in the installed wcs test directory.
header = get_pkg_data_contents(
os.path.join("data", "maps", filename), encoding="binary"
)
# finally run the test.
wcsobj = wcs.WCS(header)
world = wcsobj.wcs_pix2world([[97, 97]], 1)
assert_array_almost_equal(world, [[285.0, -66.25]], decimal=1)
pix = wcsobj.wcs_world2pix([[285.0, -66.25]], 1)
assert_array_almost_equal(pix, [[97, 97]], decimal=0)
class TestSpectra:
def setup_method(self):
self._file_list = list(get_pkg_data_filenames("data/spectra", pattern="*.hdr"))
def test_consistency(self):
# Check to see that we actually have the list we expect, so that we
# do not get in a situation where the list is empty or incomplete and
# the tests still seem to pass correctly.
# how many do we expect to see?
n_data_files = 6
assert len(self._file_list) == n_data_files, (
f"test_spectra has wrong number data files: found {len(self._file_list)},"
f" expected {n_data_files}"
)
def test_spectra(self):
for filename in self._file_list:
# use the base name of the file, so we get more useful messages
# for failing tests.
filename = os.path.basename(filename)
# Now find the associated file in the installed wcs test directory.
header = get_pkg_data_contents(
os.path.join("data", "spectra", filename), encoding="binary"
)
# finally run the test.
if _WCSLIB_VER >= Version("7.4"):
ctx = pytest.warns(
wcs.FITSFixedWarning,
match=(
r"'datfix' made the change 'Set MJD-OBS to 53925\.853472 from"
r" DATE-OBS'\."
),
)
else:
ctx = nullcontext()
with ctx:
all_wcs = wcs.find_all_wcs(header)
assert len(all_wcs) == 9
def test_fixes():
"""
From github issue #36
"""
header = get_pkg_data_contents("data/nonstandard_units.hdr", encoding="binary")
with pytest.raises(wcs.InvalidTransformError), pytest.warns(
wcs.FITSFixedWarning
) as w:
wcs.WCS(header, translate_units="dhs")
if Version("7.4") <= _WCSLIB_VER < Version("7.6"):
assert len(w) == 3
assert "'datfix' made the change 'Success'." in str(w.pop().message)
else:
assert len(w) == 2
first_wmsg = str(w[0].message)
assert "unitfix" in first_wmsg and "Hz" in first_wmsg and "M/S" in first_wmsg
assert "plane angle" in str(w[1].message) and "m/s" in str(w[1].message)
# Ignore "PV2_2 = 0.209028857410973 invalid keyvalue" warning seen on Windows.
@pytest.mark.filterwarnings(r"ignore:PV2_2")
def test_outside_sky():
"""
From github issue #107
"""
header = get_pkg_data_contents("data/outside_sky.hdr", encoding="binary")
w = wcs.WCS(header)
assert np.all(np.isnan(w.wcs_pix2world([[100.0, 500.0]], 0))) # outside sky
assert np.all(np.isnan(w.wcs_pix2world([[200.0, 200.0]], 0))) # outside sky
assert not np.any(np.isnan(w.wcs_pix2world([[1000.0, 1000.0]], 0)))
def test_pix2world():
"""
From github issue #1463
"""
# TODO: write this to test the expected output behavior of pix2world,
# currently this just makes sure it doesn't error out in unexpected ways
# (and compares `wcs.pc` and `result` values?)
filename = get_pkg_data_filename("data/sip2.fits")
with pytest.warns(wcs.FITSFixedWarning) as caught_warnings:
        # this raises a FITSFixedWarning that is unimportant for testing pix2world:
# FITSFixedWarning(u'The WCS transformation has more axes (2) than
# the image it is associated with (0)')
ww = wcs.WCS(filename)
# might as well monitor for changing behavior
if Version("7.4") <= _WCSLIB_VER < Version("7.6"):
assert len(caught_warnings) == 2
else:
assert len(caught_warnings) == 1
n = 3
pixels = (np.arange(n) * np.ones((2, n))).T
result = ww.wcs_pix2world(pixels, 0, ra_dec_order=True)
# Catch #2791
ww.wcs_pix2world(pixels[..., 0], pixels[..., 1], 0, ra_dec_order=True)
# assuming that the data of sip2.fits doesn't change
answer = np.array([[0.00024976, 0.00023018], [0.00023043, -0.00024997]])
assert np.allclose(ww.wcs.pc, answer, atol=1.0e-8)
answer = np.array(
[
[202.39265216, 47.17756518],
[202.39335826, 47.17754619],
[202.39406436, 47.1775272],
]
)
assert np.allclose(result, answer, atol=1.0e-8, rtol=1.0e-10)
def test_load_fits_path():
fits_name = get_pkg_data_filename("data/sip.fits")
with pytest.warns(wcs.FITSFixedWarning):
wcs.WCS(fits_name)
def test_dict_init():
"""
Test that WCS can be initialized with a dict-like object
"""
# Dictionary with no actual WCS, returns identity transform
with ctx_for_v71_dateref_warnings():
w = wcs.WCS({})
xp, yp = w.wcs_world2pix(41.0, 2.0, 1)
assert_array_almost_equal_nulp(xp, 41.0, 10)
assert_array_almost_equal_nulp(yp, 2.0, 10)
# Valid WCS
hdr = {
"CTYPE1": "GLON-CAR",
"CTYPE2": "GLAT-CAR",
"CUNIT1": "deg",
"CUNIT2": "deg",
"CRPIX1": 1,
"CRPIX2": 1,
"CRVAL1": 40.0,
"CRVAL2": 0.0,
"CDELT1": -0.1,
"CDELT2": 0.1,
}
if _WCSLIB_VER >= Version("7.1"):
hdr["DATEREF"] = "1858-11-17"
if _WCSLIB_VER >= Version("7.4"):
ctx = pytest.warns(
wcs.wcs.FITSFixedWarning,
match=r"'datfix' made the change 'Set MJDREF to 0\.000000 from DATEREF'\.",
)
else:
ctx = nullcontext()
with ctx:
w = wcs.WCS(hdr)
xp, yp = w.wcs_world2pix(41.0, 2.0, 0)
assert_array_almost_equal_nulp(xp, -10.0, 10)
assert_array_almost_equal_nulp(yp, 20.0, 10)
def test_extra_kwarg():
"""
Issue #444
"""
w = wcs.WCS()
with NumpyRNGContext(123456789):
data = np.random.rand(100, 2)
with pytest.raises(TypeError):
w.wcs_pix2world(data, origin=1)
def test_3d_shapes():
"""
Issue #444
"""
w = wcs.WCS(naxis=3)
with NumpyRNGContext(123456789):
data = np.random.rand(100, 3)
result = w.wcs_pix2world(data, 1)
assert result.shape == (100, 3)
result = w.wcs_pix2world(data[..., 0], data[..., 1], data[..., 2], 1)
assert len(result) == 3
def test_preserve_shape():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = np.random.random((2, 3, 4))
xw, yw = w.wcs_pix2world(x, y, 1)
assert xw.shape == (2, 3, 4)
assert yw.shape == (2, 3, 4)
xp, yp = w.wcs_world2pix(x, y, 1)
assert xp.shape == (2, 3, 4)
assert yp.shape == (2, 3, 4)
def test_broadcasting():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = 1
xp, yp = w.wcs_world2pix(x, y, 1)
assert xp.shape == (2, 3, 4)
assert yp.shape == (2, 3, 4)
def test_shape_mismatch():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = np.random.random((3, 2, 4))
MESSAGE = r"Coordinate arrays are not broadcastable to each other"
with pytest.raises(ValueError, match=MESSAGE):
xw, yw = w.wcs_pix2world(x, y, 1)
with pytest.raises(ValueError, match=MESSAGE):
xp, yp = w.wcs_world2pix(x, y, 1)
# There are some ambiguities that need to be worked around when
# naxis == 1
w = wcs.WCS(naxis=1)
x = np.random.random((42, 1))
xw = w.wcs_pix2world(x, 1)
assert xw.shape == (42, 1)
x = np.random.random((42,))
(xw,) = w.wcs_pix2world(x, 1)
assert xw.shape == (42,)
def test_invalid_shape():
"""Issue #1395"""
MESSAGE = r"When providing two arguments, the array must be of shape [(]N, 2[)]"
w = wcs.WCS(naxis=2)
xy = np.random.random((2, 3))
with pytest.raises(ValueError, match=MESSAGE):
w.wcs_pix2world(xy, 1)
xy = np.random.random((2, 1))
with pytest.raises(ValueError, match=MESSAGE):
w.wcs_pix2world(xy, 1)
def test_warning_about_defunct_keywords():
header = get_pkg_data_contents("data/defunct_keywords.hdr", encoding="binary")
if Version("7.4") <= _WCSLIB_VER < Version("7.6"):
n_warn = 5
else:
n_warn = 4
# Make sure the warnings come out every time...
for _ in range(2):
with pytest.warns(wcs.FITSFixedWarning) as w:
wcs.WCS(header)
assert len(w) == n_warn
# 7.4 adds a fifth warning "'datfix' made the change 'Success'."
for item in w[:4]:
assert "PCi_ja" in str(item.message)
def test_warning_about_defunct_keywords_exception():
header = get_pkg_data_contents("data/defunct_keywords.hdr", encoding="binary")
with pytest.warns(wcs.FITSFixedWarning):
wcs.WCS(header)
def test_to_header_string():
# fmt: off
hdrstr = (
"WCSAXES = 2 / Number of coordinate axes ",
"CRPIX1 = 0.0 / Pixel coordinate of reference point ",
"CRPIX2 = 0.0 / Pixel coordinate of reference point ",
"CDELT1 = 1.0 / Coordinate increment at reference point ",
"CDELT2 = 1.0 / Coordinate increment at reference point ",
"CRVAL1 = 0.0 / Coordinate value at reference point ",
"CRVAL2 = 0.0 / Coordinate value at reference point ",
"LATPOLE = 90.0 / [deg] Native latitude of celestial pole ",
)
# fmt: on
if _WCSLIB_VER >= Version("7.3"):
# fmt: off
hdrstr += (
"MJDREF = 0.0 / [d] MJD of fiducial time ",
)
# fmt: on
elif _WCSLIB_VER >= Version("7.1"):
# fmt: off
hdrstr += (
"DATEREF = '1858-11-17' / ISO-8601 fiducial time ",
"MJDREFI = 0.0 / [d] MJD of fiducial time, integer part ",
"MJDREFF = 0.0 / [d] MJD of fiducial time, fractional part "
)
# fmt: on
hdrstr += ("END",)
header_string = "".join(hdrstr)
w = wcs.WCS()
h0 = fits.Header.fromstring(w.to_header_string().strip())
if "COMMENT" in h0:
del h0["COMMENT"]
if "" in h0:
del h0[""]
h1 = fits.Header.fromstring(header_string.strip())
assert dict(h0) == dict(h1)
def test_to_fits():
    if _WCSLIB_VER < Version("7.1"):
        nrec = 8
    elif _WCSLIB_VER < Version("7.3"):
        nrec = 11
    else:
        nrec = 9
w = wcs.WCS()
header_string = w.to_header()
wfits = w.to_fits()
assert isinstance(wfits, fits.HDUList)
assert isinstance(wfits[0], fits.PrimaryHDU)
assert header_string == wfits[0].header[-nrec:]
def test_to_header_warning():
fits_name = get_pkg_data_filename("data/sip.fits")
with pytest.warns(wcs.FITSFixedWarning):
x = wcs.WCS(fits_name)
with pytest.warns(AstropyWarning, match="A_ORDER") as w:
x.to_header()
assert len(w) == 1
def test_no_comments_in_header():
w = wcs.WCS()
header = w.to_header()
assert w.wcs.alt not in header
assert "COMMENT" + w.wcs.alt.strip() not in header
assert "COMMENT" not in header
wkey = "P"
header = w.to_header(key=wkey)
assert wkey not in header
assert "COMMENT" not in header
assert "COMMENT" + w.wcs.alt.strip() not in header
def test_find_all_wcs_crash():
"""
Causes a double free without a recent fix in wcslib_wrap.C
"""
with open(get_pkg_data_filename("data/too_many_pv.hdr")) as fd:
header = fd.read()
# We have to set fix=False here, because one of the fixing tasks is to
# remove redundant SCAMP distortion parameters when SIP distortion
# parameters are also present.
with pytest.raises(wcs.InvalidTransformError), pytest.warns(wcs.FITSFixedWarning):
wcs.find_all_wcs(header, fix=False)
# NOTE: Warning bubbles up from C layer during wcs.validate() and
# is hard to catch, so we just ignore it.
@pytest.mark.filterwarnings("ignore")
def test_validate():
results = wcs.validate(get_pkg_data_filename("data/validate.fits"))
results_txt = sorted({x.strip() for x in repr(results).splitlines()})
if _WCSLIB_VER >= Version("7.6"):
filename = "data/validate.7.6.txt"
elif _WCSLIB_VER >= Version("7.4"):
filename = "data/validate.7.4.txt"
elif _WCSLIB_VER >= Version("6.0"):
filename = "data/validate.6.txt"
elif _WCSLIB_VER >= Version("5.13"):
filename = "data/validate.5.13.txt"
elif _WCSLIB_VER >= Version("5.0"):
filename = "data/validate.5.0.txt"
else:
filename = "data/validate.txt"
with open(get_pkg_data_filename(filename)) as fd:
lines = fd.readlines()
assert sorted({x.strip() for x in lines}) == results_txt
@pytest.mark.filterwarnings("ignore")
def test_validate_wcs_tab():
results = wcs.validate(get_pkg_data_filename("data/tab-time-last-axis.fits"))
results_txt = sorted({x.strip() for x in repr(results).splitlines()})
assert results_txt == [
"",
"HDU 0 (PRIMARY):",
"HDU 1 (WCS-TABLE):",
"No issues.",
"WCS key ' ':",
]
def test_validate_with_2_wcses():
# From Issue #2053
with pytest.warns(AstropyUserWarning):
results = wcs.validate(get_pkg_data_filename("data/2wcses.hdr"))
assert "WCS key 'A':" in str(results)
def test_crpix_maps_to_crval():
twcs = wcs.WCS(naxis=2)
twcs.wcs.crval = [251.29, 57.58]
twcs.wcs.cdelt = [1, 1]
twcs.wcs.crpix = [507, 507]
twcs.wcs.pc = np.array([[7.7e-6, 3.3e-5], [3.7e-5, -6.8e-6]])
twcs._naxis = [1014, 1014]
twcs.wcs.ctype = ["RA---TAN-SIP", "DEC--TAN-SIP"]
a = np.array(
[
[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
[-2.81029767e-13, 0.0, 0.0, 0.0, 0.0],
]
)
b = np.array(
[
[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0],
]
)
twcs.sip = wcs.Sip(a, b, None, None, twcs.wcs.crpix)
twcs.wcs.set()
pscale = np.sqrt(wcs.utils.proj_plane_pixel_area(twcs))
# test that CRPIX maps to CRVAL:
assert_allclose(
twcs.wcs_pix2world(*twcs.wcs.crpix, 1),
twcs.wcs.crval,
rtol=0.0,
atol=1e-6 * pscale,
)
# test that CRPIX maps to CRVAL:
assert_allclose(
twcs.all_pix2world(*twcs.wcs.crpix, 1),
twcs.wcs.crval,
rtol=0.0,
atol=1e-6 * pscale,
)
def test_all_world2pix(
fname=None,
ext=0,
tolerance=1.0e-4,
origin=0,
random_npts=25000,
adaptive=False,
maxiter=20,
detect_divergence=True,
):
"""Test all_world2pix, iterative inverse of all_pix2world"""
# Open test FITS file:
if fname is None:
fname = get_pkg_data_filename("data/j94f05bgq_flt.fits")
ext = ("SCI", 1)
if not os.path.isfile(fname):
raise OSError(f"Input file '{fname:s}' to 'test_all_world2pix' not found.")
h = fits.open(fname)
w = wcs.WCS(h[ext].header, h)
h.close()
del h
crpix = w.wcs.crpix
ncoord = crpix.shape[0]
# Assume that CRPIX is at the center of the image and that the image has
# a power-of-2 number of pixels along each axis. Only use the central
# 1/64 for this testing purpose:
naxesi_l = list((7.0 / 16 * crpix).astype(int))
naxesi_u = list((9.0 / 16 * crpix).astype(int))
# Generate integer indices of pixels (image grid):
img_pix = np.dstack(
[i.flatten() for i in np.meshgrid(*map(range, naxesi_l, naxesi_u))]
)[0]
    # Generate random data (in image coordinates):
with NumpyRNGContext(123456789):
rnd_pix = np.random.rand(random_npts, ncoord)
# Scale random data to cover the central part of the image
mwidth = 2 * (crpix * 1.0 / 8)
rnd_pix = crpix - 0.5 * mwidth + (mwidth - 1) * rnd_pix
# Reference pixel coordinates in image coordinate system (CS):
test_pix = np.append(img_pix, rnd_pix, axis=0)
# Reference pixel coordinates in sky CS using forward transformation:
all_world = w.all_pix2world(test_pix, origin)
try:
runtime_begin = datetime.now()
# Apply the inverse iterative process to pixels in world coordinates
# to recover the pixel coordinates in image space.
all_pix = w.all_world2pix(
all_world,
origin,
tolerance=tolerance,
adaptive=adaptive,
maxiter=maxiter,
detect_divergence=detect_divergence,
)
runtime_end = datetime.now()
except wcs.wcs.NoConvergence as e:
runtime_end = datetime.now()
ndiv = 0
if e.divergent is not None:
ndiv = e.divergent.shape[0]
print(f"There are {ndiv} diverging solutions.")
print(f"Indices of diverging solutions:\n{e.divergent}")
print(f"Diverging solutions:\n{e.best_solution[e.divergent]}\n")
print(
"Mean radius of the diverging solutions:"
f" {np.mean(np.linalg.norm(e.best_solution[e.divergent], axis=1))}"
)
print(
"Mean accuracy of the diverging solutions:"
f" {np.mean(np.linalg.norm(e.accuracy[e.divergent], axis=1))}\n"
)
else:
print("There are no diverging solutions.")
nslow = 0
if e.slow_conv is not None:
nslow = e.slow_conv.shape[0]
print(f"There are {nslow} slowly converging solutions.")
print(f"Indices of slowly converging solutions:\n{e.slow_conv}")
print(f"Slowly converging solutions:\n{e.best_solution[e.slow_conv]}\n")
else:
print("There are no slowly converging solutions.\n")
print(
f"There are {e.best_solution.shape[0] - ndiv - nslow} converged solutions."
)
print(f"Best solutions (all points):\n{e.best_solution}")
print(f"Accuracy:\n{e.accuracy}\n")
print(
"\nFinished running 'test_all_world2pix' with errors.\n"
f"ERROR: {e.args[0]}\nRun time: {runtime_end - runtime_begin}\n"
)
raise e
# Compute differences between reference pixel coordinates and
# pixel coordinates (in image space) recovered from reference
# pixels in world coordinates:
errors = np.sqrt(np.sum(np.power(all_pix - test_pix, 2), axis=1))
meanerr = np.mean(errors)
maxerr = np.amax(errors)
print(
"\nFinished running 'test_all_world2pix'.\n"
f"Mean error = {meanerr:e} (Max error = {maxerr:e})\n"
f"Run time: {runtime_end - runtime_begin}\n"
)
assert maxerr < 2.0 * tolerance
def test_scamp_sip_distortion_parameters():
"""
Test parsing of WCS parameters with redundant SIP and SCAMP distortion
parameters.
"""
header = get_pkg_data_contents("data/validate.fits", encoding="binary")
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(header)
# Just check that this doesn't raise an exception.
w.all_pix2world(0, 0, 0)
def test_fixes2():
"""
From github issue #1854
"""
header = get_pkg_data_contents("data/nonstandard_units.hdr", encoding="binary")
with pytest.raises(wcs.InvalidTransformError):
wcs.WCS(header, fix=False)
def test_unit_normalization():
"""
From github issue #1918
"""
header = get_pkg_data_contents("data/unit.hdr", encoding="binary")
w = wcs.WCS(header)
assert w.wcs.cunit[2] == "m/s"
def test_footprint_to_file(tmp_path):
"""
From github issue #1912
"""
# Arbitrary keywords from real data
hdr = {
"CTYPE1": "RA---ZPN",
"CRUNIT1": "deg",
"CRPIX1": -3.3495999e02,
"CRVAL1": 3.185790700000e02,
"CTYPE2": "DEC--ZPN",
"CRUNIT2": "deg",
"CRPIX2": 3.0453999e03,
"CRVAL2": 4.388538000000e01,
"PV2_1": 1.0,
"PV2_3": 220.0,
"NAXIS1": 2048,
"NAXIS2": 1024,
}
w = wcs.WCS(hdr)
testfile = tmp_path / "test.txt"
w.footprint_to_file(testfile)
with open(testfile) as f:
lines = f.readlines()
assert len(lines) == 4
assert lines[2] == "ICRS\n"
assert "color=green" in lines[3]
w.footprint_to_file(testfile, coordsys="FK5", color="red")
with open(testfile) as f:
lines = f.readlines()
assert len(lines) == 4
assert lines[2] == "FK5\n"
assert "color=red" in lines[3]
with pytest.raises(ValueError):
w.footprint_to_file(testfile, coordsys="FOO")
del hdr["NAXIS1"]
del hdr["NAXIS2"]
w = wcs.WCS(hdr)
with pytest.warns(AstropyUserWarning):
w.footprint_to_file(testfile)
# Ignore FITSFixedWarning about keyrecords following the END keyrecord were
# ignored, which comes from src/astropy_wcs.c . Only a blind catch like this
# seems to work when pytest warnings are turned into exceptions.
@pytest.mark.filterwarnings("ignore")
def test_validate_faulty_wcs():
"""
From github issue #2053
"""
h = fits.Header()
# Illegal WCS:
h["RADESYSA"] = "ICRS"
h["PV2_1"] = 1.0
hdu = fits.PrimaryHDU([[0]], header=h)
hdulist = fits.HDUList([hdu])
# Check that this doesn't raise a NameError exception
wcs.validate(hdulist)
def test_error_message():
header = get_pkg_data_contents("data/invalid_header.hdr", encoding="binary")
# make WCS transformation invalid
hdr = fits.Header.fromstring(header)
del hdr["PV?_*"]
hdr["PV1_1"] = 110
hdr["PV1_2"] = 110
hdr["PV2_1"] = -110
hdr["PV2_2"] = -110
with pytest.raises(wcs.InvalidTransformError):
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(hdr, _do_set=False)
w.all_pix2world([[536.0, 894.0]], 0)
def test_out_of_bounds():
# See #2107
header = get_pkg_data_contents("data/zpn-hole.hdr", encoding="binary")
w = wcs.WCS(header)
ra, dec = w.wcs_pix2world(110, 110, 0)
assert np.isnan(ra)
assert np.isnan(dec)
ra, dec = w.wcs_pix2world(0, 0, 0)
assert not np.isnan(ra)
assert not np.isnan(dec)
def test_calc_footprint_1():
fits = get_pkg_data_filename("data/sip.fits")
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(fits)
axes = (1000, 1051)
ref = np.array(
[
[202.39314493, 47.17753352],
[202.71885939, 46.94630488],
[202.94631893, 47.15855022],
[202.72053428, 47.37893142],
]
)
footprint = w.calc_footprint(axes=axes)
assert_allclose(footprint, ref)
def test_calc_footprint_2():
"""Test calc_footprint without distortion."""
fits = get_pkg_data_filename("data/sip.fits")
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(fits)
axes = (1000, 1051)
ref = np.array(
[
[202.39265216, 47.17756518],
[202.7469062, 46.91483312],
[203.11487481, 47.14359319],
[202.76092671, 47.40745948],
]
)
footprint = w.calc_footprint(axes=axes, undistort=False)
assert_allclose(footprint, ref)
def test_calc_footprint_3():
"""Test calc_footprint with corner of the pixel."""
w = wcs.WCS()
w.wcs.ctype = ["GLON-CAR", "GLAT-CAR"]
w.wcs.crpix = [1.5, 5.5]
w.wcs.cdelt = [-0.1, 0.1]
axes = (2, 10)
ref = np.array([[0.1, -0.5], [0.1, 0.5], [359.9, 0.5], [359.9, -0.5]])
footprint = w.calc_footprint(axes=axes, undistort=False, center=False)
assert_allclose(footprint, ref)
def test_sip():
# See #2107
header = get_pkg_data_contents("data/irac_sip.hdr", encoding="binary")
w = wcs.WCS(header)
x0, y0 = w.sip_pix2foc(200, 200, 0)
assert_allclose(72, x0, 1e-3)
assert_allclose(72, y0, 1e-3)
x1, y1 = w.sip_foc2pix(x0, y0, 0)
assert_allclose(200, x1, 1e-3)
assert_allclose(200, y1, 1e-3)
def test_sub_3d_with_sip():
# See #10527
header = get_pkg_data_contents("data/irac_sip.hdr", encoding="binary")
header = fits.Header.fromstring(header)
header["NAXIS"] = 3
header.set("NAXIS3", 64, after=header.index("NAXIS2"))
w = wcs.WCS(header, naxis=2)
assert w.naxis == 2
def test_printwcs(capsys):
"""
Just make sure that it runs
"""
h = get_pkg_data_contents("data/spectra/orion-freq-1.hdr", encoding="binary")
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(h)
w.printwcs()
captured = capsys.readouterr()
assert "WCS Keywords" in captured.out
h = get_pkg_data_contents("data/3d_cd.hdr", encoding="binary")
w = wcs.WCS(h)
w.printwcs()
captured = capsys.readouterr()
assert "WCS Keywords" in captured.out
def test_invalid_spherical():
header = """
SIMPLE = T / conforms to FITS standard
BITPIX = 8 / array data type
WCSAXES = 2 / no comment
CTYPE1 = 'RA---TAN' / TAN (gnomic) projection
CTYPE2 = 'DEC--TAN' / TAN (gnomic) projection
EQUINOX = 2000.0 / Equatorial coordinates definition (yr)
LONPOLE = 180.0 / no comment
LATPOLE = 0.0 / no comment
CRVAL1 = 16.0531567459 / RA of reference point
CRVAL2 = 23.1148929108 / DEC of reference point
CRPIX1 = 2129 / X reference pixel
CRPIX2 = 1417 / Y reference pixel
CUNIT1 = 'deg ' / X pixel scale units
CUNIT2 = 'deg ' / Y pixel scale units
CD1_1 = -0.00912247310646 / Transformation matrix
CD1_2 = -0.00250608809647 / no comment
CD2_1 = 0.00250608809647 / no comment
CD2_2 = -0.00912247310646 / no comment
IMAGEW = 4256 / Image width, in pixels.
IMAGEH = 2832 / Image height, in pixels.
"""
f = io.StringIO(header)
header = fits.Header.fromtextfile(f)
w = wcs.WCS(header)
x, y = w.wcs_world2pix(211, -26, 0)
assert np.isnan(x) and np.isnan(y)
def test_no_iteration():
"""Regression test for #3066"""
MESSAGE = "'{}' object is not iterable"
w = wcs.WCS(naxis=2)
with pytest.raises(TypeError, match=MESSAGE.format("WCS")):
iter(w)
class NewWCS(wcs.WCS):
pass
w = NewWCS(naxis=2)
with pytest.raises(TypeError, match=MESSAGE.format("NewWCS")):
iter(w)
@pytest.mark.skipif(
_wcs.__version__[0] < "5", reason="TPV only works with wcslib 5.x or later"
)
def test_sip_tpv_agreement():
sip_header = get_pkg_data_contents(
os.path.join("data", "siponly.hdr"), encoding="binary"
)
tpv_header = get_pkg_data_contents(
os.path.join("data", "tpvonly.hdr"), encoding="binary"
)
with pytest.warns(wcs.FITSFixedWarning):
w_sip = wcs.WCS(sip_header)
w_tpv = wcs.WCS(tpv_header)
assert_array_almost_equal(
w_sip.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv.all_pix2world([w_tpv.wcs.crpix], 1),
)
w_sip2 = wcs.WCS(w_sip.to_header())
w_tpv2 = wcs.WCS(w_tpv.to_header())
assert_array_almost_equal(
w_sip.all_pix2world([w_sip.wcs.crpix], 1),
w_sip2.all_pix2world([w_sip.wcs.crpix], 1),
)
assert_array_almost_equal(
w_tpv.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv2.all_pix2world([w_sip.wcs.crpix], 1),
)
assert_array_almost_equal(
w_sip2.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv2.all_pix2world([w_tpv.wcs.crpix], 1),
)
def test_tpv_ctype_sip():
sip_header = fits.Header.fromstring(
get_pkg_data_contents(os.path.join("data", "siponly.hdr"), encoding="binary")
)
tpv_header = fits.Header.fromstring(
get_pkg_data_contents(os.path.join("data", "tpvonly.hdr"), encoding="binary")
)
sip_header.update(tpv_header)
sip_header["CTYPE1"] = "RA---TAN-SIP"
sip_header["CTYPE2"] = "DEC--TAN-SIP"
with pytest.warns(
wcs.FITSFixedWarning,
match="Removed redundant SCAMP distortion parameters "
"because SIP parameters are also present",
):
w_sip = wcs.WCS(sip_header)
assert w_sip.sip is not None
def test_tpv_ctype_tpv():
sip_header = fits.Header.fromstring(
get_pkg_data_contents(os.path.join("data", "siponly.hdr"), encoding="binary")
)
tpv_header = fits.Header.fromstring(
get_pkg_data_contents(os.path.join("data", "tpvonly.hdr"), encoding="binary")
)
sip_header.update(tpv_header)
sip_header["CTYPE1"] = "RA---TPV"
sip_header["CTYPE2"] = "DEC--TPV"
with pytest.warns(
wcs.FITSFixedWarning,
match="Removed redundant SIP distortion parameters "
"because CTYPE explicitly specifies TPV distortions",
):
w_sip = wcs.WCS(sip_header)
assert w_sip.sip is None
def test_tpv_ctype_tan():
sip_header = fits.Header.fromstring(
get_pkg_data_contents(os.path.join("data", "siponly.hdr"), encoding="binary")
)
tpv_header = fits.Header.fromstring(
get_pkg_data_contents(os.path.join("data", "tpvonly.hdr"), encoding="binary")
)
sip_header.update(tpv_header)
sip_header["CTYPE1"] = "RA---TAN"
sip_header["CTYPE2"] = "DEC--TAN"
with pytest.warns(
wcs.FITSFixedWarning,
match="Removed redundant SIP distortion parameters "
"because SCAMP' PV distortions are also present",
):
w_sip = wcs.WCS(sip_header)
assert w_sip.sip is None
def test_car_sip_with_pv():
# https://github.com/astropy/astropy/issues/14255
header_dict = {
"SIMPLE": True,
"BITPIX": -32,
"NAXIS": 2,
"NAXIS1": 1024,
"NAXIS2": 1024,
"CRPIX1": 512.0,
"CRPIX2": 512.0,
"CDELT1": 0.01,
"CDELT2": 0.01,
"CRVAL1": 120.0,
"CRVAL2": 29.0,
"CTYPE1": "RA---CAR-SIP",
"CTYPE2": "DEC--CAR-SIP",
"PV1_1": 120.0,
"PV1_2": 29.0,
"PV1_0": 1.0,
"A_ORDER": 2,
"A_2_0": 5.0e-4,
"B_ORDER": 2,
"B_2_0": 5.0e-4,
}
w = wcs.WCS(header_dict)
assert w.sip is not None
assert w.wcs.get_pv() == [(1, 1, 120.0), (1, 2, 29.0), (1, 0, 1.0)]
assert np.allclose(
w.all_pix2world(header_dict["CRPIX1"], header_dict["CRPIX2"], 1),
[header_dict["CRVAL1"], header_dict["CRVAL2"]],
)
@pytest.mark.skipif(
_wcs.__version__[0] < "5", reason="TPV only works with wcslib 5.x or later"
)
def test_tpv_copy():
# See #3904
tpv_header = get_pkg_data_contents(
os.path.join("data", "tpvonly.hdr"), encoding="binary"
)
with pytest.warns(wcs.FITSFixedWarning):
w_tpv = wcs.WCS(tpv_header)
ra, dec = w_tpv.wcs_pix2world([0, 100, 200], [0, -100, 200], 0)
assert ra[0] != ra[1] and ra[1] != ra[2]
assert dec[0] != dec[1] and dec[1] != dec[2]
def test_hst_wcs():
path = get_pkg_data_filename("data/dist_lookup.fits.gz")
with fits.open(path) as hdulist:
# wcslib will complain about the distortion parameters if they
# weren't correctly deleted from the header
w = wcs.WCS(hdulist[1].header, hdulist)
# Check pixel scale and area
assert_quantity_allclose(
w.proj_plane_pixel_scales(), [1.38484378e-05, 1.39758488e-05] * u.deg
)
assert_quantity_allclose(
w.proj_plane_pixel_area(), 1.93085492e-10 * (u.deg * u.deg)
)
# Exercise the main transformation functions, mainly just for
# coverage
w.p4_pix2foc([0, 100, 200], [0, -100, 200], 0)
w.det2im([0, 100, 200], [0, -100, 200], 0)
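# Assigning the distortion attributes to themselves exercises the property
# getters and setters, again mainly for coverage.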
w.cpdis1 = w.cpdis1
w.cpdis2 = w.cpdis2
w.det2im1 = w.det2im1
w.det2im2 = w.det2im2
w.sip = w.sip
w.cpdis1.cdelt = w.cpdis1.cdelt
w.cpdis1.crpix = w.cpdis1.crpix
w.cpdis1.crval = w.cpdis1.crval
w.cpdis1.data = w.cpdis1.data
assert w.sip.a_order == 4
assert w.sip.b_order == 4
assert w.sip.ap_order == 0
assert w.sip.bp_order == 0
assert_array_equal(w.sip.crpix, [2048.0, 1024.0])
wcs.WCS(hdulist[1].header, hdulist)
def test_cpdis_comments():
path = get_pkg_data_filename("data/dist_lookup.fits.gz")
f = fits.open(path)
w = wcs.WCS(f[1].header, f)
hdr = w.to_fits()[0].header
f.close()
wcscards = list(hdr["CPDIS*"].cards) + list(hdr["DP*"].cards)
wcsdict = {k: (v, c) for k, v, c in wcscards}
refcards = [
("CPDIS1", "LOOKUP", "Prior distortion function type"),
("DP1.EXTVER", 1.0, "Version number of WCSDVARR extension"),
("DP1.NAXES", 2.0, "Number of independent variables in CPDIS function"),
("DP1.AXIS.1", 1.0, "Axis number of the 1st variable in a CPDIS function"),
("DP1.AXIS.2", 2.0, "Axis number of the 2nd variable in a CPDIS function"),
("CPDIS2", "LOOKUP", "Prior distortion function type"),
("DP2.EXTVER", 2.0, "Version number of WCSDVARR extension"),
("DP2.NAXES", 2.0, "Number of independent variables in CPDIS function"),
("DP2.AXIS.1", 1.0, "Axis number of the 1st variable in a CPDIS function"),
("DP2.AXIS.2", 2.0, "Axis number of the 2nd variable in a CPDIS function"),
]
assert len(wcsdict) == len(refcards)
for k, v, c in refcards:
assert wcsdict[k] == (v, c)
def test_d2im_comments():
path = get_pkg_data_filename("data/ie6d07ujq_wcs.fits")
f = fits.open(path)
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(f[0].header, f)
f.close()
wcscards = list(w.to_fits()[0].header["D2IM*"].cards)
wcsdict = {k: (v, c) for k, v, c in wcscards}
refcards = [
("D2IMDIS1", "LOOKUP", "Detector to image correction type"),
("D2IM1.EXTVER", 1.0, "Version number of WCSDVARR extension"),
("D2IM1.NAXES", 2.0, "Number of independent variables in D2IM function"),
("D2IM1.AXIS.1", 1.0, "Axis number of the 1st variable in a D2IM function"),
("D2IM1.AXIS.2", 2.0, "Axis number of the 2nd variable in a D2IM function"),
("D2IMDIS2", "LOOKUP", "Detector to image correction type"),
("D2IM2.EXTVER", 2.0, "Version number of WCSDVARR extension"),
("D2IM2.NAXES", 2.0, "Number of independent variables in D2IM function"),
("D2IM2.AXIS.1", 1.0, "Axis number of the 1st variable in a D2IM function"),
("D2IM2.AXIS.2", 2.0, "Axis number of the 2nd variable in a D2IM function"),
# ('D2IMERR1', 0.049, 'Maximum error of D2IM correction for axis 1'),
# ('D2IMERR2', 0.035, 'Maximum error of D2IM correction for axis 2'),
# ('D2IMEXT', 'iref$y7b1516hi_d2i.fits', ''),
]
assert len(wcsdict) == len(refcards)
for k, v, c in refcards:
assert wcsdict[k] == (v, c)
def test_sip_broken():
# This header caused wcslib to segfault because it has a SIP
# specification in a non-default keyword
hdr = get_pkg_data_contents("data/sip-broken.hdr")
wcs.WCS(hdr)
def test_no_truncate_crval():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [50, 50, 2.12345678e11]
w.wcs.cdelt = [1e-3, 1e-3, 1e8]
w.wcs.ctype = ["RA---TAN", "DEC--TAN", "FREQ"]
w.wcs.set()
header = w.to_header()
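# The header values should match the wcsprm values exactly, i.e. writing the
# header must not truncate the large CRVAL3 value (see #4612).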
for ii in range(3):
assert header[f"CRVAL{ii + 1}"] == w.wcs.crval[ii]
assert header[f"CDELT{ii + 1}"] == w.wcs.cdelt[ii]
def test_no_truncate_crval_try2():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [50, 50, 2.12345678e11]
w.wcs.cdelt = [1e-5, 1e-5, 1e5]
w.wcs.ctype = ["RA---SIN", "DEC--SIN", "FREQ"]
w.wcs.cunit = ["deg", "deg", "Hz"]
w.wcs.crpix = [1, 1, 1]
w.wcs.restfrq = 2.34e11
w.wcs.set()
header = w.to_header()
for ii in range(3):
assert header[f"CRVAL{ii + 1}"] == w.wcs.crval[ii]
assert header[f"CDELT{ii + 1}"] == w.wcs.cdelt[ii]
def test_no_truncate_crval_p17():
"""
Regression test for https://github.com/astropy/astropy/issues/5162
"""
w = wcs.WCS(naxis=2)
w.wcs.crval = [50.1234567890123456, 50.1234567890123456]
w.wcs.cdelt = [1e-3, 1e-3]
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.set()
header = w.to_header()
assert header["CRVAL1"] != w.wcs.crval[0]
assert header["CRVAL2"] != w.wcs.crval[1]
header = w.to_header(relax=wcs.WCSHDO_P17)
assert header["CRVAL1"] == w.wcs.crval[0]
assert header["CRVAL2"] == w.wcs.crval[1]
def test_no_truncate_using_compare():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
This one uses WCS.wcs.compare and some slightly different values
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [2.409303333333e02, 50, 2.12345678e11]
w.wcs.cdelt = [1e-3, 1e-3, 1e8]
w.wcs.ctype = ["RA---TAN", "DEC--TAN", "FREQ"]
w.wcs.set()
w2 = wcs.WCS(w.to_header())
w.wcs.compare(w2.wcs)
def test_passing_ImageHDU():
"""
Passing ImageHDU or PrimaryHDU and comparing it with
wcs initialized from header. For #4493.
"""
path = get_pkg_data_filename("data/validate.fits")
with fits.open(path) as hdulist:
with pytest.warns(wcs.FITSFixedWarning):
wcs_hdu = wcs.WCS(hdulist[0])
wcs_header = wcs.WCS(hdulist[0].header)
assert wcs_hdu.wcs.compare(wcs_header.wcs)
wcs_hdu = wcs.WCS(hdulist[1])
wcs_header = wcs.WCS(hdulist[1].header)
assert wcs_hdu.wcs.compare(wcs_header.wcs)
def test_inconsistent_sip():
"""
Test for #4814
"""
hdr = get_pkg_data_contents("data/sip-broken.hdr")
ctx = ctx_for_v71_dateref_warnings()
with ctx:
w = wcs.WCS(hdr)
with pytest.warns(AstropyWarning):
newhdr = w.to_header(relax=None)
# CTYPE should not include "-SIP" if relax is None
with ctx:
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
newhdr = w.to_header(relax=False)
assert "A_0_2" not in newhdr
# CTYPE should not include "-SIP" if relax is False
with ctx:
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
with pytest.warns(AstropyWarning):
newhdr = w.to_header(key="C")
assert "A_0_2" not in newhdr
# Test writing header with a different key
with ctx:
wnew = wcs.WCS(newhdr, key="C")
assert all(not ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
with pytest.warns(AstropyWarning):
newhdr = w.to_header(key=" ")
# Test writing a primary WCS to header
with ctx:
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
# Test that "-SIP" is kept into CTYPE if relax=True and
# "-SIP" was in the original header
newhdr = w.to_header(relax=True)
with ctx:
wnew = wcs.WCS(newhdr)
assert all(ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
assert "A_0_2" in newhdr
# Test that SIP coefficients are also written out.
assert wnew.sip is not None
# ######### broken header ###########
# Test that "-SIP" is added to CTYPE if relax=True and
# "-SIP" was not in the original header but SIP coefficients
# are present.
with ctx:
w = wcs.WCS(hdr)
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
newhdr = w.to_header(relax=True)
with ctx:
wnew = wcs.WCS(newhdr)
assert all(ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
def test_bounds_check():
"""Test for #4957"""
w = wcs.WCS(naxis=2)
w.wcs.ctype = ["RA---CAR", "DEC--CAR"]
w.wcs.cdelt = [10, 10]
w.wcs.crval = [-90, 90]
w.wcs.crpix = [1, 1]
w.wcs.bounds_check(False, False)
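# With bounds checking disabled, the out-of-range pixel is still converted to
# world coordinates rather than being flagged as invalid.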
ra, dec = w.wcs_pix2world(300, 0, 0)
assert_allclose(ra, -180)
assert_allclose(dec, -30)
def test_naxis():
w = wcs.WCS(naxis=2)
w.wcs.crval = [1, 1]
w.wcs.cdelt = [0.1, 0.1]
w.wcs.crpix = [1, 1]
w._naxis = [1000, 500]
assert w.pixel_shape == (1000, 500)
assert w.array_shape == (500, 1000)
w.pixel_shape = (99, 59)
assert w._naxis == [99, 59]
w.array_shape = (45, 23)
assert w._naxis == [23, 45]
assert w.pixel_shape == (23, 45)
w.pixel_shape = None
assert w.pixel_bounds is None
def test_sip_with_altkey():
"""
Test that when creating a WCS object using a key, the CTYPE with
that key is used rather than the primary CTYPE.
Fix for #5443.
"""
with fits.open(get_pkg_data_filename("data/sip.fits")) as f:
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(f[0].header)
# create a header with two WCSs.
h1 = w.to_header(relax=True, key="A")
h2 = w.to_header(relax=False)
h1["CTYPE1A"] = "RA---SIN-SIP"
h1["CTYPE2A"] = "DEC--SIN-SIP"
h1.update(h2)
with ctx_for_v71_dateref_warnings():
w = wcs.WCS(h1, key="A")
assert (w.wcs.ctype == np.array(["RA---SIN-SIP", "DEC--SIN-SIP"])).all()
def test_to_fits_1():
"""
Test to_fits() with LookupTable distortion.
"""
fits_name = get_pkg_data_filename("data/dist.fits")
with pytest.warns(AstropyDeprecationWarning):
w = wcs.WCS(fits_name)
wfits = w.to_fits()
assert isinstance(wfits, fits.HDUList)
assert isinstance(wfits[0], fits.PrimaryHDU)
assert isinstance(wfits[1], fits.ImageHDU)
def test_keyedsip():
"""
Test SIP reading with an alternate key.
"""
hdr_name = get_pkg_data_filename("data/sip-broken.hdr")
header = fits.Header.fromfile(hdr_name)
del header["CRPIX1"]
del header["CRPIX2"]
w = wcs.WCS(header=header, key="A")
assert isinstance(w.sip, wcs.Sip)
assert w.sip.crpix[0] == 2048
assert w.sip.crpix[1] == 1026
def test_zero_size_input():
with fits.open(get_pkg_data_filename("data/sip.fits")) as f:
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(f[0].header)
inp = np.zeros((0, 2))
assert_array_equal(inp, w.all_pix2world(inp, 0))
assert_array_equal(inp, w.all_world2pix(inp, 0))
inp = [], [1]
result = w.all_pix2world([], [1], 0)
assert_array_equal(inp[0], result[0])
assert_array_equal(inp[1], result[1])
result = w.all_world2pix([], [1], 0)
assert_array_equal(inp[0], result[0])
assert_array_equal(inp[1], result[1])
def test_scalar_inputs():
"""
Issue #7845
"""
wcsobj = wcs.WCS(naxis=1)
result = wcsobj.all_pix2world(2, 1)
assert_array_equal(result, [np.array(2.0)])
assert result[0].shape == ()
result = wcsobj.all_pix2world([2], 1)
assert_array_equal(result, [np.array([2.0])])
assert result[0].shape == (1,)
# Ignore RuntimeWarning raised on s390.
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in.*")
def test_footprint_contains():
"""
Test WCS.footprint_contains(skycoord)
"""
header = """
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1045.0 / Pixel coordinate of reference point
CRPIX2 = 1001.0 / Pixel coordinate of reference point
PC1_1 = -0.00556448550786 / Coordinate transformation matrix element
PC1_2 = -0.001042120133257 / Coordinate transformation matrix element
PC2_1 = 0.001181477028705 / Coordinate transformation matrix element
PC2_2 = -0.005590809742987 / Coordinate transformation matrix element
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / TAN (gnomonic) projection + SIP distortions
CTYPE2 = 'DEC--TAN' / TAN (gnomonic) projection + SIP distortions
CRVAL1 = 250.34971683647 / [deg] Coordinate value at reference point
CRVAL2 = 2.2808772582495 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 2.2808772582495 / [deg] Native latitude of celestial pole
RADESYS = 'ICRS' / Equatorial coordinate system
MJD-OBS = 58612.339199259 / [d] MJD of observation matching DATE-OBS
DATE-OBS= '2019-05-09T08:08:26.816Z' / ISO-8601 observation date matching MJD-OB
NAXIS = 2 / NAXIS
NAXIS1 = 2136 / length of first array dimension
NAXIS2 = 2078 / length of second array dimension
"""
header = fits.Header.fromstring(header.strip(), "\n")
test_wcs = wcs.WCS(header)
hasCoord = test_wcs.footprint_contains(SkyCoord(254, 2, unit="deg"))
assert hasCoord
hasCoord = test_wcs.footprint_contains(SkyCoord(240, 2, unit="deg"))
assert not hasCoord
hasCoord = test_wcs.footprint_contains(SkyCoord(24, 2, unit="deg"))
assert not hasCoord
def test_cunit():
# Initializing WCS
w1 = wcs.WCS(naxis=2)
w2 = wcs.WCS(naxis=2)
w3 = wcs.WCS(naxis=2)
w4 = wcs.WCS(naxis=2)
# Initializing the values of cunit
w1.wcs.cunit = ["deg", "m/s"]
w2.wcs.cunit = ["km/h", "km/h"]
w3.wcs.cunit = ["deg", "m/s"]
w4.wcs.cunit = ["deg", "deg"]
# Equality checking a cunit with itself
assert w1.wcs.cunit == w1.wcs.cunit
assert not w1.wcs.cunit != w1.wcs.cunit
# Equality check of two different cunit objects having the same values
assert w1.wcs.cunit == w3.wcs.cunit
assert not w1.wcs.cunit != w3.wcs.cunit
# Equality check of two different cunit objects having the same first unit
# but different second unit (see #9154)
assert not w1.wcs.cunit == w4.wcs.cunit
assert w1.wcs.cunit != w4.wcs.cunit
# Inequality check of two different cunit objects having different values
assert not w1.wcs.cunit == w2.wcs.cunit
assert w1.wcs.cunit != w2.wcs.cunit
# Inequality checking of cunit with a list of literals
assert not w1.wcs.cunit == [1, 2, 3]
assert w1.wcs.cunit != [1, 2, 3]
# Inequality checking with some characters
assert not w1.wcs.cunit == ["a", "b", "c"]
assert w1.wcs.cunit != ["a", "b", "c"]
# Ordering comparison is not implemented, so a TypeError should be raised
with pytest.raises(TypeError):
w1.wcs.cunit < w2.wcs.cunit
class TestWcsWithTime:
def setup_method(self):
if _WCSLIB_VER >= Version("7.1"):
fname = get_pkg_data_filename("data/header_with_time_wcslib71.fits")
else:
fname = get_pkg_data_filename("data/header_with_time.fits")
self.header = fits.Header.fromfile(fname)
with pytest.warns(wcs.FITSFixedWarning):
self.w = wcs.WCS(self.header, key="A")
def test_keywords2wcsprm(self):
"""Make sure Wcsprm is populated correctly from the header."""
ctype = [self.header[val] for val in self.header["CTYPE*"]]
crval = [self.header[val] for val in self.header["CRVAL*"]]
crpix = [self.header[val] for val in self.header["CRPIX*"]]
cdelt = [self.header[val] for val in self.header["CDELT*"]]
cunit = [self.header[val] for val in self.header["CUNIT*"]]
assert list(self.w.wcs.ctype) == ctype
time_axis_code = 4000 if _WCSLIB_VER >= Version("7.9") else 0
assert list(self.w.wcs.axis_types) == [2200, 2201, 3300, time_axis_code]
assert_allclose(self.w.wcs.crval, crval)
assert_allclose(self.w.wcs.crpix, crpix)
assert_allclose(self.w.wcs.cdelt, cdelt)
assert list(self.w.wcs.cunit) == cunit
naxis = self.w.naxis
assert naxis == 4
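# Reconstruct the expected PC matrix from the PCi_jA header cards, defaulting
# to the identity matrix where cards are absent.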
pc = np.zeros((naxis, naxis), dtype=np.float64)
for i in range(1, 5):
for j in range(1, 5):
if i == j:
pc[i - 1, j - 1] = self.header.get(f"PC{i}_{j}A", 1)
else:
pc[i - 1, j - 1] = self.header.get(f"PC{i}_{j}A", 0)
assert_allclose(self.w.wcs.pc, pc)
char_keys = [
"timesys",
"trefpos",
"trefdir",
"plephem",
"timeunit",
"dateref",
"dateobs",
"datebeg",
"dateavg",
"dateend",
]
for key in char_keys:
assert getattr(self.w.wcs, key) == self.header.get(key, "")
num_keys = [
"mjdref",
"mjdobs",
"mjdbeg",
"mjdend",
"jepoch",
"bepoch",
"tstart",
"tstop",
"xposure",
"timsyer",
"timrder",
"timedel",
"timepixr",
"timeoffs",
"telapse",
"czphs",
"cperi",
]
for key in num_keys:
if key.upper() == "MJDREF":
hdrv = [
self.header.get("MJDREFIA", np.nan),
self.header.get("MJDREFFA", np.nan),
]
else:
hdrv = self.header.get(key, np.nan)
assert_allclose(getattr(self.w.wcs, key), hdrv)
def test_transforms(self):
assert_allclose(self.w.all_pix2world(*self.w.wcs.crpix, 1), self.w.wcs.crval)
def test_invalid_coordinate_masking():
# Regression test for an issue which caused all coordinates to be set to NaN
# after a transformation rather than just the invalid ones as reported by
# WCSLIB. A specific example of this is that when considering an all-sky
# spectral cube with a spectral axis that is not correlated with the sky
# axes, if transforming pixel coordinates that did not fall 'in' the sky,
# the spectral world value was also masked even though that coordinate
# was valid.
w = wcs.WCS(naxis=3)
w.wcs.ctype = "VELO_LSR", "GLON-CAR", "GLAT-CAR"
w.wcs.crval = -20, 0, 0
w.wcs.crpix = 1, 1441, 241
w.wcs.cdelt = 1.3, -0.125, 0.125
px = [-10, -10, 20]
py = [-10, 10, 20]
pz = [-10, 10, 20]
wx, wy, wz = w.wcs_pix2world(px, py, pz, 0)
# Before fixing this, wx used to return np.nan for the first element
assert_allclose(wx, [-33, -33, 6])
assert_allclose(wy, [np.nan, 178.75, 177.5])
assert_allclose(wz, [np.nan, -28.75, -27.5])
def test_no_pixel_area():
w = wcs.WCS(naxis=3)
# Pixel area cannot be computed
with pytest.raises(ValueError, match="Pixel area is defined only for 2D pixels"):
w.proj_plane_pixel_area()
# Pixel scales still possible
assert_quantity_allclose(w.proj_plane_pixel_scales(), 1)
def test_distortion_header(tmp_path):
"""
Test that the plate distortion model is correctly described by `wcs.to_header()`
and preserved when creating a Cutout2D from the image, writing it to FITS,
and reading it back from the file.
"""
path = get_pkg_data_filename("data/dss.14.29.56-62.41.05.fits.gz")
cen = np.array((50, 50))
siz = np.array((20, 20))
with fits.open(path) as hdulist:
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(hdulist[0].header)
cut = Cutout2D(hdulist[0].data, position=cen, size=siz, wcs=w)
# This converts the DSS plate solution model with AMD[XY]n coefficients into a
# Template Polynomial Distortion model (TPD.FWD.n coefficients);
# not testing explicitly for the header keywords here.
if _WCSLIB_VER < Version("7.4"):
with pytest.warns(
AstropyWarning, match="WCS contains a TPD distortion model in CQDIS"
):
w0 = wcs.WCS(w.to_header_string())
with pytest.warns(
AstropyWarning, match="WCS contains a TPD distortion model in CQDIS"
):
w1 = wcs.WCS(cut.wcs.to_header_string())
if _WCSLIB_VER >= Version("7.1"):
pytest.xfail("TPD coefficients incomplete with WCSLIB >= 7.1 < 7.4")
else:
w0 = wcs.WCS(w.to_header_string())
w1 = wcs.WCS(cut.wcs.to_header_string())
assert w.pixel_to_world(0, 0).separation(w0.pixel_to_world(0, 0)) < 1.0e-3 * u.mas
assert w.pixel_to_world(*cen).separation(w0.pixel_to_world(*cen)) < 1.0e-3 * u.mas
assert (
w.pixel_to_world(*cen).separation(w1.pixel_to_world(*(siz / 2)))
< 1.0e-3 * u.mas
)
cutfile = tmp_path / "cutout.fits"
fits.writeto(cutfile, cut.data, cut.wcs.to_header())
with fits.open(cutfile) as hdulist:
w2 = wcs.WCS(hdulist[0].header)
assert (
w.pixel_to_world(*cen).separation(w2.pixel_to_world(*(siz / 2)))
< 1.0e-3 * u.mas
)
def test_pixlist_wcs_colsel():
"""
Test selection of a specific pixel list WCS using ``colsel``. See #11412.
"""
hdr_file = get_pkg_data_filename("data/chandra-pixlist-wcs.hdr")
hdr = fits.Header.fromtextfile(hdr_file)
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(hdr, keysel=["image", "pixel"], colsel=[11, 12])
assert w.naxis == 2
assert list(w.wcs.ctype) == ["RA---TAN", "DEC--TAN"]
assert np.allclose(w.wcs.crval, [229.38051931869, -58.81108068885])
assert np.allclose(w.wcs.pc, [[1, 0], [0, 1]])
assert np.allclose(w.wcs.cdelt, [-0.00013666666666666, 0.00013666666666666])
assert np.allclose(w.wcs.lonpole, 180.0)
@pytest.mark.skipif(
_WCSLIB_VER < Version("7.8"),
reason="TIME axis extraction only works with wcslib 7.8 or later",
)
def test_time_axis_selection():
w = wcs.WCS(naxis=3)
w.wcs.ctype = ["RA---TAN", "DEC--TAN", "TIME"]
w.wcs.set()
assert list(w.sub([wcs.WCSSUB_TIME]).wcs.ctype) == ["TIME"]
assert (
w.wcs_pix2world([[1, 2, 3]], 0)[0, 2]
== w.sub([wcs.WCSSUB_TIME]).wcs_pix2world([[3]], 0)[0, 0]
)
@pytest.mark.skipif(
_WCSLIB_VER < Version("7.8"),
reason="TIME axis extraction only works with wcslib 7.8 or later",
)
def test_temporal():
w = wcs.WCS(naxis=3)
w.wcs.ctype = ["RA---TAN", "DEC--TAN", "TIME"]
w.wcs.set()
assert w.has_temporal
assert w.sub([wcs.WCSSUB_TIME]).is_temporal
assert (
w.wcs_pix2world([[1, 2, 3]], 0)[0, 2]
== w.temporal.wcs_pix2world([[3]], 0)[0, 0]
)
def test_swapaxes_same_val_roundtrip():
w = wcs.WCS(naxis=3)
w.wcs.ctype = ["RA---TAN", "DEC--TAN", "FREQ"]
w.wcs.crpix = [32.5, 16.5, 1.0]
w.wcs.crval = [5.63, -72.05, 1.0]
w.wcs.pc = [[5.9e-06, 1.3e-05, 0.0], [-1.2e-05, 5.0e-06, 0.0], [0.0, 0.0, 1.0]]
w.wcs.cdelt = [1.0, 1.0, 1.0]
w.wcs.set()
axes_order = [3, 2, 1]
axes_order0 = [i - 1 for i in axes_order]
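# Zero-based version of axes_order, used below to index numpy arrays.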
ws = w.sub(axes_order)
imcoord = np.array([3, 5, 7])
imcoords = imcoord[axes_order0]
val_ref = w.wcs_pix2world([imcoord], 0)[0]
val_swapped = ws.wcs_pix2world([imcoords], 0)[0]
# check that the original and swapped axis orderings give the same results
assert np.allclose(val_ref[axes_order0], val_swapped, rtol=0, atol=1e-8)
# check round-tripping:
assert np.allclose(w.wcs_world2pix([val_ref], 0)[0], imcoord, rtol=0, atol=1e-8)
|
ce48d838df4f73e0efc95fdfa9812dd385d60d1f0d25127c94c063f3ba78b56b | import numpy as np
from numpy.testing import assert_allclose
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
from astropy.units import Quantity
from astropy.wcs.wcsapi.high_level_api import (
HighLevelWCSMixin,
high_level_objects_to_values,
values_to_high_level_objects,
)
from astropy.wcs.wcsapi.low_level_api import BaseLowLevelWCS
class DoubleLowLevelWCS(BaseLowLevelWCS):
"""
Basic dummy transformation that doubles values.
"""
def pixel_to_world_values(self, *pixel_arrays):
return [np.asarray(pix) * 2 for pix in pixel_arrays]
def world_to_pixel_values(self, *world_arrays):
return [np.asarray(world) / 2 for world in world_arrays]
class SimpleDuplicateWCS(DoubleLowLevelWCS, HighLevelWCSMixin):
"""
This example WCS has two world coordinates that use the same class,
which triggers a different path in the high-level WCS code.
"""
@property
def pixel_n_dim(self):
return 2
@property
def world_n_dim(self):
return 2
@property
def world_axis_physical_types(self):
return ["pos.eq.ra", "pos.eq.dec"]
@property
def world_axis_units(self):
return ["deg", "deg"]
@property
def world_axis_object_components(self):
return [("test1", 0, "value"), ("test2", 0, "value")]
@property
def world_axis_object_classes(self):
return {
"test1": (Quantity, (), {"unit": "deg"}),
"test2": (Quantity, (), {"unit": "deg"}),
}
def test_simple_duplicate():
# Make sure that things work properly when the low-level WCS uses the same
# class for two of the coordinates.
wcs = SimpleDuplicateWCS()
q1, q2 = wcs.pixel_to_world(1, 2)
assert isinstance(q1, Quantity)
assert isinstance(q2, Quantity)
x, y = wcs.world_to_pixel(q1, q2)
assert_allclose(x, 1)
assert_allclose(y, 2)
class SkyCoordDuplicateWCS(DoubleLowLevelWCS, HighLevelWCSMixin):
"""
This example WCS returns two SkyCoord objects, which triggers a
different path in the high-level WCS code.
"""
@property
def pixel_n_dim(self):
return 4
@property
def world_n_dim(self):
return 4
@property
def world_axis_physical_types(self):
return ["pos.eq.ra", "pos.eq.dec", "pos.galactic.lon", "pos.galactic.lat"]
@property
def world_axis_units(self):
return ["deg", "deg", "deg", "deg"]
@property
def world_axis_object_components(self):
# Deliberately use 'ra'/'dec' here to make sure that string argument
# names work properly.
return [
("test1", "ra", "spherical.lon.degree"),
("test1", "dec", "spherical.lat.degree"),
("test2", 0, "spherical.lon.degree"),
("test2", 1, "spherical.lat.degree"),
]
@property
def world_axis_object_classes(self):
return {
"test1": (SkyCoord, (), {"unit": "deg"}),
"test2": (SkyCoord, (), {"unit": "deg", "frame": "galactic"}),
}
def test_skycoord_duplicate():
# Make sure that things work properly when the low-level WCS uses the same
# class, and specifically a SkyCoord for two of the coordinates.
wcs = SkyCoordDuplicateWCS()
c1, c2 = wcs.pixel_to_world(1, 2, 3, 4)
assert isinstance(c1, SkyCoord)
assert isinstance(c2, SkyCoord)
x, y, z, a = wcs.world_to_pixel(c1, c2)
assert_allclose(x, 1)
assert_allclose(y, 2)
assert_allclose(z, 3)
assert_allclose(a, 4)
class SerializedWCS(DoubleLowLevelWCS, HighLevelWCSMixin):
"""
WCS with serialized classes
"""
@property
def serialized_classes(self):
return True
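# serialized_classes=True means world_axis_object_classes below gives the
# classes as fully qualified name strings rather than as class objects.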
@property
def pixel_n_dim(self):
return 2
@property
def world_n_dim(self):
return 2
@property
def world_axis_physical_types(self):
return ["pos.eq.ra", "pos.eq.dec"]
@property
def world_axis_units(self):
return ["deg", "deg"]
@property
def world_axis_object_components(self):
return [("test", 0, "value")]
@property
def world_axis_object_classes(self):
return {
"test": (
"astropy.units.Quantity",
(),
{"unit": ("astropy.units.Unit", ("deg",), {})},
)
}
def test_serialized_classes():
wcs = SerializedWCS()
q = wcs.pixel_to_world(1)
assert isinstance(q, Quantity)
x = wcs.world_to_pixel(q)
assert_allclose(x, 1)
def test_objects_to_values():
wcs = SkyCoordDuplicateWCS()
c1, c2 = wcs.pixel_to_world(1, 2, 3, 4)
values = high_level_objects_to_values(c1, c2, low_level_wcs=wcs)
assert np.allclose(values, [2, 4, 6, 8])
def test_values_to_objects():
wcs = SkyCoordDuplicateWCS()
c1, c2 = wcs.pixel_to_world(1, 2, 3, 4)
c1_out, c2_out = values_to_high_level_objects(*[2, 4, 6, 8], low_level_wcs=wcs)
assert c1.ra == c1_out.ra
assert c2.l == c2_out.l
assert c1.dec == c1_out.dec
assert c2.b == c2_out.b
class MinimalHighLevelWCS(HighLevelWCSMixin):
def __init__(self, low_level_wcs):
self._low_level_wcs = low_level_wcs
@property
def low_level_wcs(self):
return self._low_level_wcs
def test_minimal_mixin_subclass():
# Regression test for a bug that caused coordinate conversions to fail
# unless the WCS dimensions were defined on the high level WCS (which they
# are not required to be)
fits_wcs = WCS(naxis=2)
high_level_wcs = MinimalHighLevelWCS(fits_wcs)
coord = high_level_wcs.pixel_to_world(1, 2)
pixel = high_level_wcs.world_to_pixel(*coord)
coord = high_level_wcs.array_index_to_world(1, 2)
pixel = high_level_wcs.world_to_array_index(*coord)
assert_allclose(pixel, (1, 2))
|
1c9f7957e3f480f255377358491ddce7e563ebe76245be13f6afe234037339d6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import datetime
import functools
import os
from copy import deepcopy
from decimal import Decimal, localcontext
from io import StringIO
import erfa
import numpy as np
import pytest
from erfa import ErfaWarning
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.coordinates import EarthLocation
from astropy.table import Column, Table
from astropy.time import (
STANDARD_TIME_SCALES,
TIME_FORMATS,
AstropyDatetimeLeapSecondWarning,
ScaleValueError,
Time,
TimeDelta,
TimeString,
TimezoneInfo,
conf,
)
from astropy.utils import iers, isiterable
from astropy.utils.compat.optional_deps import HAS_H5PY, HAS_PYTZ
from astropy.utils.exceptions import AstropyDeprecationWarning
allclose_jd = functools.partial(np.allclose, rtol=np.finfo(float).eps, atol=0)
allclose_jd2 = functools.partial(
np.allclose, rtol=np.finfo(float).eps, atol=np.finfo(float).eps
) # 20 ps atol
allclose_sec = functools.partial(
np.allclose, rtol=np.finfo(float).eps, atol=np.finfo(float).eps * 24 * 3600
)
allclose_year = functools.partial(
np.allclose, rtol=np.finfo(float).eps, atol=0.0
) # 14 microsec at current epoch
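# Snapshot the Time.FORMATS registry before each test and restore it afterwards
# so that tests which modify the registry do not leak state.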
def setup_function(func):
func.FORMATS_ORIG = deepcopy(Time.FORMATS)
def teardown_function(func):
Time.FORMATS.clear()
Time.FORMATS.update(func.FORMATS_ORIG)
class TestBasic:
"""Basic tests stemming from initial example and API reference"""
def test_simple(self):
times = ["1999-01-01 00:00:00.123456789", "2010-01-01 00:00:00"]
t = Time(times, format="iso", scale="utc")
assert (
repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>"
)
assert allclose_jd(t.jd1, np.array([2451180.0, 2455198.0]))
assert allclose_jd2(
t.jd2, np.array([-0.5 + 1.4288980208333335e-06, -0.50000000e00])
)
# Set scale to TAI
t = t.tai
assert (
repr(t) == "<Time object: scale='tai' format='iso' "
"value=['1999-01-01 00:00:32.123' '2010-01-01 00:00:34.000']>"
)
assert allclose_jd(t.jd1, np.array([2451180.0, 2455198.0]))
assert allclose_jd2(
t.jd2,
np.array([-0.5 + 0.00037179926839122024, -0.5 + 0.00039351851851851852]),
)
# Get a new ``Time`` object which is referenced to the TT scale
# (internal JD1 and JD2 are now with respect to the TT scale)
assert (
repr(t.tt) == "<Time object: scale='tt' format='iso' "
"value=['1999-01-01 00:01:04.307' '2010-01-01 00:01:06.184']>"
)
# Get the representation of the ``Time`` object in a particular format
# (in this case seconds since 1998.0). This returns either a scalar or
# array, depending on whether the input was a scalar or array
assert allclose_sec(
t.cxcsec, np.array([31536064.307456788, 378691266.18400002])
)
def test_different_dimensions(self):
"""Test scalars, vector, and higher-dimensions"""
# scalar
val, val1 = 2450000.0, 0.125
t1 = Time(val, val1, format="jd")
assert t1.isscalar is True and t1.shape == ()
# vector
val = np.arange(2450000.0, 2450010.0)
t2 = Time(val, format="jd")
assert t2.isscalar is False and t2.shape == val.shape
# explicitly check broadcasting for mixed vector, scalar.
val2 = 0.0
t3 = Time(val, val2, format="jd")
assert t3.isscalar is False and t3.shape == val.shape
val2 = (np.arange(5.0) / 10.0).reshape(5, 1)
# now see if broadcasting to two-dimensional works
t4 = Time(val, val2, format="jd")
assert t4.isscalar is False
assert t4.shape == np.broadcast(val, val2).shape
@pytest.mark.parametrize("format_", Time.FORMATS)
def test_empty_value(self, format_):
t = Time([], format=format_)
assert t.size == 0
assert t.shape == (0,)
assert t.format == format_
t_value = t.value
assert t_value.size == 0
assert t_value.shape == (0,)
t2 = Time(t_value, format=format_)
assert t2.size == 0
assert t2.shape == (0,)
assert t2.format == format_
t3 = t2.tai
assert t3.size == 0
assert t3.shape == (0,)
assert t3.format == format_
assert t3.scale == "tai"
@pytest.mark.parametrize("value", [2455197.5, [2455197.5]])
def test_copy_time(self, value):
"""Test copying the values of a Time object by passing it into the
Time initializer.
"""
t = Time(value, format="jd", scale="utc")
t2 = Time(t, copy=False)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is t2._time.jd1
assert t._time.jd2 is t2._time.jd2
t2 = Time(t, copy=True)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is not t2._time.jd1
assert t._time.jd2 is not t2._time.jd2
# Include initializers
t2 = Time(t, format="iso", scale="tai", precision=1)
assert t2.value == "2010-01-01 00:00:34.0"
t2 = Time(t, format="iso", scale="tai", out_subfmt="date")
assert t2.value == "2010-01-01"
def test_getitem(self):
"""Test that Time objects holding arrays are properly subscriptable,
set isscalar as appropriate, and also subscript delta_ut1_utc, etc."""
mjd = np.arange(50000, 50010)
t = Time(mjd, format="mjd", scale="utc", location=("45d", "50d"))
t1 = t[3]
assert t1.isscalar is True
assert t1._time.jd1 == t._time.jd1[3]
assert t1.location is t.location
t1a = Time(mjd[3], format="mjd", scale="utc")
assert t1a.isscalar is True
assert np.all(t1._time.jd1 == t1a._time.jd1)
t1b = Time(t[3])
assert t1b.isscalar is True
assert np.all(t1._time.jd1 == t1b._time.jd1)
t2 = t[4:6]
assert t2.isscalar is False
assert np.all(t2._time.jd1 == t._time.jd1[4:6])
assert t2.location is t.location
t2a = Time(t[4:6])
assert t2a.isscalar is False
assert np.all(t2a._time.jd1 == t._time.jd1[4:6])
t2b = Time([t[4], t[5]])
assert t2b.isscalar is False
assert np.all(t2b._time.jd1 == t._time.jd1[4:6])
t2c = Time((t[4], t[5]))
assert t2c.isscalar is False
assert np.all(t2c._time.jd1 == t._time.jd1[4:6])
t.delta_tdb_tt = np.arange(len(t)) # Explicitly set (not testing .tdb)
t3 = t[4:6]
assert np.all(t3._delta_tdb_tt == t._delta_tdb_tt[4:6])
t4 = Time(
mjd,
format="mjd",
scale="utc",
location=(np.arange(len(mjd)), np.arange(len(mjd))),
)
t5a = t4[3]
assert t5a.location == t4.location[3]
assert t5a.location.shape == ()
t5b = t4[3:4]
assert t5b.location.shape == (1,)
# Check that indexing a size-1 array returns a scalar location as well;
# see gh-10113.
t5c = t5b[0]
assert t5c.location.shape == ()
t6 = t4[4:6]
assert np.all(t6.location == t4.location[4:6])
# check it is a view
# (via ndarray, since quantity setter problematic for structured array)
allzeros = np.array((0.0, 0.0, 0.0), dtype=t4.location.dtype)
assert t6.location.view(np.ndarray)[-1] != allzeros
assert t4.location.view(np.ndarray)[5] != allzeros
t6.location.view(np.ndarray)[-1] = allzeros
assert t4.location.view(np.ndarray)[5] == allzeros
# Test subscription also works for two-dimensional arrays.
frac = np.arange(0.0, 0.999, 0.2)
t7 = Time(
mjd[:, np.newaxis] + frac,
format="mjd",
scale="utc",
location=("45d", "50d"),
)
assert t7[0, 0]._time.jd1 == t7._time.jd1[0, 0]
assert t7[0, 0].isscalar is True
assert np.all(t7[5]._time.jd1 == t7._time.jd1[5])
assert np.all(t7[5]._time.jd2 == t7._time.jd2[5])
assert np.all(t7[:, 2]._time.jd1 == t7._time.jd1[:, 2])
assert np.all(t7[:, 2]._time.jd2 == t7._time.jd2[:, 2])
assert np.all(t7[:, 0]._time.jd1 == t._time.jd1)
assert np.all(t7[:, 0]._time.jd2 == t._time.jd2)
# Get tdb to check that delta_tdb_tt attribute is sliced properly.
t7_tdb = t7.tdb
assert t7_tdb[0, 0].delta_tdb_tt == t7_tdb.delta_tdb_tt[0, 0]
assert np.all(t7_tdb[5].delta_tdb_tt == t7_tdb.delta_tdb_tt[5])
assert np.all(t7_tdb[:, 2].delta_tdb_tt == t7_tdb.delta_tdb_tt[:, 2])
# Explicitly set delta_tdb_tt attribute. Now it should not be sliced.
t7.delta_tdb_tt = 0.1
t7_tdb2 = t7.tdb
assert t7_tdb2[0, 0].delta_tdb_tt == 0.1
assert t7_tdb2[5].delta_tdb_tt == 0.1
assert t7_tdb2[:, 2].delta_tdb_tt == 0.1
# Check broadcasting of location.
t8 = Time(
mjd[:, np.newaxis] + frac,
format="mjd",
scale="utc",
location=(np.arange(len(frac)), np.arange(len(frac))),
)
assert t8[0, 0].location == t8.location[0, 0]
assert np.all(t8[5].location == t8.location[5])
assert np.all(t8[:, 2].location == t8.location[:, 2])
# Finally check empty array.
t9 = t[:0]
assert t9.isscalar is False
assert t9.shape == (0,)
assert t9.size == 0
def test_properties(self):
"""Use properties to convert scales and formats. Note that the UT1 to
UTC transformation requires a supplementary value (``delta_ut1_utc``)
that can be obtained by interpolating from a table supplied by IERS.
This is tested separately."""
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert allclose_jd(t.jd, 2455197.5)
assert t.iso == "2010-01-01 00:00:00.000"
assert t.tt.iso == "2010-01-01 00:01:06.184"
assert t.tai.fits == "2010-01-01T00:00:34.000"
assert allclose_jd(t.utc.jd, 2455197.5)
assert allclose_jd(t.ut1.jd, 2455197.500003867)
assert t.tcg.isot == "2010-01-01T00:01:06.910"
assert allclose_sec(t.unix, 1262304000.0)
assert allclose_sec(t.cxcsec, 378691266.184)
assert allclose_sec(t.gps, 946339215.0)
assert t.datetime == datetime.datetime(2010, 1, 1)
def test_precision(self):
"""Set the output precision which is used for some formats. This is
also a test of the code that provides a dict for global and instance
options."""
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
# Uses initial class-defined precision=3
assert t.iso == "2010-01-01 00:00:00.000"
# Set instance precision to 9
t.precision = 9
assert t.iso == "2010-01-01 00:00:00.000000000"
assert t.tai.utc.iso == "2010-01-01 00:00:00.000000000"
def test_precision_input(self):
"""Verifies that precision can only be 0-9 (inclusive). Any other
value should raise a ValueError exception."""
err_message = "precision attribute must be an int"
with pytest.raises(ValueError, match=err_message):
t = Time("2010-01-01 00:00:00", format="iso", scale="utc", precision=10)
with pytest.raises(ValueError, match=err_message):
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
t.precision = -1
def test_transforms(self):
"""Transform from UTC to all supported time scales (TAI, TCB, TCG,
TDB, TT, UT1, UTC). This requires auxiliary information (latitude and
longitude)."""
lat = 19.48125
lon = -155.933222
t = Time(
"2006-01-15 21:24:37.5",
format="iso",
scale="utc",
precision=7,
location=(lon, lat),
)
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == "2006-01-15 21:24:37.5000000"
assert t.ut1.iso == "2006-01-15 21:24:37.8341000"
assert t.tai.iso == "2006-01-15 21:25:10.5000000"
assert t.tt.iso == "2006-01-15 21:25:42.6840000"
assert t.tcg.iso == "2006-01-15 21:25:43.3226905"
assert t.tdb.iso == "2006-01-15 21:25:42.6843728"
assert t.tcb.iso == "2006-01-15 21:25:56.8939523"
def test_transforms_no_location(self):
"""Location should default to geocenter (relevant for TDB, TCB)."""
t = Time("2006-01-15 21:24:37.5", format="iso", scale="utc", precision=7)
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == "2006-01-15 21:24:37.5000000"
assert t.ut1.iso == "2006-01-15 21:24:37.8341000"
assert t.tai.iso == "2006-01-15 21:25:10.5000000"
assert t.tt.iso == "2006-01-15 21:25:42.6840000"
assert t.tcg.iso == "2006-01-15 21:25:43.3226905"
assert t.tdb.iso == "2006-01-15 21:25:42.6843725"
assert t.tcb.iso == "2006-01-15 21:25:56.8939519"
# Check we get the same result
t2 = Time(
"2006-01-15 21:24:37.5",
format="iso",
scale="utc",
location=(0 * u.m, 0 * u.m, 0 * u.m),
)
assert t == t2
assert t.tdb == t2.tdb
def test_location(self):
"""Check that location creates an EarthLocation object, and that
such objects can be used as arguments.
"""
lat = 19.48125
lon = -155.933222
t = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=(lon, lat),
)
assert isinstance(t.location, EarthLocation)
location = EarthLocation(lon, lat)
t2 = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=location,
)
assert isinstance(t2.location, EarthLocation)
assert t2.location == t.location
t3 = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=(location.x, location.y, location.z),
)
assert isinstance(t3.location, EarthLocation)
assert t3.location == t.location
def test_location_array(self):
"""Check that location arrays are checked for size and used
for the corresponding times. Also checks that erfa
can handle array-valued locations, and can broadcast these if needed.
"""
lat = 19.48125
lon = -155.933222
t = Time(
["2006-01-15 21:24:37.5"] * 2,
format="iso",
scale="utc",
precision=6,
location=(lon, lat),
)
assert np.all(t.utc.iso == "2006-01-15 21:24:37.500000")
assert np.all(t.tdb.iso[0] == "2006-01-15 21:25:42.684373")
t2 = Time(
["2006-01-15 21:24:37.5"] * 2,
format="iso",
scale="utc",
precision=6,
location=(np.array([lon, 0]), np.array([lat, 0])),
)
assert np.all(t2.utc.iso == "2006-01-15 21:24:37.500000")
assert t2.tdb.iso[0] == "2006-01-15 21:25:42.684373"
assert t2.tdb.iso[1] != "2006-01-15 21:25:42.684373"
with pytest.raises(ValueError): # 1 time, but two locations
Time(
"2006-01-15 21:24:37.5",
format="iso",
scale="utc",
precision=6,
location=(np.array([lon, 0]), np.array([lat, 0])),
)
with pytest.raises(ValueError): # 3 times, but two locations
Time(
["2006-01-15 21:24:37.5"] * 3,
format="iso",
scale="utc",
precision=6,
location=(np.array([lon, 0]), np.array([lat, 0])),
)
# multidimensional
mjd = np.arange(50000.0, 50008.0).reshape(4, 2)
t3 = Time(mjd, format="mjd", scale="utc", location=(lon, lat))
assert t3.shape == (4, 2)
assert t3.location.shape == ()
assert t3.tdb.shape == t3.shape
t4 = Time(
mjd,
format="mjd",
scale="utc",
location=(np.array([lon, 0]), np.array([lat, 0])),
)
assert t4.shape == (4, 2)
assert t4.location.shape == t4.shape
assert t4.tdb.shape == t4.shape
t5 = Time(
mjd,
format="mjd",
scale="utc",
location=(
np.array([[lon], [0], [0], [0]]),
np.array([[lat], [0], [0], [0]]),
),
)
assert t5.shape == (4, 2)
assert t5.location.shape == t5.shape
assert t5.tdb.shape == t5.shape
def test_all_scale_transforms(self):
"""Test that standard scale transforms work. Does not test correctness,
except reversibility [#2074]. Also tests that standard scales can't be
converted to local scales"""
lat = 19.48125
lon = -155.933222
with iers.conf.set_temp("auto_download", False):
for scale1 in STANDARD_TIME_SCALES:
t1 = Time(
"2006-01-15 21:24:37.5",
format="iso",
scale=scale1,
location=(lon, lat),
)
for scale2 in STANDARD_TIME_SCALES:
t2 = getattr(t1, scale2)
t21 = getattr(t2, scale1)
assert allclose_jd(t21.jd, t1.jd)
# test for conversion to local scale
scale3 = "local"
with pytest.raises(ScaleValueError):
t2 = getattr(t1, scale3)
def test_creating_all_formats(self):
"""Create a time object using each defined format"""
Time(2000.5, format="decimalyear")
Time(100.0, format="cxcsec")
Time(100.0, format="unix")
Time(100.0, format="gps")
Time(1950.0, format="byear", scale="tai")
Time(2000.0, format="jyear", scale="tai")
Time("B1950.0", format="byear_str", scale="tai")
Time("J2000.0", format="jyear_str", scale="tai")
Time("2000-01-01 12:23:34.0", format="iso", scale="tai")
Time("2000-01-01 12:23:34.0Z", format="iso", scale="utc")
Time("2000-01-01T12:23:34.0", format="isot", scale="tai")
Time("2000-01-01T12:23:34.0Z", format="isot", scale="utc")
Time("2000-01-01T12:23:34.0", format="fits")
Time("2000-01-01T12:23:34.0", format="fits", scale="tdb")
Time(2400000.5, 51544.0333981, format="jd", scale="tai")
Time(0.0, 51544.0333981, format="mjd", scale="tai")
Time("2000:001:12:23:34.0", format="yday", scale="tai")
Time("2000:001:12:23:34.0Z", format="yday", scale="utc")
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
Time(dt, format="datetime", scale="tai")
Time([dt, dt], format="datetime", scale="tai")
dt64 = np.datetime64("2012-06-18T02:00:05.453000000")
Time(dt64, format="datetime64", scale="tai")
Time([dt64, dt64], format="datetime64", scale="tai")
def test_local_format_transforms(self):
"""
Test transformation of local time to different formats.
Transformation to formats with a reference time should raise
ScaleValueError.
"""
t = Time("2006-01-15 21:24:37.5", scale="local")
assert_allclose(t.jd, 2453751.3921006946, atol=0.001 / 3600.0 / 24.0, rtol=0.0)
assert_allclose(t.mjd, 53750.892100694444, atol=0.001 / 3600.0 / 24.0, rtol=0.0)
assert_allclose(
t.decimalyear,
2006.0408002758752,
atol=0.001 / 3600.0 / 24.0 / 365.0,
rtol=0.0,
)
assert t.datetime == datetime.datetime(2006, 1, 15, 21, 24, 37, 500000)
assert t.isot == "2006-01-15T21:24:37.500"
assert t.yday == "2006:015:21:24:37.500"
assert t.fits == "2006-01-15T21:24:37.500"
assert_allclose(
t.byear, 2006.04217888831, atol=0.001 / 3600.0 / 24.0 / 365.0, rtol=0.0
)
assert_allclose(
t.jyear, 2006.0407723496082, atol=0.001 / 3600.0 / 24.0 / 365.0, rtol=0.0
)
assert t.byear_str == "B2006.042"
assert t.jyear_str == "J2006.041"
# epochTimeFormats
with pytest.raises(ScaleValueError):
t.gps
with pytest.raises(ScaleValueError):
t.unix
with pytest.raises(ScaleValueError):
t.cxcsec
with pytest.raises(ScaleValueError):
t.plot_date
def test_datetime(self):
"""
Test datetime format, including guessing the format from the input type
by not providing the format keyword to Time.
"""
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
dt2 = datetime.datetime(2001, 1, 1)
t = Time(dt, scale="utc", precision=9)
assert t.iso == "2000-01-02 03:04:05.123456000"
assert t.datetime == dt
assert t.value == dt
t2 = Time(t.iso, scale="utc")
assert t2.datetime == dt
t = Time([dt, dt2], scale="utc")
assert np.all(t.value == [dt, dt2])
t = Time("2000-01-01 01:01:01.123456789", scale="tai")
assert t.datetime == datetime.datetime(2000, 1, 1, 1, 1, 1, 123457)
# broadcasting
dt3 = (dt + (dt2 - dt) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale="utc")
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1])
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2]))
assert Time(t3[2, 0]) == t3[2, 0]
def test_datetime64(self):
dt64 = np.datetime64("2000-01-02T03:04:05.123456789")
dt64_2 = np.datetime64("2000-01-02")
t = Time(dt64, scale="utc", precision=9, format="datetime64")
assert t.iso == "2000-01-02 03:04:05.123456789"
assert t.datetime64 == dt64
assert t.value == dt64
t2 = Time(t.iso, scale="utc")
assert t2.datetime64 == dt64
t = Time(dt64_2, scale="utc", precision=3, format="datetime64")
assert t.iso == "2000-01-02 00:00:00.000"
assert t.datetime64 == dt64_2
assert t.value == dt64_2
t2 = Time(t.iso, scale="utc")
assert t2.datetime64 == dt64_2
t = Time([dt64, dt64_2], scale="utc", format="datetime64")
assert np.all(t.value == [dt64, dt64_2])
t = Time("2000-01-01 01:01:01.123456789", scale="tai")
assert t.datetime64 == np.datetime64("2000-01-01T01:01:01.123456789")
# broadcasting
dt3 = (dt64 + (dt64_2 - dt64) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale="utc", format="datetime64")
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1], format="datetime64")
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2], format="datetime64"))
assert Time(t3[2, 0], format="datetime64") == t3[2, 0]
def test_epoch_transform(self):
"""Besselian and julian epoch transforms"""
jd = 2457073.05631
t = Time(jd, format="jd", scale="tai", precision=6)
assert allclose_year(t.byear, 2015.1365941020817)
assert allclose_year(t.jyear, 2015.1349933196439)
assert t.byear_str == "B2015.136594"
assert t.jyear_str == "J2015.134993"
t2 = Time(t.byear, format="byear", scale="tai")
assert allclose_jd(t2.jd, jd)
t2 = Time(t.jyear, format="jyear", scale="tai")
assert allclose_jd(t2.jd, jd)
t = Time("J2015.134993", scale="tai", precision=6)
assert np.allclose(
t.jd, jd, rtol=1e-10, atol=0
) # J2015.134993 has 10 digit precision
assert t.byear_str == "B2015.136594"
def test_input_validation(self):
"""Wrong input type raises error"""
times = [10, 20]
with pytest.raises(ValueError):
Time(times, format="iso", scale="utc")
with pytest.raises(ValueError):
Time("2000:001", format="jd", scale="utc")
with pytest.raises(ValueError): # unguessable
Time([])
with pytest.raises(ValueError):
Time([50000.0], ["bad"], format="mjd", scale="tai")
with pytest.raises(ValueError):
Time(50000.0, "bad", format="mjd", scale="tai")
with pytest.raises(ValueError):
Time("2005-08-04T00:01:02.000Z", scale="tai")
# regression test against #3396
with pytest.raises(ValueError):
Time(np.nan, format="jd", scale="utc")
with pytest.raises(ValueError):
with pytest.warns(AstropyDeprecationWarning):
Time("2000-01-02T03:04:05(TAI)", scale="utc")
with pytest.raises(ValueError):
Time("2000-01-02T03:04:05(TAI")
with pytest.raises(ValueError):
Time("2000-01-02T03:04:05(UT(NIST)")
def test_utc_leap_sec(self):
"""Time behaves properly near or in UTC leap second. This
uses the 2012-06-30 leap second for testing."""
for year, month, day in ((2012, 6, 30), (2016, 12, 31)):
# Start with a day without a leap second and note rollover
yyyy_mm = f"{year:04d}-{month:02d}"
yyyy_mm_dd = f"{year:04d}-{month:02d}-{day:02d}"
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm + "-01 23:59:60.0", scale="utc")
assert t1.iso == yyyy_mm + "-02 00:00:00.000"
# Leap second is different
t1 = Time(yyyy_mm_dd + " 23:59:59.900", scale="utc")
assert t1.iso == yyyy_mm_dd + " 23:59:59.900"
t1 = Time(yyyy_mm_dd + " 23:59:60.000", scale="utc")
assert t1.iso == yyyy_mm_dd + " 23:59:60.000"
t1 = Time(yyyy_mm_dd + " 23:59:60.999", scale="utc")
assert t1.iso == yyyy_mm_dd + " 23:59:60.999"
if month == 6:
yyyy_mm_dd_plus1 = f"{year:04d}-07-01"
else:
yyyy_mm_dd_plus1 = f"{year + 1:04d}-01-01"
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm_dd + " 23:59:61.0", scale="utc")
assert t1.iso == yyyy_mm_dd_plus1 + " 00:00:00.000"
# Delta time gives 2 seconds here as expected
t0 = Time(yyyy_mm_dd + " 23:59:59", scale="utc")
t1 = Time(yyyy_mm_dd_plus1 + " 00:00:00", scale="utc")
assert allclose_sec((t1 - t0).sec, 2.0)
def test_init_from_time_objects(self):
"""Initialize from one or more Time objects"""
t1 = Time("2007:001", scale="tai")
t2 = Time(["2007-01-02", "2007-01-03"], scale="utc")
# Init from a list of Time objects without an explicit scale
t3 = Time([t1, t2])
# Test that init appropriately combines a scalar (t1) and list (t2)
# and that scale and format are same as first element.
assert len(t3) == 3
assert t3.scale == t1.scale
assert t3.format == t1.format # t1 format is yday
assert np.all(t3.value == np.concatenate([[t1.yday], t2.tai.yday]))
# Init from a single Time object without a scale
t3 = Time(t1)
assert t3.isscalar
assert t3.scale == t1.scale
assert t3.format == t1.format
assert np.all(t3.value == t1.value)
# Init from a single Time object with scale specified
t3 = Time(t1, scale="utc")
assert t3.scale == "utc"
assert np.all(t3.value == t1.utc.value)
# Init from a list of Time object with scale specified
t3 = Time([t1, t2], scale="tt")
assert t3.scale == "tt"
assert t3.format == t1.format # yday
assert np.all(t3.value == np.concatenate([[t1.tt.yday], t2.tt.yday]))
# OK, how likely is this... but might as well test.
mjd = np.arange(50000.0, 50006.0)
frac = np.arange(0.0, 0.999, 0.2)
t4 = Time(mjd[:, np.newaxis] + frac, format="mjd", scale="utc")
t5 = Time([t4[:2], t4[4:5]])
assert t5.shape == (3, 5)
# throw error when deriving local scale time
# from non local time scale
with pytest.raises(ValueError):
Time(t1, scale="local")
class TestVal2:
"""Tests related to val2"""
@pytest.mark.parametrize(
"d",
[
dict(val="2001:001", val2="ignored", scale="utc"),
dict(
val={
"year": 2015,
"month": 2,
"day": 3,
"hour": 12,
"minute": 13,
"second": 14.567,
},
val2="ignored",
scale="utc",
),
dict(val=np.datetime64("2005-02-25"), val2="ignored", scale="utc"),
dict(
val=datetime.datetime(2000, 1, 2, 12, 0, 0), val2="ignored", scale="utc"
),
],
)
def test_unused_val2_raises(self, d):
"""Test that providing val2 is for string input lets user know we won't use it"""
with pytest.raises(ValueError):
Time(**d)
def test_val2(self):
"""Various tests of the val2 input"""
t = Time([0.0, 50000.0], [50000.0, 0.0], format="mjd", scale="tai")
assert t.mjd[0] == t.mjd[1]
assert t.jd[0] == t.jd[1]
def test_val_broadcasts_against_val2(self):
mjd = np.arange(50000.0, 50007.0)
frac = np.arange(0.0, 0.999, 0.2)
t = Time(mjd[:, np.newaxis], frac, format="mjd", scale="utc")
assert t.shape == (7, 5)
with pytest.raises(ValueError):
Time([0.0, 50000.0], [0.0, 1.0, 2.0], format="mjd", scale="tai")
def test_broadcast_not_writable(self):
val = (2458000 + np.arange(3))[:, None]
val2 = np.linspace(0, 1, 4, endpoint=False)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
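# t is created from broadcastable inputs while t_b uses fully materialized
# arrays; item assignment should behave identically for both.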
t_b[1, 2] = t_i
t[1, 2] = t_i
assert t_b[1, 2] == t[1, 2], "writing worked"
assert t_b[0, 2] == t[0, 2], "broadcasting didn't cause problems"
assert t_b[1, 1] == t[1, 1], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
def test_broadcast_one_not_writable(self):
val = 2458000 + np.arange(3)
val2 = np.arange(1)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1] = t_i
t[1] = t_i
assert t_b[1] == t[1], "writing worked"
assert t_b[0] == t[0], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
class TestSubFormat:
"""Test input and output subformat functionality"""
def test_input_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = [
"2000-01-01",
"2000-01-01 01:01",
"2000-01-01 01:01:01",
"2000-01-01 01:01:01.123",
]
t = Time(times, format="iso", scale="tai")
assert np.all(
t.iso
== np.array(
[
"2000-01-01 00:00:00.000",
"2000-01-01 01:01:00.000",
"2000-01-01 01:01:01.000",
"2000-01-01 01:01:01.123",
]
)
)
# Heterogeneous input formats with in_subfmt='date_*'
times = ["2000-01-01 01:01", "2000-01-01 01:01:01", "2000-01-01 01:01:01.123"]
t = Time(times, format="iso", scale="tai", in_subfmt="date_*")
assert np.all(
t.iso
== np.array(
[
"2000-01-01 01:01:00.000",
"2000-01-01 01:01:01.000",
"2000-01-01 01:01:01.123",
]
)
)
def test_input_subformat_fail(self):
"""Failed format matching"""
with pytest.raises(ValueError):
Time("2000-01-01 01:01", format="iso", scale="tai", in_subfmt="date")
def test_bad_input_subformat(self):
"""Non-existent input subformat"""
with pytest.raises(ValueError):
Time(
"2000-01-01 01:01", format="iso", scale="tai", in_subfmt="doesnt exist"
)
def test_output_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = [
"2000-01-01",
"2000-01-01 01:01",
"2000-01-01 01:01:01",
"2000-01-01 01:01:01.123",
]
t = Time(times, format="iso", scale="tai", out_subfmt="date_hm")
assert np.all(
t.iso
== np.array(
[
"2000-01-01 00:00",
"2000-01-01 01:01",
"2000-01-01 01:01",
"2000-01-01 01:01",
]
)
)
def test_fits_format(self):
"""FITS format includes bigger years."""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ["2000-01-01", "2000-01-01T01:01:01", "2000-01-01T01:01:01.123"]
t = Time(times, format="fits", scale="tai")
assert np.all(
t.fits
== np.array(
[
"2000-01-01T00:00:00.000",
"2000-01-01T01:01:01.000",
"2000-01-01T01:01:01.123",
]
)
)
# Explicit long format for output, default scale is UTC.
t2 = Time(times, format="fits", out_subfmt="long*")
assert np.all(
t2.fits
== np.array(
[
"+02000-01-01T00:00:00.000",
"+02000-01-01T01:01:01.000",
"+02000-01-01T01:01:01.123",
]
)
)
# Implicit long format for output, because of negative year.
times[2] = "-00594-01-01"
t3 = Time(times, format="fits", scale="tai")
assert np.all(
t3.fits
== np.array(
[
"+02000-01-01T00:00:00.000",
"+02000-01-01T01:01:01.000",
"-00594-01-01T00:00:00.000",
]
)
)
# Implicit long format for output, because of large positive year.
times[2] = "+10594-01-01"
t4 = Time(times, format="fits", scale="tai")
assert np.all(
t4.fits
== np.array(
[
"+02000-01-01T00:00:00.000",
"+02000-01-01T01:01:01.000",
"+10594-01-01T00:00:00.000",
]
)
)
def test_yday_format(self):
"""Year:Day_of_year format"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ["2000-12-01", "2001-12-01 01:01:01.123"]
t = Time(times, format="iso", scale="tai")
t.out_subfmt = "date_hm"
assert np.all(t.yday == np.array(["2000:336:00:00", "2001:335:01:01"]))
t.out_subfmt = "*"
assert np.all(
t.yday == np.array(["2000:336:00:00:00.000", "2001:335:01:01:01.123"])
)
def test_scale_input(self):
"""Test for issues related to scale input"""
# Check case where required scale is defined by the TimeFormat.
# All three should work.
t = Time(100.0, format="cxcsec", scale="utc")
assert t.scale == "utc"
t = Time(100.0, format="unix", scale="tai")
assert t.scale == "tai"
t = Time(100.0, format="gps", scale="utc")
assert t.scale == "utc"
# Check that bad scale is caught when format is specified
with pytest.raises(ScaleValueError):
Time(1950.0, format="byear", scale="bad scale")
# Check that bad scale is caught when format is auto-determined
with pytest.raises(ScaleValueError):
Time("2000:001:00:00:00", scale="bad scale")
def test_fits_scale(self):
"""Test that the previous FITS-string formatting can still be handled
but with a DeprecationWarning."""
for inputs in (
("2000-01-02(TAI)", "tai"),
("1999-01-01T00:00:00.123(ET(NIST))", "tt"),
("2014-12-12T01:00:44.1(UTC)", "utc"),
):
with pytest.warns(AstropyDeprecationWarning):
t = Time(inputs[0])
assert t.scale == inputs[1]
# Create Time using normal ISOT syntax and compare with FITS
t2 = Time(inputs[0][: inputs[0].index("(")], format="isot", scale=inputs[1])
assert t == t2
# Explicit check that conversions still work despite warning
with pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:00.123456789(UTC)")
t = t.tai
assert t.isot == "1999-01-01T00:00:32.123"
with pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:32.123456789(TAI)")
t = t.utc
assert t.isot == "1999-01-01T00:00:00.123"
# Check scale consistency
with pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:32.123456789(TAI)", scale="tai")
assert t.scale == "tai"
with pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:32.123456789(ET)", scale="tt")
assert t.scale == "tt"
with pytest.raises(ValueError), pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:32.123456789(TAI)", scale="utc")
def test_scale_default(self):
"""Test behavior when no scale is provided"""
# These first three are TimeFromEpoch and have an intrinsic time scale
t = Time(100.0, format="cxcsec")
assert t.scale == "tt"
t = Time(100.0, format="unix")
assert t.scale == "utc"
t = Time(100.0, format="gps")
assert t.scale == "tai"
for date in ("2000:001", "2000-01-01T00:00:00"):
t = Time(date)
assert t.scale == "utc"
t = Time(2000.1, format="byear")
assert t.scale == "tt"
t = Time("J2000")
assert t.scale == "tt"
def test_epoch_times(self):
"""Test time formats derived from EpochFromTime"""
t = Time(0.0, format="cxcsec", scale="tai")
assert t.tt.iso == "1998-01-01 00:00:00.000"
# Create new time object from this one and change scale, format
t2 = Time(t, scale="tt", format="iso")
assert t2.value == "1998-01-01 00:00:00.000"
        # Value taken from Chandra.Time.DateTime('2010:001:00:00:00').secs
t_cxcsec = 378691266.184
t = Time(t_cxcsec, format="cxcsec", scale="utc")
assert allclose_sec(t.value, t_cxcsec)
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.value, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
assert t.yday == "2010:001:00:00:00.000"
t = Time("2010:001:00:00:00.000", scale="utc")
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
# Round trip through epoch time
for scale in ("utc", "tt"):
t = Time("2000:001", scale=scale)
t2 = Time(t.unix, scale=scale, format="unix")
assert getattr(t2, scale).iso == "2000-01-01 00:00:00.000"
# Test unix time. Values taken from http://en.wikipedia.org/wiki/Unix_time
t = Time("2013-05-20 21:18:46", scale="utc")
assert allclose_sec(t.unix, 1369084726.0)
assert allclose_sec(t.tt.unix, 1369084726.0)
# Values from issue #1118
t = Time("2004-09-16T23:59:59", scale="utc")
assert allclose_sec(t.unix, 1095379199.0)
def test_plot_date(self):
"""Test the plot_date format.
Depending on the situation with matplotlib, this can give different
results because the plot date epoch time changed in matplotlib 3.3. This
test tries to use the matplotlib date2num function to make the test
independent of version, but if matplotlib isn't available then the code
(and test) use the pre-3.3 epoch.
"""
try:
from matplotlib.dates import date2num
except ImportError:
# No matplotlib, in which case this uses the epoch 0000-12-31
# as per matplotlib < 3.3.
# Value from:
# matplotlib.dates.set_epoch('0000-12-31')
# val = matplotlib.dates.date2num('2000-01-01')
val = 730120.0
else:
val = date2num(datetime.datetime(2000, 1, 1))
t = Time("2000-01-01 00:00:00", scale="utc")
assert np.allclose(t.plot_date, val, atol=1e-5, rtol=0)
class TestNumericalSubFormat:
def test_explicit_example(self):
t = Time("54321.000000000001", format="mjd")
assert t == Time(54321, 1e-12, format="mjd")
assert t.mjd == 54321.0 # Lost precision!
assert t.value == 54321.0 # Lost precision!
assert t.to_value("mjd") == 54321.0 # Lost precision!
assert t.to_value("mjd", subfmt="str") == "54321.000000000001"
assert t.to_value("mjd", "bytes") == b"54321.000000000001"
expected_long = np.longdouble(54321.0) + np.longdouble(1e-12)
# Check we're the same to within the double holding jd2
# (which is less precise than longdouble on arm64).
assert np.allclose(
t.to_value("mjd", subfmt="long"),
expected_long,
rtol=0,
atol=np.finfo(float).eps,
)
t.out_subfmt = "str"
assert t.value == "54321.000000000001"
assert t.to_value("mjd") == 54321.0 # Lost precision!
assert t.mjd == "54321.000000000001"
assert t.to_value("mjd", subfmt="bytes") == b"54321.000000000001"
assert t.to_value("mjd", subfmt="float") == 54321.0 # Lost precision!
t.out_subfmt = "long"
assert np.allclose(t.value, expected_long, rtol=0.0, atol=np.finfo(float).eps)
assert np.allclose(
t.to_value("mjd", subfmt=None),
expected_long,
rtol=0.0,
atol=np.finfo(float).eps,
)
assert np.allclose(t.mjd, expected_long, rtol=0.0, atol=np.finfo(float).eps)
assert t.to_value("mjd", subfmt="str") == "54321.000000000001"
assert t.to_value("mjd", subfmt="float") == 54321.0 # Lost precision!
@pytest.mark.skipif(
np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float",
)
def test_explicit_longdouble(self):
i = 54321
# Create a different long double (which will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
f = max(2.0 ** (-np.finfo(np.longdouble).nmant) * 65536, np.finfo(float).eps)
mjd_long = np.longdouble(i) + np.longdouble(f)
assert mjd_long != i, "longdouble failure!"
t = Time(mjd_long, format="mjd")
expected = Time(i, f, format="mjd")
assert abs(t - expected) <= 20.0 * u.ps
t_float = Time(i + f, format="mjd")
assert t_float == Time(i, format="mjd")
assert t_float != t
assert t.value == 54321.0 # Lost precision!
assert np.allclose(
t.to_value("mjd", subfmt="long"),
mjd_long,
rtol=0.0,
atol=np.finfo(float).eps,
)
t2 = Time(mjd_long, format="mjd", out_subfmt="long")
assert np.allclose(t2.value, mjd_long, rtol=0.0, atol=np.finfo(float).eps)
@pytest.mark.skipif(
np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float",
)
def test_explicit_longdouble_one_val(self):
"""Ensure either val1 or val2 being longdouble is possible.
Regression test for issue gh-10033.
"""
i = 54321
f = max(2.0 ** (-np.finfo(np.longdouble).nmant) * 65536, np.finfo(float).eps)
t1 = Time(i, f, format="mjd")
t2 = Time(np.longdouble(i), f, format="mjd")
t3 = Time(i, np.longdouble(f), format="mjd")
t4 = Time(np.longdouble(i), np.longdouble(f), format="mjd")
assert t1 == t2 == t3 == t4
@pytest.mark.skipif(
np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float",
)
@pytest.mark.parametrize("fmt", ["mjd", "unix", "cxcsec"])
def test_longdouble_for_other_types(self, fmt):
t_fmt = getattr(Time(58000, format="mjd"), fmt) # Get regular float
t_fmt_long = np.longdouble(t_fmt)
# Create a different long double (ensuring it will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
atol = np.finfo(float).eps * (1.0 if fmt == "mjd" else 24.0 * 3600.0)
t_fmt_long2 = t_fmt_long + max(
t_fmt_long * np.finfo(np.longdouble).eps * 2, atol
)
assert t_fmt_long != t_fmt_long2, "longdouble weird!"
tm = Time(t_fmt_long, format=fmt)
tm2 = Time(t_fmt_long2, format=fmt)
assert tm != tm2
tm_long2 = tm2.to_value(fmt, subfmt="long")
assert np.allclose(tm_long2, t_fmt_long2, rtol=0.0, atol=atol)
def test_subformat_input(self):
s = "54321.01234567890123456789"
i, f = s.split(".") # Note, OK only for fraction < 0.5
t = Time(float(i), float("." + f), format="mjd")
t_str = Time(s, format="mjd")
t_bytes = Time(s.encode("ascii"), format="mjd")
t_decimal = Time(Decimal(s), format="mjd")
assert t_str == t
assert t_bytes == t
assert t_decimal == t
@pytest.mark.parametrize("out_subfmt", ("str", "bytes"))
def test_subformat_output(self, out_subfmt):
i = 54321
f = np.array([0.0, 1e-9, 1e-12])
t = Time(i, f, format="mjd", out_subfmt=out_subfmt)
t_value = t.value
expected = np.array(
["54321.0", "54321.000000001", "54321.000000000001"], dtype=out_subfmt
)
assert np.all(t_value == expected)
assert np.all(Time(expected, format="mjd") == t)
# Explicit sub-format.
t = Time(i, f, format="mjd")
t_mjd_subfmt = t.to_value("mjd", subfmt=out_subfmt)
assert np.all(t_mjd_subfmt == expected)
@pytest.mark.parametrize(
"fmt,string,val1,val2",
[
("jd", "2451544.5333981", 2451544.5, 0.0333981),
("decimalyear", "2000.54321", 2000.0, 0.54321),
("cxcsec", "100.0123456", 100.0123456, None),
("unix", "100.0123456", 100.0123456, None),
("gps", "100.0123456", 100.0123456, None),
("byear", "1950.1", 1950.1, None),
("jyear", "2000.1", 2000.1, None),
],
)
def test_explicit_string_other_formats(self, fmt, string, val1, val2):
t = Time(string, format=fmt)
assert t == Time(val1, val2, format=fmt)
assert t.to_value(fmt, subfmt="str") == string
def test_basic_subformat_setting(self):
t = Time("2001", format="jyear", scale="tai")
t.format = "mjd"
t.out_subfmt = "str"
assert t.value.startswith("5")
def test_basic_subformat_cache_does_not_crash(self):
t = Time("2001", format="jyear", scale="tai")
t.to_value("mjd", subfmt="str")
assert ("mjd", "str") in t.cache["format"]
t.to_value("mjd", "str")
@pytest.mark.parametrize("fmt", ["jd", "mjd", "cxcsec", "unix", "gps", "jyear"])
def test_decimal_context_does_not_affect_string(self, fmt):
t = Time("2001", format="jyear", scale="tai")
t.format = fmt
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value(fmt, "str")
t2 = Time("2001", format="jyear", scale="tai")
t2.format = fmt
with localcontext() as ctx:
ctx.prec = 40
            t2_s_40 = t2.to_value(fmt, "str")
assert (
t_s_2 == t2_s_40
), "String representation should not depend on Decimal context"
def test_decimal_context_caching(self):
t = Time(val=58000, val2=1e-14, format="mjd", scale="tai")
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value("mjd", subfmt="decimal")
t2 = Time(val=58000, val2=1e-14, format="mjd", scale="tai")
with localcontext() as ctx:
ctx.prec = 40
t_s_40 = t.to_value("mjd", subfmt="decimal")
t2_s_40 = t2.to_value("mjd", subfmt="decimal")
assert t_s_2 == t_s_40, "Should be the same but cache might make this automatic"
assert t_s_2 == t2_s_40, "Different precision should produce the same results"
@pytest.mark.parametrize(
"f, s, t",
[
("sec", "long", np.longdouble),
("sec", "decimal", Decimal),
("sec", "str", str),
],
)
def test_timedelta_basic(self, f, s, t):
dt = Time("58000", format="mjd", scale="tai") - Time(
"58001", format="mjd", scale="tai"
)
value = dt.to_value(f, s)
assert isinstance(value, t)
dt.format = f
dt.out_subfmt = s
assert isinstance(dt.value, t)
assert isinstance(dt.to_value(f, None), t)
def test_need_format_argument(self):
t = Time("J2000")
with pytest.raises(TypeError, match="missing.*required.*'format'"):
t.to_value()
with pytest.raises(ValueError, match="format must be one of"):
t.to_value("julian")
def test_wrong_in_subfmt(self):
with pytest.raises(ValueError, match="not among selected"):
Time("58000", format="mjd", in_subfmt="float")
with pytest.raises(ValueError, match="not among selected"):
Time(np.longdouble(58000), format="mjd", in_subfmt="float")
with pytest.raises(ValueError, match="not among selected"):
Time(58000.0, format="mjd", in_subfmt="str")
with pytest.raises(ValueError, match="not among selected"):
Time(58000.0, format="mjd", in_subfmt="long")
def test_wrong_subfmt(self):
t = Time(58000.0, format="mjd")
with pytest.raises(ValueError, match="must match one"):
t.to_value("mjd", subfmt="parrot")
with pytest.raises(ValueError, match="must match one"):
t.out_subfmt = "parrot"
with pytest.raises(ValueError, match="must match one"):
t.in_subfmt = "parrot"
def test_not_allowed_subfmt(self):
"""Test case where format has no defined subfmts"""
t = Time("J2000")
match = "subformat not allowed for format jyear_str"
with pytest.raises(ValueError, match=match):
t.to_value("jyear_str", subfmt="parrot")
with pytest.raises(ValueError, match=match):
t.out_subfmt = "parrot"
with pytest.raises(ValueError, match=match):
Time("J2000", out_subfmt="parrot")
with pytest.raises(ValueError, match=match):
t.in_subfmt = "parrot"
with pytest.raises(ValueError, match=match):
Time("J2000", format="jyear_str", in_subfmt="parrot")
def test_switch_to_format_with_no_out_subfmt(self):
t = Time("2001-01-01", out_subfmt="date_hm")
assert t.out_subfmt == "date_hm"
# Now do an in-place switch to format 'jyear_str' that has no subfmts
# where out_subfmt is changed to '*'.
t.format = "jyear_str"
assert t.out_subfmt == "*"
assert t.value == "J2001.001"
class TestSofaErrors:
"""Test that erfa status return values are handled correctly"""
def test_bad_time(self):
iy = np.array([2000], dtype=np.intc)
im = np.array([2000], dtype=np.intc) # bad month
id = np.array([2000], dtype=np.intc) # bad day
with pytest.raises(ValueError): # bad month, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = -5000
im[0] = 2
with pytest.raises(ValueError): # bad year, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = 2000
with pytest.warns(ErfaWarning, match=r"bad day \(JD computed\)") as w:
djm0, djm = erfa.cal2jd(iy, im, id)
assert len(w) == 1
assert allclose_jd(djm0, [2400000.5])
assert allclose_jd(djm, [53574.0])
class TestCopyReplicate:
"""Test issues related to copying and replicating data"""
def test_immutable_input(self):
"""Internals are never mutable."""
jds = np.array([2450000.5], dtype=np.double)
t = Time(jds, format="jd", scale="tai")
assert allclose_jd(t.jd, jds)
jds[0] = 2458654
assert not allclose_jd(t.jd, jds)
mjds = np.array([50000.0], dtype=np.double)
t = Time(mjds, format="mjd", scale="tai")
assert allclose_jd(t.jd, [2450000.5])
mjds[0] = 0.0
assert allclose_jd(t.jd, [2450000.5])
def test_replicate(self):
"""Test replicate method"""
t = Time(["2000:001"], format="yday", scale="tai", location=("45d", "45d"))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.replicate()
assert t.yday == t2.yday
assert t.format == t2.format
assert t.scale == t2.scale
assert t.location == t2.location
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday == t2.yday
assert t.yday != t_yday # prove that it changed
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x == t2.location.x
assert t.location.x != t_loc_x # prove that it changed
def test_copy(self):
"""Test copy method"""
t = Time("2000:001", format="yday", scale="tai", location=("45d", "45d"))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.copy()
assert t.yday == t2.yday
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are not sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday != t2.yday
assert t.yday == t_yday # prove that it did not change
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x != t2.location.x
        assert t.location.x == t_loc_x  # prove that it did not change
class TestStardate:
"""Sync chronometers with Starfleet Command"""
def test_iso_to_stardate(self):
assert str(Time("2320-01-01", scale="tai").stardate)[:7] == "1368.99"
assert str(Time("2330-01-01", scale="tai").stardate)[:8] == "10552.76"
assert str(Time("2340-01-01", scale="tai").stardate)[:8] == "19734.02"
@pytest.mark.parametrize(
"dates",
[
(10000, "2329-05-26 03:02"),
(20000, "2340-04-15 19:05"),
(30000, "2351-03-07 11:08"),
],
)
def test_stardate_to_iso(self, dates):
stardate, iso = dates
t_star = Time(stardate, format="stardate")
t_iso = Time(t_star, format="iso", out_subfmt="date_hm")
assert t_iso.value == iso
def test_python_builtin_copy():
t = Time("2000:001", format="yday", scale="tai")
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
assert t.jd == t2.jd
assert t.jd == t3.jd
def test_now():
"""
Tests creating a Time object with the `now` class method.
"""
now = datetime.datetime.utcnow()
t = Time.now()
assert t.format == "datetime"
assert t.scale == "utc"
dt = t.datetime - now # a datetime.timedelta object
# this gives a .1 second margin between the `utcnow` call and the `Time`
# initializer, which is really way more generous than necessary - typical
# times are more like microseconds. But it seems safer in case some
# platforms have slow clock calls or something.
assert dt.total_seconds() < 0.1
def test_decimalyear():
t = Time("2001:001", format="yday")
assert t.decimalyear == 2001.0
t = Time(2000.0, [0.5, 0.75], format="decimalyear")
assert np.all(t.value == [2000.5, 2000.75])
jd0 = Time("2000:001").jd
jd1 = Time("2001:001").jd
d_jd = jd1 - jd0
assert np.all(t.jd == [jd0 + 0.5 * d_jd, jd0 + 0.75 * d_jd])
def test_decimalyear_no_quantity():
with pytest.raises(ValueError, match="cannot use Quantities"):
Time(2005.5 * u.yr, format="decimalyear")
def test_fits_year0():
t = Time(1721425.5, format="jd", scale="tai")
assert t.fits == "0001-01-01T00:00:00.000"
t = Time(1721425.5 - 366.0, format="jd", scale="tai")
assert t.fits == "+00000-01-01T00:00:00.000"
t = Time(1721425.5 - 366.0 - 365.0, format="jd", scale="tai")
assert t.fits == "-00001-01-01T00:00:00.000"
def test_fits_year10000():
t = Time(5373484.5, format="jd", scale="tai")
assert t.fits == "+10000-01-01T00:00:00.000"
t = Time(5373484.5 - 365.0, format="jd", scale="tai")
assert t.fits == "9999-01-01T00:00:00.000"
t = Time(5373484.5, -1.0 / 24.0 / 3600.0, format="jd", scale="tai")
assert t.fits == "9999-12-31T23:59:59.000"
def test_dir():
t = Time("2000:001", format="yday", scale="tai")
assert "utc" in dir(t)
def test_time_from_epoch_jds():
"""Test that jd1/jd2 in a TimeFromEpoch format is always well-formed:
jd1 is an integral value and abs(jd2) <= 0.5.
"""
    # From 1998:001 00:00 to 1998:002 12:00 by a non-round step. This will
    # catch jd2 == 0 and a case of abs(jd2) == 0.5.
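    # (jd1 integral with |jd2| <= 0.5 is the two-double convention Time uses
    # internally so the fractional part keeps full double precision; see
    # day_frac in astropy.time.utils.)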
cxcsecs = np.linspace(0, 86400 * 1.5, 49)
for cxcsec in cxcsecs:
t = Time(cxcsec, format="cxcsec")
assert np.round(t.jd1) == t.jd1
assert np.abs(t.jd2) <= 0.5
t = Time(cxcsecs, format="cxcsec")
assert np.all(np.round(t.jd1) == t.jd1)
assert np.all(np.abs(t.jd2) <= 0.5)
assert np.any(np.abs(t.jd2) == 0.5) # At least one exactly 0.5
def test_bool():
"""Any Time object should evaluate to True unless it is empty [#3520]."""
t = Time(np.arange(50000, 50010), format="mjd", scale="utc")
assert bool(t) is True
assert bool(t[0]) is True
assert bool(t[:0]) is False
def test_len_size():
"""Check length of Time objects and that scalar ones do not have one."""
t = Time(np.arange(50000, 50010), format="mjd", scale="utc")
assert len(t) == 10 and t.size == 10
t1 = Time(np.arange(50000, 50010).reshape(2, 5), format="mjd", scale="utc")
assert len(t1) == 2 and t1.size == 10
# Can have length 1 or length 0 arrays.
t2 = t[:1]
assert len(t2) == 1 and t2.size == 1
t3 = t[:0]
assert len(t3) == 0 and t3.size == 0
# But cannot get length from scalar.
t4 = t[0]
with pytest.raises(TypeError) as err:
len(t4)
# Ensure we're not just getting the old error of
# "object of type 'float' has no len()".
assert "Time" in str(err.value)
def test_TimeFormat_scale():
"""guard against recurrence of #1122, where TimeFormat class looses uses
attributes (delta_ut1_utc here), preventing conversion to unix, cxc"""
t = Time("1900-01-01", scale="ut1")
t.delta_ut1_utc = 0.0
with pytest.warns(ErfaWarning):
t.unix
assert t.unix == t.utc.unix
@pytest.mark.remote_data
def test_scale_conversion(monkeypatch):
# Check that if we have internet, and downloading is allowed, we
# can get conversion to UT1 for the present, since we will download
# IERS_A in IERS_Auto.
monkeypatch.setattr("astropy.utils.iers.conf.auto_download", True)
Time(Time.now().cxcsec, format="cxcsec", scale="ut1")
def test_byteorder():
"""Ensure that bigendian and little-endian both work (closes #2942)"""
mjd = np.array([53000.00, 54000.00])
big_endian = mjd.astype(">f8")
little_endian = mjd.astype("<f8")
time_mjd = Time(mjd, format="mjd")
time_big = Time(big_endian, format="mjd")
time_little = Time(little_endian, format="mjd")
assert np.all(time_big == time_mjd)
assert np.all(time_little == time_mjd)
def test_datetime_tzinfo():
"""
Test #3160 that time zone info in datetime objects is respected.
"""
class TZm6(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(hours=-6)
d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=TZm6())
t = Time(d)
assert t.value == datetime.datetime(2002, 1, 2, 16, 3, 4)
def test_subfmts_regex():
"""
Test having a custom subfmts with a regular expression
"""
class TimeLongYear(TimeString):
name = "longyear"
subfmts = (
(
"date",
r"(?P<year>[+-]\d{5})-%m-%d", # hybrid
"{year:+06d}-{mon:02d}-{day:02d}",
),
)
t = Time("+02000-02-03", format="longyear")
assert t.value == "+02000-02-03"
assert t.jd == Time("2000-02-03").jd
def test_set_format_basic():
"""
Test basics of setting format attribute.
"""
for format, value in (
("jd", 2451577.5),
("mjd", 51577.0),
("cxcsec", 65923264.184), # confirmed with Chandra.Time
("datetime", datetime.datetime(2000, 2, 3, 0, 0)),
("iso", "2000-02-03 00:00:00.000"),
):
t = Time("+02000-02-03", format="fits")
t0 = t.replicate()
t.format = format
assert t.value == value
# Internal jd1 and jd2 are preserved
assert t._time.jd1 is t0._time.jd1
assert t._time.jd2 is t0._time.jd2
def test_unix_tai_format():
t = Time("2020-01-01", scale="utc")
assert allclose_sec(t.unix_tai - t.unix, 37.0)
t = Time("1970-01-01", scale="utc")
assert allclose_sec(t.unix_tai - t.unix, 8 + 8.2e-05)
def test_set_format_shares_subfmt():
"""
Set format and round trip through a format that shares out_subfmt
"""
t = Time("+02000-02-03", format="fits", out_subfmt="date_hms", precision=5)
tc = t.copy()
t.format = "isot"
assert t.precision == 5
assert t.out_subfmt == "date_hms"
assert t.value == "2000-02-03T00:00:00.00000"
t.format = "fits"
assert t.value == tc.value
assert t.precision == 5
def test_set_format_does_not_share_subfmt():
"""
Set format and round trip through a format that does not share out_subfmt
"""
t = Time("+02000-02-03", format="fits", out_subfmt="longdate")
t.format = "isot"
assert t.out_subfmt == "*" # longdate_hms not there, goes to default
assert t.value == "2000-02-03T00:00:00.000"
t.format = "fits"
assert t.out_subfmt == "*"
assert t.value == "2000-02-03T00:00:00.000" # date_hms
def test_replicate_value_error():
"""
Passing a bad format to replicate should raise ValueError, not KeyError.
PR #3857.
"""
t1 = Time("2007:001", scale="tai")
with pytest.raises(ValueError) as err:
t1.replicate(format="definitely_not_a_valid_format")
assert "format must be one of" in str(err.value)
def test_remove_astropy_time():
"""
Make sure that 'astropy_time' format is really gone after #3857. Kind of
silly test but just to be sure.
"""
t1 = Time("2007:001", scale="tai")
assert "astropy_time" not in t1.FORMATS
with pytest.raises(ValueError) as err:
Time(t1, format="astropy_time")
assert "format must be one of" in str(err.value)
def test_isiterable():
"""
Ensure that scalar `Time` instances are not reported as iterable by the
`isiterable` utility.
Regression test for https://github.com/astropy/astropy/issues/4048
"""
t1 = Time.now()
assert not isiterable(t1)
t2 = Time(
["1999-01-01 00:00:00.123456789", "2010-01-01 00:00:00"],
format="iso",
scale="utc",
)
assert isiterable(t2)
def test_to_datetime():
tz = TimezoneInfo(utc_offset=-10 * u.hour, tzname="US/Hawaii")
    # The above line produces a `datetime.tzinfo` object similar to:
# tzinfo = pytz.timezone('US/Hawaii')
time = Time("2010-09-03 00:00:00")
tz_aware_datetime = time.to_datetime(tz)
assert tz_aware_datetime.time() == datetime.time(14, 0)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(["2010-09-03 00:00:00", "2005-09-03 06:00:00", "1990-09-03 06:00:00"])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
def test_to_datetime_leap_second_strict():
t = Time("2015-06-30 23:59:60.000")
dt_exp = datetime.datetime(2015, 7, 1, 0, 0, 0)
with pytest.raises(ValueError, match=r"does not support leap seconds"):
t.to_datetime()
with pytest.warns(
AstropyDatetimeLeapSecondWarning, match=r"does not support leap seconds"
):
dt = t.to_datetime(leap_second_strict="warn")
assert dt == dt_exp
dt = t.to_datetime(leap_second_strict="silent")
assert dt == dt_exp
with pytest.raises(ValueError, match=r"leap_second_strict must be 'raise'"):
t.to_datetime(leap_second_strict="invalid")
@pytest.mark.skipif(not HAS_PYTZ, reason="requires pytz")
def test_to_datetime_pytz():
import pytz
tz = pytz.timezone("US/Hawaii")
time = Time("2010-09-03 00:00:00")
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz_aware_datetime.time() == datetime.time(14, 0)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(["2010-09-03 00:00:00", "2005-09-03 06:00:00", "1990-09-03 06:00:00"])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
def test_cache():
t = Time("2010-09-03 00:00:00")
t2 = Time("2010-09-03 00:00:00")
# Time starts out without a cache
assert "cache" not in t._time.__dict__
# Access the iso format and confirm that the cached version is as expected
t.iso
assert t.cache["format"]["iso"] == t2.iso
# Access the TAI scale and confirm that the cached version is as expected
t.tai
assert t.cache["scale"]["tai"] == t2.tai
# New Time object after scale transform does not have a cache yet
assert "cache" not in t.tt._time.__dict__
# Clear the cache
del t.cache
assert "cache" not in t._time.__dict__
# Check accessing the cache creates an empty dictionary
assert not t.cache
assert "cache" in t._time.__dict__
def test_epoch_date_jd_is_day_fraction():
"""
    Ensure that jd1 and jd2 of an epoch Time respect the (day, fraction) convention
(see #6638)
"""
t0 = Time("J2000", scale="tdb")
assert t0.jd1 == 2451545.0
assert t0.jd2 == 0.0
t1 = Time(datetime.datetime(2000, 1, 1, 12, 0, 0), scale="tdb")
assert t1.jd1 == 2451545.0
assert t1.jd2 == 0.0
def test_sum_is_equivalent():
"""
Ensure that two equal dates defined in different ways behave equally (#6638)
"""
t0 = Time("J2000", scale="tdb")
t1 = Time("2000-01-01 12:00:00", scale="tdb")
assert t0 == t1
assert (t0 + 1 * u.second) == (t1 + 1 * u.second)
def test_string_valued_columns():
# Columns have a nice shim that translates bytes to string as needed.
# Ensure Time can handle these. Use multi-d array just to be sure.
times = [
[[f"{y:04d}-{m:02d}-{d:02d}" for d in range(1, 3)] for m in range(5, 7)]
for y in range(2012, 2014)
]
cutf32 = Column(times)
cbytes = cutf32.astype("S")
tutf32 = Time(cutf32)
tbytes = Time(cbytes)
assert np.all(tutf32 == tbytes)
tutf32 = Time(Column(["B1950"]))
tbytes = Time(Column([b"B1950"]))
assert tutf32 == tbytes
# Regression tests for arrays with entries with unequal length. gh-6903.
times = Column([b"2012-01-01", b"2012-01-01T00:00:00"])
assert np.all(Time(times) == Time(["2012-01-01", "2012-01-01T00:00:00"]))
def test_bytes_input():
tstring = "2011-01-02T03:04:05"
tbytes = b"2011-01-02T03:04:05"
assert tbytes.decode("ascii") == tstring
t0 = Time(tstring)
t1 = Time(tbytes)
assert t1 == t0
tarray = np.array(tbytes)
assert tarray.dtype.kind == "S"
t2 = Time(tarray)
assert t2 == t0
def test_writeable_flag():
t = Time([1, 2, 3], format="cxcsec")
t[1] = 5.0
assert allclose_sec(t[1].value, 5.0)
t.writeable = False
with pytest.raises(ValueError) as err:
t[1] = 5.0
assert "Time object is read-only. Make a copy()" in str(err.value)
with pytest.raises(ValueError) as err:
t[:] = 5.0
assert "Time object is read-only. Make a copy()" in str(err.value)
t.writeable = True
t[1] = 10.0
assert allclose_sec(t[1].value, 10.0)
# Scalar is writeable because it gets boxed into a zero-d array
t = Time("2000:001", scale="utc")
t[()] = "2000:002"
assert t.value.startswith("2000:002")
# Transformed attribute is not writeable
t = Time(["2000:001", "2000:002"], scale="utc")
t2 = t.tt # t2 is read-only now because t.tt is cached
with pytest.raises(ValueError) as err:
t2[0] = "2005:001"
assert "Time object is read-only. Make a copy()" in str(err.value)
def test_setitem_location():
loc = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
t = Time([[1, 2], [3, 4]], format="cxcsec", location=loc)
# Succeeds because the right hand side makes no implication about
# location and just inherits t.location
t[0, 0] = 0
assert allclose_sec(t.value, [[0, 2], [3, 4]])
# Fails because the right hand side has location=None
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-1, format="cxcsec")
assert (
"cannot set to Time with different location: "
"expected location={} and "
"got location=None".format(loc[0]) in str(err.value)
)
# Succeeds because the right hand side correctly sets location
t[0, 0] = Time(-2, format="cxcsec", location=loc[0])
assert allclose_sec(t.value, [[-2, 2], [3, 4]])
# Fails because the right hand side has different location
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format="cxcsec", location=loc[1])
assert (
"cannot set to Time with different location: "
"expected location={} and "
"got location={}".format(loc[0], loc[1]) in str(err.value)
)
# Fails because the Time has None location and RHS has defined location
t = Time([[1, 2], [3, 4]], format="cxcsec")
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format="cxcsec", location=loc[1])
assert (
"cannot set to Time with different location: "
"expected location=None and "
"got location={}".format(loc[1]) in str(err.value)
)
# Broadcasting works
t = Time([[1, 2], [3, 4]], format="cxcsec", location=loc)
t[0, :] = Time([-3, -4], format="cxcsec", location=loc)
assert allclose_sec(t.value, [[-3, -4], [3, 4]])
def test_setitem_from_python_objects():
t = Time([[1, 2], [3, 4]], format="cxcsec")
assert t.cache == {}
t.iso
assert "iso" in t.cache["format"]
assert np.all(
t.iso
== [
["1998-01-01 00:00:01.000", "1998-01-01 00:00:02.000"],
["1998-01-01 00:00:03.000", "1998-01-01 00:00:04.000"],
]
)
# Setting item clears cache
t[0, 1] = 100
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100], [3, 4]])
assert np.all(
t.iso
== [
["1998-01-01 00:00:01.000", "1998-01-01 00:01:40.000"],
["1998-01-01 00:00:03.000", "1998-01-01 00:00:04.000"],
]
)
# Set with a float value
t.iso
t[1, :] = 200
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100], [200, 200]])
# Array of strings in yday format
t[:, 1] = ["1998:002", "1998:003"]
assert allclose_sec(t.value, [[1, 86400 * 1], [200, 86400 * 2]])
# Incompatible numeric value
t = Time(["2000:001", "2000:002"])
t[0] = "2001:001"
with pytest.raises(ValueError) as err:
t[0] = 100
assert "cannot convert value to a compatible Time object" in str(err.value)
def test_setitem_from_time_objects():
"""Set from existing Time object."""
# Set from time object with different scale
t = Time(["2000:001", "2000:002"], scale="utc")
t2 = Time(["2000:010"], scale="tai")
t[1] = t2[0]
assert t.value[1] == t2.utc.value[0]
# Time object with different scale and format
t = Time(["2000:001", "2000:002"], scale="utc")
t2.format = "jyear"
t[1] = t2[0]
assert t.yday[1] == t2.utc.yday[0]
def test_setitem_bad_item():
t = Time([1, 2], format="cxcsec")
with pytest.raises(IndexError):
t["asdf"] = 3
def test_setitem_deltas():
"""Setting invalidates any transform deltas"""
t = Time([1, 2], format="cxcsec")
t.delta_tdb_tt = [1, 2]
t.delta_ut1_utc = [3, 4]
t[1] = 3
assert not hasattr(t, "_delta_tdb_tt")
assert not hasattr(t, "_delta_ut1_utc")
def test_subclass():
"""Check that we can initialize subclasses with a Time instance."""
# Ref: Issue gh-#7449 and PR gh-#7453.
class _Time(Time):
pass
t1 = Time("1999-01-01T01:01:01")
t2 = _Time(t1)
assert t2.__class__ == _Time
assert t1 == t2
def test_strftime_scalar():
"""Test of Time.strftime"""
time_string = "2010-09-03 06:00:00"
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S") == time_string
def test_strftime_array():
tstrings = ["2010-09-03 00:00:00", "2005-09-03 06:00:00", "1995-12-31 23:59:60"]
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S").tolist() == tstrings
def test_strftime_array_2():
tstrings = [
["1998-01-01 00:00:01", "1998-01-01 00:00:02"],
["1998-01-01 00:00:03", "1995-12-31 23:59:60"],
]
tstrings = np.array(tstrings)
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert np.all(t.strftime("%Y-%m-%d %H:%M:%S") == tstrings)
assert t.strftime("%Y-%m-%d %H:%M:%S").shape == tstrings.shape
def test_strftime_leapsecond():
time_string = "1995-12-31 23:59:60"
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S") == time_string
def test_strptime_scalar():
"""Test of Time.strptime"""
time_string = "2007-May-04 21:08:12"
time_object = Time("2007-05-04 21:08:12")
t = Time.strptime(time_string, "%Y-%b-%d %H:%M:%S")
assert t == time_object
def test_strptime_array():
"""Test of Time.strptime"""
tstrings = [
["1998-Jan-01 00:00:01", "1998-Jan-01 00:00:02"],
["1998-Jan-01 00:00:03", "1998-Jan-01 00:00:04"],
]
tstrings = np.array(tstrings)
time_object = Time(
[
["1998-01-01 00:00:01", "1998-01-01 00:00:02"],
["1998-01-01 00:00:03", "1998-01-01 00:00:04"],
]
)
t = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S")
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_badinput():
tstrings = [1, 2, 3]
with pytest.raises(TypeError):
Time.strptime(tstrings, "%S")
def test_strptime_input_bytes_scalar():
time_string = b"2007-May-04 21:08:12"
time_object = Time("2007-05-04 21:08:12")
t = Time.strptime(time_string, "%Y-%b-%d %H:%M:%S")
assert t == time_object
def test_strptime_input_bytes_array():
tstrings = [
[b"1998-Jan-01 00:00:01", b"1998-Jan-01 00:00:02"],
[b"1998-Jan-01 00:00:03", b"1998-Jan-01 00:00:04"],
]
tstrings = np.array(tstrings)
time_object = Time(
[
["1998-01-01 00:00:01", "1998-01-01 00:00:02"],
["1998-01-01 00:00:03", "1998-01-01 00:00:04"],
]
)
t = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S")
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_leapsecond():
time_obj1 = Time("1995-12-31T23:59:60", format="isot")
time_obj2 = Time.strptime("1995-Dec-31 23:59:60", "%Y-%b-%d %H:%M:%S")
assert time_obj1 == time_obj2
def test_strptime_3_digit_year():
time_obj1 = Time("0995-12-31T00:00:00", format="isot", scale="tai")
time_obj2 = Time.strptime("0995-Dec-31 00:00:00", "%Y-%b-%d %H:%M:%S", scale="tai")
assert time_obj1 == time_obj2
def test_strptime_fracsec_scalar():
time_string = "2007-May-04 21:08:12.123"
time_object = Time("2007-05-04 21:08:12.123")
t = Time.strptime(time_string, "%Y-%b-%d %H:%M:%S.%f")
assert t == time_object
def test_strptime_fracsec_array():
"""Test of Time.strptime"""
tstrings = [
["1998-Jan-01 00:00:01.123", "1998-Jan-01 00:00:02.000001"],
["1998-Jan-01 00:00:03.000900", "1998-Jan-01 00:00:04.123456"],
]
tstrings = np.array(tstrings)
time_object = Time(
[
["1998-01-01 00:00:01.123", "1998-01-01 00:00:02.000001"],
["1998-01-01 00:00:03.000900", "1998-01-01 00:00:04.123456"],
]
)
t = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S.%f")
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strftime_scalar_fracsec():
"""Test of Time.strftime"""
time_string = "2010-09-03 06:00:00.123"
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S.%f") == time_string
def test_strftime_scalar_fracsec_precision():
time_string = "2010-09-03 06:00:00.123123123"
t = Time(time_string)
assert t.strftime("%Y-%m-%d %H:%M:%S.%f") == "2010-09-03 06:00:00.123"
t.precision = 9
assert t.strftime("%Y-%m-%d %H:%M:%S.%f") == "2010-09-03 06:00:00.123123123"
def test_strftime_array_fracsec():
tstrings = [
"2010-09-03 00:00:00.123000",
"2005-09-03 06:00:00.000001",
"1995-12-31 23:59:60.000900",
]
t = Time(tstrings)
t.precision = 6
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S.%f").tolist() == tstrings
def test_insert_time():
tm = Time([1, 2], format="unix")
# Insert a scalar using an auto-parsed string
tm2 = tm.insert(1, "1970-01-01 00:01:00")
assert np.all(tm2 == Time([1, 60, 2], format="unix"))
# Insert scalar using a Time value
tm2 = tm.insert(1, Time("1970-01-01 00:01:00"))
assert np.all(tm2 == Time([1, 60, 2], format="unix"))
# Insert length=1 array with a Time value
tm2 = tm.insert(1, [Time("1970-01-01 00:01:00")])
assert np.all(tm2 == Time([1, 60, 2], format="unix"))
# Insert length=2 list with float values matching unix format.
# Also actually provide axis=0 unlike all other tests.
tm2 = tm.insert(1, [10, 20], axis=0)
assert np.all(tm2 == Time([1, 10, 20, 2], format="unix"))
# Insert length=2 np.array with float values matching unix format
tm2 = tm.insert(1, np.array([10, 20]))
assert np.all(tm2 == Time([1, 10, 20, 2], format="unix"))
# Insert length=2 np.array with float values at the end
tm2 = tm.insert(2, np.array([10, 20]))
assert np.all(tm2 == Time([1, 2, 10, 20], format="unix"))
# Insert length=2 np.array with float values at the beginning
# with a negative index
tm2 = tm.insert(-2, np.array([10, 20]))
assert np.all(tm2 == Time([10, 20, 1, 2], format="unix"))
def test_insert_time_out_subfmt():
# Check insert() with out_subfmt set
T = Time(["1999-01-01", "1999-01-02"], out_subfmt="date")
T = T.insert(0, T[0])
assert T.out_subfmt == "date"
assert T[0] == T[1]
T = T.insert(1, "1999-01-03")
assert T.out_subfmt == "date"
assert str(T[1]) == "1999-01-03"
def test_insert_exceptions():
tm = Time(1, format="unix")
with pytest.raises(TypeError) as err:
tm.insert(0, 50)
assert "cannot insert into scalar" in str(err.value)
tm = Time([1, 2], format="unix")
with pytest.raises(ValueError) as err:
tm.insert(0, 50, axis=1)
assert "axis must be 0" in str(err.value)
with pytest.raises(TypeError) as err:
tm.insert(slice(None), 50)
assert "obj arg must be an integer" in str(err.value)
with pytest.raises(IndexError) as err:
tm.insert(-100, 50)
assert "index -100 is out of bounds for axis 0 with size 2" in str(err.value)
def test_datetime64_no_format():
dt64 = np.datetime64("2000-01-02T03:04:05.123456789")
t = Time(dt64, scale="utc", precision=9)
assert t.iso == "2000-01-02 03:04:05.123456789"
assert t.datetime64 == dt64
assert t.value == dt64
def test_hash_time():
loc1 = EarthLocation(1 * u.m, 2 * u.m, 3 * u.m)
for loc in None, loc1:
t = Time([1, 1, 2, 3], format="cxcsec", location=loc)
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'Time' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'Time' (value is masked)"
t = Time(1, format="cxcsec", location=loc)
t2 = Time(1, format="cxcsec")
assert hash(t) != hash(t2)
t = Time("2000:180", scale="utc")
t2 = Time(t, scale="tai")
assert t == t2
assert hash(t) != hash(t2)
def test_hash_time_delta():
t = TimeDelta([1, 1, 2, 3], format="sec")
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (value is masked)"
def test_get_time_fmt_exception_messages():
with pytest.raises(ValueError) as err:
Time(10)
assert "No time format was given, and the input is" in str(err.value)
with pytest.raises(ValueError) as err:
Time("2000:001", format="not-a-format")
assert "Format 'not-a-format' is not one of the allowed" in str(err.value)
with pytest.raises(ValueError) as err:
Time("200")
assert "Input values did not match any of the formats where" in str(err.value)
with pytest.raises(ValueError) as err:
Time("200", format="iso")
assert (
"Input values did not match the format class iso:"
+ os.linesep
+ "ValueError: Time 200 does not match iso format"
) == str(err.value)
with pytest.raises(ValueError) as err:
Time(200, format="iso")
assert (
"Input values did not match the format class iso:"
+ os.linesep
+ "TypeError: Input values for iso class must be strings"
) == str(err.value)
def test_ymdhms_defaults():
t1 = Time({"year": 2001}, format="ymdhms")
assert t1 == Time("2001-01-01")
times_dict_ns = {
"year": [2001, 2002],
"month": [2, 3],
"day": [4, 5],
"hour": [6, 7],
"minute": [8, 9],
"second": [10, 11],
}
table_ns = Table(times_dict_ns)
struct_array_ns = table_ns.as_array()
rec_array_ns = struct_array_ns.view(np.recarray)
ymdhms_names = ("year", "month", "day", "hour", "minute", "second")
@pytest.mark.parametrize("tm_input", [table_ns, struct_array_ns, rec_array_ns])
@pytest.mark.parametrize("kwargs", [{}, {"format": "ymdhms"}])
@pytest.mark.parametrize("as_row", [False, True])
def test_ymdhms_init_from_table_like(tm_input, kwargs, as_row):
time_ns = Time(["2001-02-04 06:08:10", "2002-03-05 07:09:11"])
if as_row:
tm_input = tm_input[0]
time_ns = time_ns[0]
tm = Time(tm_input, **kwargs)
assert np.all(tm == time_ns)
assert tm.value.dtype.names == ymdhms_names
def test_ymdhms_init_from_dict_array():
times_dict_shape = {"year": [[2001, 2002], [2003, 2004]], "month": [2, 3], "day": 4}
time_shape = Time([["2001-02-04", "2002-03-04"], ["2003-02-04", "2004-03-04"]])
time = Time(times_dict_shape, format="ymdhms")
assert np.all(time == time_shape)
assert time.ymdhms.shape == time_shape.shape
@pytest.mark.parametrize("kwargs", [{}, {"format": "ymdhms"}])
def test_ymdhms_init_from_dict_scalar(kwargs):
"""
Test YMDHMS functionality for a dict input. This includes ensuring that
key and attribute access work. For extra fun use a time within a leap
second.
"""
time_dict = {
"year": 2016,
"month": 12,
"day": 31,
"hour": 23,
"minute": 59,
"second": 60.123456789,
}
tm = Time(time_dict, **kwargs)
assert tm == Time("2016-12-31T23:59:60.123456789")
for attr in time_dict:
for value in (tm.value[attr], getattr(tm.value, attr)):
if attr == "second":
assert allclose_sec(time_dict[attr], value)
else:
assert time_dict[attr] == value
# Now test initializing from a YMDHMS format time using the object
tm_rt = Time(tm)
assert tm_rt == tm
assert tm_rt.format == "ymdhms"
# Test initializing from a YMDHMS value (np.void, i.e. recarray row)
# without specified format.
tm_rt = Time(tm.ymdhms)
assert tm_rt == tm
assert tm_rt.format == "ymdhms"
def test_ymdhms_exceptions():
with pytest.raises(ValueError, match="input must be dict or table-like"):
Time(10, format="ymdhms")
match = "'wrong' not allowed as YMDHMS key name(s)"
    # NB: using match=match in pytest.raises() fails here because the parentheses
    # in the message are interpreted as a regex group, so we fall back to old
    # school ``match in str(err.value)``.
with pytest.raises(ValueError) as err:
Time({"year": 2019, "wrong": 1}, format="ymdhms")
assert match in str(err.value)
match = "for 2 input key names you must supply 'year', 'month'"
with pytest.raises(ValueError, match=match):
Time({"year": 2019, "minute": 1}, format="ymdhms")
def test_ymdhms_masked():
tm = Time({"year": [2000, 2001]}, format="ymdhms")
tm[0] = np.ma.masked
assert isinstance(tm.value[0], np.ma.core.mvoid)
for name in ymdhms_names:
assert tm.value[0][name] is np.ma.masked
# Converted from doctest in astropy/test/formats.py for debugging
def test_ymdhms_output():
t = Time(
{
"year": 2015,
"month": 2,
"day": 3,
"hour": 12,
"minute": 13,
"second": 14.567,
},
scale="utc",
)
# NOTE: actually comes back as np.void for some reason
# NOTE: not necessarily a python int; might be an int32
assert t.ymdhms.year == 2015
@pytest.mark.parametrize("fmt", TIME_FORMATS)
def test_write_every_format_to_ecsv(fmt):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s
tm.format = fmt
t["a"] = tm
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert t["a"].format == t2["a"].format
# Some loss of precision in the serialization
assert not np.all(t["a"] == t2["a"])
# But no loss in the format representation
assert np.all(t["a"].value == t2["a"].value)
@pytest.mark.parametrize("fmt", TIME_FORMATS)
def test_write_every_format_to_fits(fmt, tmp_path):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s
tm.format = fmt
t["a"] = tm
out = tmp_path / "out.fits"
t.write(out, format="fits")
t2 = Table.read(out, format="fits", astropy_native=True)
# Currently the format is lost in FITS so set it back
t2["a"].format = fmt
# No loss of precision in the serialization or representation
assert np.all(t["a"] == t2["a"])
assert np.all(t["a"].value == t2["a"].value)
@pytest.mark.skipif(not HAS_H5PY, reason="Needs h5py")
@pytest.mark.parametrize("fmt", TIME_FORMATS)
def test_write_every_format_to_hdf5(fmt, tmp_path):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s
tm.format = fmt
t["a"] = tm
out = tmp_path / "out.h5"
t.write(str(out), format="hdf5", path="root", serialize_meta=True)
t2 = Table.read(str(out), format="hdf5", path="root")
assert t["a"].format == t2["a"].format
# No loss of precision in the serialization or representation
assert np.all(t["a"] == t2["a"])
assert np.all(t["a"].value == t2["a"].value)
# There are two stages of validation now - one on input into a format, so that
# the format conversion code has tidy matched arrays to work with, and the
# other when object construction does not go through a format object. Or at
# least, the format object is constructed with "from_jd=True". In this case the
# normal input validation does not happen but the new input validation does,
# and can ensure that strange broadcasting anomalies can't happen.
# This form of construction uses from_jd=True.
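# A rough illustration of the two construction paths (the internal flag is an
# implementation detail; these calls just mirror the tests below):
#
#     Time([1, 2], [0.1, 0.2], format="mjd")  # goes through the format class,
#                                             # so normal input validation runs
#     Time("J2015") + 1 * u.day               # result is rebuilt from jd1/jd2
#                                             # (from_jd=True), so only the
#                                             # post-construction checks apply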
def test_broadcasting_writeable():
t = Time("J2015") + np.linspace(-1, 1, 10) * u.day
t[2] = Time(58000, format="mjd")
def test_format_subformat_compatibility():
"""Test that changing format with out_subfmt defined is not a problem.
See #9812, #9810."""
t = Time("2019-12-20", out_subfmt="date_??")
assert t.mjd == 58837.0
assert t.yday == "2019:354:00:00" # Preserves out_subfmt
t2 = t.replicate(format="mjd")
assert t2.out_subfmt == "*" # Changes to default
t2 = t.copy(format="mjd")
assert t2.out_subfmt == "*"
t2 = Time(t, format="mjd")
assert t2.out_subfmt == "*"
t2 = t.copy(format="yday")
assert t2.out_subfmt == "date_??"
assert t2.value == "2019:354:00:00"
t.format = "yday"
assert t.value == "2019:354:00:00"
assert t.out_subfmt == "date_??"
t = Time("2019-12-20", out_subfmt="date")
assert t.mjd == 58837.0
assert t.yday == "2019:354"
@pytest.mark.parametrize("use_fast_parser", ["force", "False"])
def test_format_fractional_string_parsing(use_fast_parser):
"""Test that string like "2022-08-01.123" does not parse as ISO.
See #6476 and the fix."""
with pytest.raises(
ValueError, match=r"Input values did not match the format class iso"
):
with conf.set_temp("use_fast_parser", use_fast_parser):
Time("2022-08-01.123", format="iso")
@pytest.mark.parametrize("fmt_name,fmt_class", TIME_FORMATS.items())
def test_to_value_with_subfmt_for_every_format(fmt_name, fmt_class):
"""From a starting Time value, test that every valid combination of
to_value(format, subfmt) works. See #9812, #9361.
"""
t = Time("2000-01-01")
subfmts = list(subfmt[0] for subfmt in fmt_class.subfmts) + [None, "*"]
for subfmt in subfmts:
t.to_value(fmt_name, subfmt)
@pytest.mark.parametrize("location", [None, (45, 45)])
def test_location_init(location):
"""Test fix in #9969 for issue #9962 where the location attribute is
    lost when initializing Time from an existing Time instance or list of
Time instances.
"""
tm = Time("J2010", location=location)
# Init from a scalar Time
tm2 = Time(tm)
assert np.all(tm.location == tm2.location)
assert type(tm.location) is type(tm2.location)
# From a list of Times
tm2 = Time([tm, tm])
if location is None:
assert tm2.location is None
else:
for loc in tm2.location:
assert loc == tm.location
assert type(tm.location) is type(tm2.location)
# Effectively the same as a list of Times, but just to be sure that
# Table mixin initialization is working as expected.
tm2 = Table([[tm, tm]])["col0"]
if location is None:
assert tm2.location is None
else:
for loc in tm2.location:
assert loc == tm.location
assert type(tm.location) is type(tm2.location)
def test_location_init_fail():
"""Test fix in #9969 for issue #9962 where the location attribute is
    lost when initializing Time from an existing Time instance or list of
Time instances. Make sure exception is correct.
"""
tm = Time("J2010", location=(45, 45))
tm2 = Time("J2010")
with pytest.raises(
ValueError, match="cannot concatenate times unless all locations"
):
Time([tm, tm2])
def test_linspace():
"""Test `np.linspace` `__array_func__` implementation for scalar and arrays."""
t1 = Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"])
t2 = Time(["2021-01-01 01:00:00", "2021-12-28 00:00:00"])
atol = 2 * np.finfo(float).eps * abs(t1 - t2).max()
ts = np.linspace(t1[0], t2[0], 3)
assert ts[0].isclose(Time("2021-01-01 00:00:00"), atol=atol)
assert ts[1].isclose(Time("2021-01-01 00:30:00"), atol=atol)
assert ts[2].isclose(Time("2021-01-01 01:00:00"), atol=atol)
ts = np.linspace(t1, t2[0], 2, endpoint=False)
assert ts.shape == (2, 2)
assert all(
ts[0].isclose(Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"]), atol=atol)
)
assert all(
ts[1].isclose(Time(["2021-01-01 00:30:00", "2021-01-01 12:30:00"]), atol=atol)
)
ts = np.linspace(t1, t2, 7)
assert ts.shape == (7, 2)
assert all(
ts[0].isclose(Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"]), atol=atol)
)
assert all(
ts[1].isclose(Time(["2021-01-01 00:10:00", "2021-03-03 00:00:00"]), atol=atol)
)
assert all(
ts[5].isclose(Time(["2021-01-01 00:50:00", "2021-10-29 00:00:00"]), atol=atol)
)
assert all(
ts[6].isclose(Time(["2021-01-01 01:00:00", "2021-12-28 00:00:00"]), atol=atol)
)
def test_linspace_steps():
"""Test `np.linspace` `retstep` option."""
t1 = Time(["2021-01-01 00:00:00", "2021-01-01 12:00:00"])
t2 = Time("2021-01-02 00:00:00")
atol = 2 * np.finfo(float).eps * abs(t1 - t2).max()
ts, st = np.linspace(t1, t2, 7, retstep=True)
assert ts.shape == (7, 2)
assert st.shape == (2,)
assert all(ts[1].isclose(ts[0] + st, atol=atol))
assert all(ts[6].isclose(ts[0] + 6 * st, atol=atol))
assert all(st.isclose(TimeDelta([14400, 7200], format="sec"), atol=atol))
def test_linspace_fmts():
"""Test `np.linspace` `__array_func__` implementation for start/endpoints
from different formats/systems.
"""
t1 = Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"])
t2 = Time(2458850, format="jd")
t3 = Time(1578009600, format="unix")
atol = 2 * np.finfo(float).eps * abs(t1 - Time([t2, t3])).max()
ts = np.linspace(t1, t2, 3)
assert ts.shape == (3, 2)
assert all(
ts[0].isclose(Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"]), atol=atol)
)
assert all(
ts[1].isclose(Time(["2020-01-01 06:00:00", "2020-01-01 18:00:00"]), atol=atol)
)
assert all(
ts[2].isclose(Time(["2020-01-01 12:00:00", "2020-01-01 12:00:00"]), atol=atol)
)
ts = np.linspace(t1, Time([t2, t3]), 3)
assert ts.shape == (3, 2)
assert all(
ts[0].isclose(Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"]), atol=atol)
)
assert all(
ts[1].isclose(Time(["2020-01-01 06:00:00", "2020-01-02 12:00:00"]), atol=atol)
)
assert all(
ts[2].isclose(Time(["2020-01-01 12:00:00", "2020-01-03 00:00:00"]), atol=atol)
)
def test_to_string():
dims = [8, 2, 8]
dx = np.arange(np.prod(dims)).reshape(dims)
tm = Time("2020-01-01", out_subfmt="date") + dx * u.day
exp_lines = [
"[[['2020-01-01' '2020-01-02' ... '2020-01-07' '2020-01-08']",
" ['2020-01-09' '2020-01-10' ... '2020-01-15' '2020-01-16']]",
"",
" [['2020-01-17' '2020-01-18' ... '2020-01-23' '2020-01-24']",
" ['2020-01-25' '2020-01-26' ... '2020-01-31' '2020-02-01']]",
"",
" ...",
"",
" [['2020-04-06' '2020-04-07' ... '2020-04-12' '2020-04-13']",
" ['2020-04-14' '2020-04-15' ... '2020-04-20' '2020-04-21']]",
"",
" [['2020-04-22' '2020-04-23' ... '2020-04-28' '2020-04-29']",
" ['2020-04-30' '2020-05-01' ... '2020-05-06' '2020-05-07']]]",
]
exp_str = "\n".join(exp_lines)
with np.printoptions(threshold=100, edgeitems=2, linewidth=75):
out_str = str(tm)
out_repr = repr(tm)
assert out_str == exp_str
exp_repr = f"<Time object: scale='utc' format='iso' value={exp_str}>"
assert out_repr == exp_repr
import contextlib
import decimal
import functools
import warnings
from datetime import datetime, timedelta
from decimal import Decimal
import erfa
import numpy as np
import pytest
from erfa import ErfaError, ErfaWarning
from hypothesis import assume, example, given, target
from hypothesis.extra.numpy import array_shapes, arrays
from hypothesis.strategies import (
composite,
datetimes,
floats,
integers,
one_of,
sampled_from,
timedeltas,
tuples,
)
import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import STANDARD_TIME_SCALES, Time, TimeDelta
from astropy.time.utils import day_frac, two_sum
from astropy.utils import iers
allclose_jd = functools.partial(np.allclose, rtol=np.finfo(float).eps, atol=0)
allclose_jd2 = functools.partial(
np.allclose, rtol=np.finfo(float).eps, atol=np.finfo(float).eps
) # 20 ps atol
allclose_sec = functools.partial(
np.allclose, rtol=np.finfo(float).eps, atol=np.finfo(float).eps * 24 * 3600
)
tiny = np.finfo(float).eps
dt_tiny = TimeDelta(tiny, format="jd")
def setup_module():
# Pre-load leap seconds table to avoid flakiness in hypothesis runs.
# See https://github.com/astropy/astropy/issues/11030
Time("2020-01-01").ut1
@pytest.fixture(scope="module")
def iers_b():
"""This is an expensive operation, so we share it between tests using a
module-scoped fixture instead of using the context manager form. This
is particularly important for Hypothesis, which invokes the decorated
test function many times (100 by default; see conftest.py for details).
"""
with iers.earth_orientation_table.set(iers.IERS_B.open(iers.IERS_B_FILE)):
yield "<using IERS-B orientation table>"
@contextlib.contextmanager
def quiet_erfa():
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=ErfaWarning)
yield
def assert_almost_equal(a, b, *, rtol=None, atol=None, label=""):
"""Assert numbers are almost equal.
This version also lets hypothesis know how far apart the inputs are, so
that it can work towards a failure and present the worst failure ever seen
as well as the simplest, which often just barely exceeds the threshold.
"""
__tracebackhide__ = True
if rtol is None or rtol == 0:
thresh = atol
elif atol is None:
thresh = rtol * (abs(a) + abs(b)) / 2
else:
thresh = atol + rtol * (abs(a) + abs(b)) / 2
amb = a - b
if isinstance(amb, TimeDelta):
ambv = amb.to_value(u.s)
target(ambv, label=label + " (a-b).to_value(u.s), from TimeDelta")
target(-ambv, label=label + " (b-a).to_value(u.s), from TimeDelta")
if isinstance(thresh, u.Quantity):
amb = amb.to(thresh.unit)
else:
try:
target_value = float(amb)
except TypeError:
pass
else:
target(target_value, label=label + " float(a-b)")
target(-target_value, label=label + " float(b-a)")
assert abs(amb) < thresh
# Days that end with leap seconds
# Some time scales use a so-called "leap smear" to cope with these, others
# have times they can't represent or can represent two different ways.
# In any case these days are liable to cause trouble in time conversions.
# Note that from_erfa includes some weird non-integer steps before 1970.
leap_second_table = iers.LeapSeconds.from_iers_leap_seconds()
# Days that contain leap_seconds
leap_second_days = leap_second_table["mjd"] - 1
leap_second_deltas = list(
zip(leap_second_days[1:], np.diff(leap_second_table["tai_utc"]))
)
today = Time.now()
mjd0 = Time(0, format="mjd")
def reasonable_ordinary_jd():
return tuples(floats(2440000, 2470000), floats(-0.5, 0.5))
@composite
def leap_second_tricky(draw):
mjd = draw(
one_of(
sampled_from(leap_second_days),
sampled_from(leap_second_days + 1),
sampled_from(leap_second_days - 1),
)
)
return mjd + mjd0.jd1 + mjd0.jd2, draw(floats(0, 1))
def reasonable_jd():
"""Pick a reasonable JD.
These should not be too far in the past or future (so that date conversion
routines don't have to deal with anything too exotic), but they should
include leap second days as a special case, and they should include several
particularly simple cases (today, the beginning of the MJD scale, a
reasonable date) so that hypothesis' example simplification produces
obviously simple examples when they trigger problems.
"""
moments = [(2455000.0, 0.0), (mjd0.jd1, mjd0.jd2), (today.jd1, today.jd2)]
return one_of(sampled_from(moments), reasonable_ordinary_jd(), leap_second_tricky())
def unreasonable_ordinary_jd():
"""JD pair that might be unordered or far away"""
return tuples(floats(-1e7, 1e7), floats(-1e7, 1e7))
def ordered_jd():
"""JD pair that is ordered but not necessarily near now"""
return tuples(floats(-1e7, 1e7), floats(-0.5, 0.5))
def unreasonable_jd():
return one_of(reasonable_jd(), ordered_jd(), unreasonable_ordinary_jd())
@composite
def jd_arrays(draw, jd_values):
s = draw(array_shapes())
d = np.dtype([("jd1", float), ("jd2", float)])
jdv = jd_values.map(lambda x: np.array(x, dtype=d))
a = draw(arrays(d, s, elements=jdv))
return a["jd1"], a["jd2"]
def unreasonable_delta():
return tuples(floats(-1e7, 1e7), floats(-1e7, 1e7))
def reasonable_delta():
return tuples(floats(-1e4, 1e4), floats(-0.5, 0.5))
# redundant?
def test_abs_jd2_always_less_than_half():
"""Make jd2 approach +/-0.5, and check that it doesn't go over."""
t1 = Time(2400000.5, [-tiny, +tiny], format="jd")
assert np.all(t1.jd1 % 1 == 0)
assert np.all(abs(t1.jd2) < 0.5)
t2 = Time(
2400000.0, [[0.5 - tiny, 0.5 + tiny], [-0.5 - tiny, -0.5 + tiny]], format="jd"
)
assert np.all(t2.jd1 % 1 == 0)
assert np.all(abs(t2.jd2) < 0.5)
@given(jd_arrays(unreasonable_jd()))
def test_abs_jd2_always_less_than_half_on_construction(jds):
jd1, jd2 = jds
t = Time(jd1, jd2, format="jd")
target(np.amax(np.abs(t.jd2)))
assert np.all(t.jd1 % 1 == 0)
assert np.all(abs(t.jd2) <= 0.5)
assert np.all((abs(t.jd2) < 0.5) | (t.jd1 % 2 == 0))
@given(integers(-(10**8), 10**8), sampled_from([-0.5, 0.5]))
def test_round_to_even(jd1, jd2):
t = Time(jd1, jd2, format="jd")
assert (abs(t.jd2) == 0.5) and (t.jd1 % 2 == 0)
def test_addition():
"""Check that an addition at the limit of precision (2^-52) is seen"""
t = Time(2455555.0, 0.5, format="jd", scale="utc")
t_dt = t + dt_tiny
assert t_dt.jd1 == t.jd1 and t_dt.jd2 != t.jd2
# Check that the addition is exactly reversed by the corresponding
# subtraction
t2 = t_dt - dt_tiny
assert t2.jd1 == t.jd1 and t2.jd2 == t.jd2
def test_mult_div():
"""Test precision with multiply and divide"""
dt_small = 6 * dt_tiny
# pick a number that will leave remainder if divided by 6.
dt_big = TimeDelta(20000.0, format="jd")
dt_big_small_by_6 = (dt_big + dt_small) / 6.0
dt_frac = dt_big_small_by_6 - TimeDelta(3333.0, format="jd")
assert allclose_jd2(dt_frac.jd2, 0.33333333333333354)
def test_init_variations():
"""Check that 3 ways of specifying a time + small offset are equivalent"""
dt_tiny_sec = dt_tiny.jd2 * 86400.0
t1 = Time(1e11, format="cxcsec") + dt_tiny
t2 = Time(1e11, dt_tiny_sec, format="cxcsec")
t3 = Time(dt_tiny_sec, 1e11, format="cxcsec")
assert t1.jd1 == t2.jd1
assert t1.jd2 == t3.jd2
assert t1.jd1 == t2.jd1
assert t1.jd2 == t3.jd2
def test_precision_exceeds_64bit():
"""
Check that Time object really holds more precision than float64 by looking
at the (naively) summed 64-bit result and asserting equality at the
bit level.
"""
t1 = Time(1.23456789e11, format="cxcsec")
t2 = t1 + dt_tiny
assert t1.jd == t2.jd
def test_through_scale_change():
"""Check that precision holds through scale change (cxcsec is TT)"""
t0 = Time(1.0, format="cxcsec")
t1 = Time(1.23456789e11, format="cxcsec")
dt_tt = t1 - t0
dt_tai = t1.tai - t0.tai
assert allclose_jd(dt_tt.jd1, dt_tai.jd1)
assert allclose_jd2(dt_tt.jd2, dt_tai.jd2)
def test_iso_init():
"""Check when initializing from ISO date"""
t1 = Time("2000:001:00:00:00.00000001", scale="tai")
t2 = Time("3000:001:13:00:00.00000002", scale="tai")
dt = t2 - t1
assert allclose_jd2(dt.jd2, 13.0 / 24.0 + 1e-8 / 86400.0 - 1.0)
def test_jd1_is_mult_of_one():
"""
Check that jd1 is a multiple of 1.
"""
t1 = Time("2000:001:00:00:00.00000001", scale="tai")
assert np.round(t1.jd1) == t1.jd1
t1 = Time(1.23456789, 12345678.90123456, format="jd", scale="tai")
assert np.round(t1.jd1) == t1.jd1
def test_precision_neg():
"""
Check precision when jd1 is negative. This used to fail because ERFA
routines use a test like jd1 > jd2 to decide which component to update.
It was updated to abs(jd1) > abs(jd2) in erfa 1.6 (sofa 20190722).
"""
t1 = Time(-100000.123456, format="jd", scale="tt")
assert np.round(t1.jd1) == t1.jd1
t1_tai = t1.tai
assert np.round(t1_tai.jd1) == t1_tai.jd1
def test_precision_epoch():
"""
Check that input via epoch also has full precision, i.e., against
regression on https://github.com/astropy/astropy/pull/366
"""
t_utc = Time(range(1980, 2001), format="jyear", scale="utc")
t_tai = Time(range(1980, 2001), format="jyear", scale="tai")
dt = t_utc - t_tai
assert allclose_sec(dt.sec, np.round(dt.sec))
def test_leap_seconds_rounded_correctly():
"""Regression tests against #2083, where a leap second was rounded
incorrectly by the underlying ERFA routine."""
with iers.conf.set_temp("auto_download", False):
t = Time(
["2012-06-30 23:59:59.413", "2012-07-01 00:00:00.413"],
scale="ut1",
precision=3,
).utc
assert np.all(
t.iso == np.array(["2012-06-30 23:59:60.000", "2012-07-01 00:00:00.000"])
)
# with the bug, both yielded '2012-06-30 23:59:60.000'
@given(integers(-(2**52) + 2, 2**52 - 2), floats(-1, 1))
@example(i=65536, f=3.637978807091714e-12)
def test_two_sum(i, f):
with decimal.localcontext(decimal.Context(prec=40)):
a = Decimal(i) + Decimal(f)
s, r = two_sum(i, f)
b = Decimal(s) + Decimal(r)
assert_almost_equal(a, b, atol=Decimal(tiny), rtol=Decimal(0))
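# Illustrative sketch (not part of the original test suite): two_sum returns
# the correctly rounded float sum plus the exact rounding error, so the two
# pieces together reproduce the true sum that the Decimal comparison above
# checks against.
def _example_two_sum():  # underscore-prefixed so pytest does not collect it
    from astropy.time.utils import two_sum
    s, err = two_sum(0.1, 0.2)
    # s is the rounded float result of 0.1 + 0.2; err is the small residual
    # such that Decimal(s) + Decimal(err) recovers Decimal(0.1) + Decimal(0.2).
    return s, err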
# The bounds are here since we want to be sure the sum does not go to infinity,
# which does not have to be completely symmetric; e.g., this used to fail:
# @example(f1=-3.089785075544792e307, f2=1.7976931348623157e308)
# See https://github.com/astropy/astropy/issues/12955#issuecomment-1186293703
@given(
floats(min_value=np.finfo(float).min / 2, max_value=np.finfo(float).max / 2),
floats(min_value=np.finfo(float).min / 2, max_value=np.finfo(float).max / 2),
)
def test_two_sum_symmetric(f1, f2):
np.testing.assert_equal(two_sum(f1, f2), two_sum(f2, f1))
@given(
floats(allow_nan=False, allow_infinity=False),
floats(allow_nan=False, allow_infinity=False),
)
@example(f1=8.988465674311579e307, f2=8.98846567431158e307)
@example(f1=8.988465674311579e307, f2=-8.98846567431158e307)
@example(f1=-8.988465674311579e307, f2=-8.98846567431158e307)
@example(f1=-7.303128360378417e307, f2=1.7976931348623157e308)
def test_two_sum_size(f1, f2):
r1, r2 = two_sum(f1, f2)
assert (
abs(r1) > abs(r2) / np.finfo(float).eps
or r1 == r2 == 0
or not np.isfinite(abs(f1) + abs(f2))
)
@given(integers(-(2**52) + 2, 2**52 - 2), floats(-1, 1))
@example(i=65536, f=3.637978807091714e-12)
def test_day_frac_harmless(i, f):
with decimal.localcontext(decimal.Context(prec=40)):
a = Decimal(i) + Decimal(f)
i_d, f_d = day_frac(i, f)
a_d = Decimal(i_d) + Decimal(f_d)
assert_almost_equal(a, a_d, atol=Decimal(tiny), rtol=Decimal(0))
@given(integers(-(2**52) + 2, 2**52 - 2), floats(-0.5, 0.5))
@example(i=65536, f=3.637978807091714e-12)
@example(i=1, f=0.49999999999999994)
def test_day_frac_exact(i, f):
assume(abs(f) < 0.5 or i % 2 == 0)
i_d, f_d = day_frac(i, f)
assert i == i_d
assert f == f_d
@given(integers(-(2**52) + 2, 2**52 - 2), floats(-1, 1))
@example(i=65536, f=3.637978807091714e-12)
def test_day_frac_idempotent(i, f):
i_d, f_d = day_frac(i, f)
assert (i_d, f_d) == day_frac(i_d, f_d)
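# Illustrative sketch (not part of the original test suite): day_frac
# normalizes a two-part day count so the first part is an integer and the
# second lies in [-0.5, 0.5], the invariants asserted in the surrounding tests.
def _example_day_frac():  # underscore-prefixed so pytest does not collect it
    from astropy.time.utils import day_frac
    day, frac = day_frac(10, 0.75)
    # day == 11.0 and frac == -0.25: the integer part absorbs the excess so
    # that abs(frac) <= 0.5.
    return day, frac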
@given(integers(-(2**52) + 2, 2**52 - int(erfa.DJM0) - 3), floats(-1, 1))
@example(i=65536, f=3.637978807091714e-12)
def test_mjd_initialization_precise(i, f):
t = Time(val=i, val2=f, format="mjd", scale="tai")
jd1, jd2 = day_frac(i + erfa.DJM0, f)
jd1_t, jd2_t = day_frac(t.jd1, t.jd2)
assert (abs((jd1 - jd1_t) + (jd2 - jd2_t)) * u.day).to(u.ns) < 1 * u.ns
@given(jd_arrays(unreasonable_jd()))
def test_day_frac_always_less_than_half(jds):
jd1, jd2 = jds
t_jd1, t_jd2 = day_frac(jd1, jd2)
assert np.all(t_jd1 % 1 == 0)
assert np.all(abs(t_jd2) <= 0.5)
assert np.all((abs(t_jd2) < 0.5) | (t_jd1 % 2 == 0))
@given(integers(-(10**8), 10**8), sampled_from([-0.5, 0.5]))
def test_day_frac_round_to_even(jd1, jd2):
t_jd1, t_jd2 = day_frac(jd1, jd2)
assert (abs(t_jd2) == 0.5) and (t_jd1 % 2 == 0)
@given(
scale=sampled_from([sc for sc in STANDARD_TIME_SCALES if sc != "utc"]),
jds=unreasonable_jd(),
)
@example(scale="tai", jds=(0.0, 0.0))
@example(scale="tai", jds=(0.0, -31738.500000000346))
def test_resolution_never_decreases(scale, jds):
jd1, jd2 = jds
t = Time(jd1, jd2, format="jd", scale=scale)
with quiet_erfa():
assert t != t + dt_tiny
@given(reasonable_jd())
@example(jds=(2442777.5, 0.9999999999999999))
def test_resolution_never_decreases_utc(jds):
"""UTC is very unhappy with unreasonable times,
Unlike for the other timescales, in which addition is done
directly, here the time is transformed to TAI before addition, and
then back to UTC. Hence, some rounding errors can occur and only
a change of 2*dt_tiny is guaranteed to give a different time.
"""
jd1, jd2 = jds
t = Time(jd1, jd2, format="jd", scale="utc")
with quiet_erfa():
assert t != t + 2 * dt_tiny
@given(
scale1=sampled_from(STANDARD_TIME_SCALES),
scale2=sampled_from(STANDARD_TIME_SCALES),
jds=unreasonable_jd(),
)
@example(scale1="tcg", scale2="ut1", jds=(2445149.5, 0.47187700984387526))
@example(scale1="tai", scale2="tcb", jds=(2441316.5, 0.0))
@example(scale1="tai", scale2="tcb", jds=(0.0, 0.0))
def test_conversion_preserves_jd1_jd2_invariant(iers_b, scale1, scale2, jds):
jd1, jd2 = jds
t = Time(jd1, jd2, scale=scale1, format="jd")
try:
with quiet_erfa():
t2 = getattr(t, scale2)
except iers.IERSRangeError: # UT1 conversion needs IERS data
assume(False)
except ErfaError:
assume(False)
assert t2.jd1 % 1 == 0
assert abs(t2.jd2) <= 0.5
assert abs(t2.jd2) < 0.5 or t2.jd1 % 2 == 0
@given(
scale1=sampled_from(STANDARD_TIME_SCALES),
scale2=sampled_from(STANDARD_TIME_SCALES),
jds=unreasonable_jd(),
)
@example(scale1="tai", scale2="utc", jds=(0.0, 0.0))
@example(scale1="utc", scale2="ut1", jds=(2441316.5, 0.9999999999999991))
@example(scale1="ut1", scale2="tai", jds=(2441498.5, 0.9999999999999999))
def test_conversion_never_loses_precision(iers_b, scale1, scale2, jds):
"""Check that time ordering remains if we convert to another scale.
Here, since scale differences can involve multiplication, we allow
for losing one ULP, i.e., we test that two times that differ by
two ULP will keep the same order if changed to another scale.
"""
jd1, jd2 = jds
t = Time(jd1, jd2, scale=scale1, format="jd")
# Near-zero UTC JDs degrade accuracy; not clear why,
# but also not so relevant, so ignoring.
if (scale1 == "utc" or scale2 == "utc") and abs(jd1 + jd2) < 1:
tiny = 100 * u.us
else:
tiny = 2 * dt_tiny
try:
with quiet_erfa():
t2 = t + tiny
t_scale2 = getattr(t, scale2)
t2_scale2 = getattr(t2, scale2)
assert t_scale2 < t2_scale2
except iers.IERSRangeError: # UT1 conversion needs IERS data
assume(scale1 != "ut1" or 2440000 < jd1 + jd2 < 2458000)
assume(scale2 != "ut1" or 2440000 < jd1 + jd2 < 2458000)
raise
except ErfaError:
# If the generated date is too early to compute a UTC julian date,
# and we're not converting between scales which are known to be safe,
# tell Hypothesis that this example is invalid and to try another.
# See https://docs.astropy.org/en/latest/time/index.html#time-scale
barycentric = {scale1, scale2}.issubset({"tcb", "tdb"})
geocentric = {scale1, scale2}.issubset({"tai", "tt", "tcg"})
assume(jd1 + jd2 >= -31738.5 or geocentric or barycentric)
raise
except AssertionError:
# Before 1972, TAI-UTC changed smoothly but not always very
# consistently; this can cause trouble on day boundaries for UTC to
# UT1; it is not clear whether this will ever be resolved (and is
# unlikely ever to matter).
# Furthermore, exactly at leap-second boundaries, it is possible to
# get the wrong leap-second correction due to rounding errors.
# The latter is xfail'd for now, but should be fixed; see gh-13517.
if "ut1" in (scale1, scale2):
if abs(t_scale2 - t2_scale2 - 1 * u.s) < 1 * u.ms:
pytest.xfail()
assume(t.jd > 2441317.5 or t.jd2 < 0.4999999)
raise
@given(sampled_from(leap_second_deltas), floats(0.1, 0.9))
def test_leap_stretch_mjd(d, f):
mjd, delta = d
t0 = Time(mjd, format="mjd", scale="utc")
th = Time(mjd + f, format="mjd", scale="utc")
t1 = Time(mjd + 1, format="mjd", scale="utc")
assert_quantity_allclose((t1 - t0).to(u.s), (1 * u.day + delta * u.s))
assert_quantity_allclose((th - t0).to(u.s), f * (1 * u.day + delta * u.s))
assert_quantity_allclose((t1 - th).to(u.s), (1 - f) * (1 * u.day + delta * u.s))
@given(
scale=sampled_from(STANDARD_TIME_SCALES),
jds=unreasonable_jd(),
delta=floats(-10000, 10000),
)
@example(scale="utc", jds=(0.0, 2.2204460492503136e-13), delta=6.661338147750941e-13)
@example(
scale="utc", jds=(2441682.5, 2.2204460492503136e-16), delta=7.327471962526035e-12
)
@example(scale="utc", jds=(0.0, 5.787592627370942e-13), delta=0.0)
@example(scale="utc", jds=(1.0, 0.25000000023283064), delta=-1.0)
@example(scale="utc", jds=(0.0, 0.0), delta=2 * 2.220446049250313e-16)
@example(scale="utc", jds=(2442778.5, 0.0), delta=-2.220446049250313e-16)
def test_jd_add_subtract_round_trip(scale, jds, delta):
jd1, jd2 = jds
minimum_for_change = np.finfo(float).eps
thresh = 2 * dt_tiny
if scale == "utc":
if jd1 + jd2 < 1 or jd1 + jd2 + delta < 1:
# Near-zero UTC JDs degrade accuracy; not clear why,
# but also not so relevant, so ignoring.
minimum_for_change = 1e-9
thresh = minimum_for_change * u.day
else:
# UTC goes via TAI, so one can lose an extra bit.
minimum_for_change *= 2
t = Time(jd1, jd2, scale=scale, format="jd")
try:
with quiet_erfa():
t2 = t + delta * u.day
if abs(delta) >= minimum_for_change:
assert t2 != t
t3 = t2 - delta * u.day
assert_almost_equal(t3, t, atol=thresh, rtol=0)
except ErfaError:
assume(scale != "utc" or 2440000 < jd1 + jd2 < 2460000)
raise
@given(
scale=sampled_from(TimeDelta.SCALES),
jds=reasonable_jd(),
delta=floats(-3 * tiny, 3 * tiny),
)
@example(scale="tai", jds=(0.0, 3.5762786865234384), delta=2.220446049250313e-16)
@example(scale="tai", jds=(2441316.5, 0.0), delta=6.938893903907228e-17)
@example(scale="tai", jds=(2441317.5, 0.0), delta=-6.938893903907228e-17)
@example(scale="tai", jds=(2440001.0, 0.49999999999999994), delta=5.551115123125783e-17)
def test_time_argminmaxsort(scale, jds, delta):
jd1, jd2 = jds
t = Time(jd1, jd2, scale=scale, format="jd") + TimeDelta(
[0, delta], scale=scale, format="jd"
)
imin = t.argmin()
imax = t.argmax()
isort = t.argsort()
# Be careful in constructing diff, for case that abs(jd2[1]-jd2[0]) ~ 1.
# and that is compensated by jd1[1]-jd1[0] (see example above).
diff, extra = two_sum(t.jd2[1], -t.jd2[0])
diff += t.jd1[1] - t.jd1[0]
diff += extra
if diff < 0: # item 1 smaller
assert delta < 0
assert imin == 1 and imax == 0 and np.all(isort == [1, 0])
elif diff == 0: # identical within precision
assert abs(delta) <= tiny
assert imin == 0 and imax == 0 and np.all(isort == [0, 1])
else:
assert delta > 0
assert imin == 0 and imax == 1 and np.all(isort == [0, 1])
@given(sampled_from(STANDARD_TIME_SCALES), unreasonable_jd(), unreasonable_jd())
@example(scale="utc", jds_a=(2455000.0, 0.0), jds_b=(2443144.5, 0.5000462962962965))
@example(
scale="utc",
jds_a=(2459003.0, 0.267502885949074),
jds_b=(2454657.001045462, 0.49895453779026877),
)
def test_timedelta_full_precision(scale, jds_a, jds_b):
jd1_a, jd2_a = jds_a
jd1_b, jd2_b = jds_b
assume(
scale != "utc"
or (2440000 < jd1_a + jd2_a < 2460000 and 2440000 < jd1_b + jd2_b < 2460000)
)
if scale == "utc":
# UTC subtraction implies a scale change, so possible rounding errors.
tiny = 2 * dt_tiny
else:
tiny = dt_tiny
t_a = Time(jd1_a, jd2_a, scale=scale, format="jd")
t_b = Time(jd1_b, jd2_b, scale=scale, format="jd")
dt = t_b - t_a
assert dt != (t_b + tiny) - t_a
with quiet_erfa():
assert_almost_equal(
t_b - dt / 2, t_a + dt / 2, atol=2 * dt_tiny, rtol=0, label="midpoint"
)
assert_almost_equal(
t_b + dt, t_a + 2 * dt, atol=2 * dt_tiny, rtol=0, label="up"
)
assert_almost_equal(
t_b - 2 * dt, t_a - dt, atol=2 * dt_tiny, rtol=0, label="down"
)
@given(
scale=sampled_from(STANDARD_TIME_SCALES),
jds_a=unreasonable_jd(),
jds_b=unreasonable_jd(),
x=integers(1, 100),
y=integers(1, 100),
)
def test_timedelta_full_precision_arithmetic(scale, jds_a, jds_b, x, y):
jd1_a, jd2_a = jds_a
jd1_b, jd2_b = jds_b
t_a = Time(jd1_a, jd2_a, scale=scale, format="jd")
t_b = Time(jd1_b, jd2_b, scale=scale, format="jd")
with quiet_erfa():
try:
dt = t_b - t_a
dt_x = x * dt / (x + y)
dt_y = y * dt / (x + y)
assert_almost_equal(dt_x + dt_y, dt, atol=(x + y) * dt_tiny, rtol=0)
except ErfaError:
assume(
scale != "utc"
or (
2440000 < jd1_a + jd2_a < 2460000
and 2440000 < jd1_b + jd2_b < 2460000
)
)
raise
@given(
scale1=sampled_from(STANDARD_TIME_SCALES),
scale2=sampled_from(STANDARD_TIME_SCALES),
jds_a=reasonable_jd(),
jds_b=reasonable_jd(),
)
def test_timedelta_conversion(scale1, scale2, jds_a, jds_b):
jd1_a, jd2_a = jds_a
jd1_b, jd2_b = jds_b
# not translation invariant so can't convert TimeDelta
assume("utc" not in [scale1, scale2])
# Conversions involving UT1 are a problem, but staying within UT1 should work
assume(("ut1" not in [scale1, scale2]) or scale1 == scale2)
t_a = Time(jd1_a, jd2_a, scale=scale1, format="jd")
t_b = Time(jd1_b, jd2_b, scale=scale2, format="jd")
with quiet_erfa():
dt = t_b - t_a
t_a_2 = getattr(t_a, scale2)
t_b_2 = getattr(t_b, scale2)
dt_2 = getattr(dt, scale2)
assert_almost_equal(
t_b_2 - t_a_2, dt_2, atol=dt_tiny, rtol=0, label="converted"
)
# Implicit conversion
assert_almost_equal(
t_b_2 - t_a_2, dt, atol=dt_tiny, rtol=0, label="not converted"
)
# UTC disagrees when there are leap seconds
_utc_bad = [
(pytest.param(s, marks=pytest.mark.xfail) if s == "utc" else s)
for s in STANDARD_TIME_SCALES
]
@given(datetimes(), datetimes()) # datetimes have microsecond resolution
@example(dt1=datetime(1235, 1, 1, 0, 0), dt2=datetime(9950, 1, 1, 0, 0, 0, 890773))
@pytest.mark.parametrize("scale", _utc_bad)
def test_datetime_difference_agrees_with_timedelta(scale, dt1, dt2):
t1 = Time(dt1, scale=scale)
t2 = Time(dt2, scale=scale)
assert_almost_equal(
t2 - t1,
TimeDelta(dt2 - dt1, scale=None if scale == "utc" else scale),
atol=2 * u.us,
)
@given(
days=integers(-3000 * 365, 3000 * 365),
microseconds=integers(0, 24 * 60 * 60 * 1000000),
)
@pytest.mark.parametrize("scale", _utc_bad)
def test_datetime_to_timedelta(scale, days, microseconds):
td = timedelta(days=days, microseconds=microseconds)
assert TimeDelta(td, scale=scale) == TimeDelta(
days, microseconds / (86400 * 1e6), scale=scale, format="jd"
)
@given(
days=integers(-3000 * 365, 3000 * 365),
microseconds=integers(0, 24 * 60 * 60 * 1000000),
)
@pytest.mark.parametrize("scale", _utc_bad)
def test_datetime_timedelta_roundtrip(scale, days, microseconds):
td = timedelta(days=days, microseconds=microseconds)
assert td == TimeDelta(td, scale=scale).value
@given(days=integers(-3000 * 365, 3000 * 365), day_frac=floats(0, 1))
@example(days=262144, day_frac=2.314815006343452e-11)
@example(days=1048576, day_frac=1.157407503171726e-10)
@pytest.mark.parametrize("scale", _utc_bad)
def test_timedelta_datetime_roundtrip(scale, days, day_frac):
td = TimeDelta(days, day_frac, format="jd", scale=scale)
td.format = "datetime"
assert_almost_equal(td, TimeDelta(td.value, scale=scale), atol=2 * u.us)
@given(integers(-3000 * 365, 3000 * 365), floats(0, 1))
@example(days=262144, day_frac=2.314815006343452e-11)
@pytest.mark.parametrize("scale", _utc_bad)
def test_timedelta_from_parts(scale, days, day_frac):
kwargs = dict(format="jd", scale=scale)
whole = TimeDelta(days, day_frac, **kwargs)
from_parts = TimeDelta(days, **kwargs) + TimeDelta(day_frac, **kwargs)
assert whole == from_parts
def test_datetime_difference_agrees_with_timedelta_no_hypothesis():
scale = "tai"
dt1 = datetime(1235, 1, 1, 0, 0)
dt2 = datetime(9950, 1, 1, 0, 0, 0, 890773)
t1 = Time(dt1, scale=scale)
t2 = Time(dt2, scale=scale)
assert abs((t2 - t1) - TimeDelta(dt2 - dt1, scale=scale)) < 1 * u.us
# datetimes have microsecond resolution
@given(datetimes(), timedeltas())
@example(dt=datetime(2000, 1, 1, 0, 0), td=timedelta(days=-397683, microseconds=2))
@example(dt=datetime(2179, 1, 1, 0, 0), td=timedelta(days=-795365, microseconds=53))
@example(dt=datetime(2000, 1, 1, 0, 0), td=timedelta(days=1590729, microseconds=10))
@example(
dt=datetime(4357, 1, 1, 0, 0), td=timedelta(days=-1590729, microseconds=107770)
)
@example(
dt=datetime(4357, 1, 1, 0, 0, 0, 29),
td=timedelta(days=-1590729, microseconds=746292),
)
@pytest.mark.parametrize("scale", _utc_bad)
def test_datetime_timedelta_sum(scale, dt, td):
try:
dt + td
except OverflowError:
assume(False)
dt_a = Time(dt, scale=scale)
td_a = TimeDelta(td, scale=None if scale == "utc" else scale)
assert_almost_equal(dt_a + td_a, Time(dt + td, scale=scale), atol=2 * u.us)
@given(
jds=reasonable_jd(),
lat1=floats(-90, 90),
lat2=floats(-90, 90),
lon=floats(-180, 180),
)
@pytest.mark.parametrize("kind", ["apparent", "mean"])
def test_sidereal_lat_independent(iers_b, kind, jds, lat1, lat2, lon):
jd1, jd2 = jds
t1 = Time(jd1, jd2, scale="ut1", format="jd", location=(lon, lat1))
t2 = Time(jd1, jd2, scale="ut1", format="jd", location=(lon, lat2))
try:
assert_almost_equal(
t1.sidereal_time(kind), t2.sidereal_time(kind), atol=1 * u.uas
)
except iers.IERSRangeError:
assume(False)
@given(
jds=reasonable_jd(),
lat=floats(-90, 90),
lon=floats(-180, 180),
lon_delta=floats(-360, 360),
)
@pytest.mark.parametrize("kind", ["apparent", "mean"])
def test_sidereal_lon_independent(iers_b, kind, jds, lat, lon, lon_delta):
jd1, jd2 = jds
t1 = Time(jd1, jd2, scale="ut1", format="jd", location=(lon, lat))
t2 = Time(jd1, jd2, scale="ut1", format="jd", location=(lon + lon_delta, lat))
try:
diff = t1.sidereal_time(kind) + lon_delta * u.degree - t2.sidereal_time(kind)
except iers.IERSRangeError:
assume(False)
else:
expected_degrees = (diff.to_value(u.degree) + 180) % 360
assert_almost_equal(expected_degrees, 180, atol=1 / (60 * 60 * 1000))
|
3c4c59b334998314ef5d72649a20d1fa57c79dfd4f900191fb54c93394f6f36f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""Handles the CDS string format for units."""
import re
from astropy.units.utils import is_effectively_unity
from astropy.utils import classproperty, parsing
from astropy.utils.misc import did_you_mean
from . import core, utils
from .base import Base
class CDS(Base):
"""
Support the `Centre de Données astronomiques de Strasbourg
<http://cds.u-strasbg.fr/>`_ `Standards for Astronomical
Catalogues 2.0 <http://vizier.u-strasbg.fr/vizier/doc/catstd-3.2.htx>`_
format, and the `complete set of supported units
<https://vizier.u-strasbg.fr/viz-bin/Unit>`_. This format is used
by VOTable up to version 1.2.
"""
_space = "."
_times = "x"
_scale_unit_separator = ""
_tokens = (
"PRODUCT",
"DIVISION",
"OPEN_PAREN",
"CLOSE_PAREN",
"OPEN_BRACKET",
"CLOSE_BRACKET",
"X",
"SIGN",
"UINT",
"UFLOAT",
"UNIT",
"DIMENSIONLESS",
)
@classproperty(lazy=True)
def _units(cls):
return cls._generate_unit_names()
@classproperty(lazy=True)
def _parser(cls):
return cls._make_parser()
@classproperty(lazy=True)
def _lexer(cls):
return cls._make_lexer()
@staticmethod
def _generate_unit_names():
from astropy import units as u
from astropy.units import cds
names = {}
for key, val in cds.__dict__.items():
if isinstance(val, u.UnitBase):
names[key] = val
return names
@classmethod
def _make_lexer(cls):
tokens = cls._tokens
t_PRODUCT = r"\."
t_DIVISION = r"/"
t_OPEN_PAREN = r"\("
t_CLOSE_PAREN = r"\)"
t_OPEN_BRACKET = r"\["
t_CLOSE_BRACKET = r"\]"
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r"((\d+\.?\d+)|(\.\d+))([eE][+-]?\d+)?"
if not re.search(r"[eE\.]", t.value):
t.type = "UINT"
t.value = int(t.value)
else:
t.value = float(t.value)
return t
def t_UINT(t):
r"\d+"
t.value = int(t.value)
return t
def t_SIGN(t):
r"[+-](?=\d)"
t.value = float(t.value + "1")
return t
def t_X(t): # multiplication for factor in front of unit
r"[x×]"
return t
def t_UNIT(t):
r"\%|°|\\h|((?!\d)\w)+"
t.value = cls._get_unit(t)
return t
def t_DIMENSIONLESS(t):
r"---|-"
# These are separate from t_UNIT since they cannot have a prefactor.
t.value = cls._get_unit(t)
return t
t_ignore = ""
# Error handling rule
def t_error(t):
raise ValueError(f"Invalid character at col {t.lexpos}")
return parsing.lex(
lextab="cds_lextab", package="astropy/units", reflags=int(re.UNICODE)
)
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the `Standards
for Astronomical Catalogues 2.0
<http://vizier.u-strasbg.fr/vizier/doc/catstd-3.2.htx>`_, which is not
terribly precise. The exact grammar here is based on the
YACC grammar in the `unity library <https://purl.org/nxg/dist/unity/>`_.
"""
tokens = cls._tokens
def p_main(p):
"""
main : factor combined_units
| combined_units
| DIMENSIONLESS
| OPEN_BRACKET combined_units CLOSE_BRACKET
| OPEN_BRACKET DIMENSIONLESS CLOSE_BRACKET
| factor
"""
from astropy.units import dex
from astropy.units.core import Unit
if len(p) == 3:
p[0] = Unit(p[1] * p[2])
elif len(p) == 4:
p[0] = dex(p[2])
else:
p[0] = Unit(p[1])
def p_combined_units(p):
"""
combined_units : product_of_units
| division_of_units
"""
p[0] = p[1]
def p_product_of_units(p):
"""
product_of_units : unit_expression PRODUCT combined_units
| unit_expression
"""
if len(p) == 4:
p[0] = p[1] * p[3]
else:
p[0] = p[1]
def p_division_of_units(p):
"""
division_of_units : DIVISION unit_expression
| combined_units DIVISION unit_expression
"""
if len(p) == 3:
p[0] = p[2] ** -1
else:
p[0] = p[1] / p[3]
def p_unit_expression(p):
"""
unit_expression : unit_with_power
| OPEN_PAREN combined_units CLOSE_PAREN
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
def p_factor(p):
"""
factor : signed_float X UINT signed_int
| UINT X UINT signed_int
| UINT signed_int
| UINT
| signed_float
"""
if len(p) == 5:
if p[3] != 10:
raise ValueError("Only base ten exponents are allowed in CDS")
p[0] = p[1] * 10.0 ** p[4]
elif len(p) == 3:
if p[1] != 10:
raise ValueError("Only base ten exponents are allowed in CDS")
p[0] = 10.0 ** p[2]
elif len(p) == 2:
p[0] = p[1]
def p_unit_with_power(p):
"""
unit_with_power : UNIT numeric_power
| UNIT
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[1] ** p[2]
def p_numeric_power(p):
"""
numeric_power : sign UINT
"""
p[0] = p[1] * p[2]
def p_sign(p):
"""
sign : SIGN
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_signed_int(p):
"""
signed_int : SIGN UINT
"""
p[0] = p[1] * p[2]
def p_signed_float(p):
"""
signed_float : sign UINT
| sign UFLOAT
"""
p[0] = p[1] * p[2]
def p_error(p):
raise ValueError()
return parsing.yacc(tabmodule="cds_parsetab", package="astropy/units")
@classmethod
def _get_unit(cls, t):
try:
return cls._parse_unit(t.value)
except ValueError as e:
registry = core.get_current_unit_registry()
if t.value in registry.aliases:
return registry.aliases[t.value]
raise ValueError(f"At col {t.lexpos}, {str(e)}")
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
"Unit '{}' not supported by the CDS SAC standard. {}".format(
unit, did_you_mean(unit, cls._units)
)
)
else:
raise ValueError()
return cls._units[unit]
@classmethod
def parse(cls, s, debug=False):
if " " in s:
raise ValueError("CDS unit must not contain whitespace")
if not isinstance(s, str):
s = s.decode("ascii")
# This is a short circuit for the case where the string
# is just a single unit name
try:
return cls._parse_unit(s, detailed_exception=False)
except ValueError:
try:
return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
except ValueError as e:
if str(e):
raise ValueError(str(e))
else:
raise ValueError("Syntax error")
@classmethod
def format_exponential_notation(cls, val, format_spec=".8g"):
m, ex = utils.split_mantissa_exponent(val)
parts = []
if m not in ("", "1"):
parts.append(m)
if ex:
if not ex.startswith("-"):
ex = "+" + ex
parts.append(f"10{cls._format_superscript(ex)}")
return cls._times.join(parts)
@classmethod
def _format_superscript(cls, number):
return number
@classmethod
def to_string(cls, unit, fraction=False):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if not unit.bases:
if unit.scale == 1:
return "---"
elif is_effectively_unity(unit.scale * 100.0):
return "%"
return super().to_string(unit, fraction=fraction)
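# Hedged usage sketch (illustrative only): the CDS formatter is normally
# reached through the public astropy.units API rather than used directly, and
# the exact rendered strings may vary slightly between astropy versions.
def _example_cds_usage():
    from astropy import units as u
    parsed = u.Unit("km/s", format="cds")            # parse a CDS unit string
    rendered = (u.km / u.s).to_string(format="cds")  # render in CDS notation
    return parsed, rendered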
|
32699647e6d1cb56a01b4026087a7cc5f2a5048ba4845fb54b51b872a7003123 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "Console" unit format.
"""
from . import base, utils
class Console(base.Base):
"""
Output-only format to display pretty formatting at the
console.
For example::
>>> import astropy.units as u
>>> print(u.Ry.decompose().to_string('console')) # doctest: +FLOAT_CMP
2.1798721*10^-18 m^2 kg s^-2
>>> print(u.Ry.decompose().to_string('console', fraction='multiline')) # doctest: +FLOAT_CMP
m^2 kg
2.1798721*10^-18 ------
s^2
>>> print(u.Ry.decompose().to_string('console', fraction='inline')) # doctest: +FLOAT_CMP
2.1798721*10^-18 m^2 kg / s^2
"""
_times = "*"
_line = "-"
_space = " "
@classmethod
def _format_mantissa(cls, m):
return m
@classmethod
def _format_superscript(cls, number):
return f"^{number}"
@classmethod
def format_exponential_notation(cls, val, format_spec=".8g"):
m, ex = utils.split_mantissa_exponent(val, format_spec)
parts = []
if m:
parts.append(cls._format_mantissa(m))
if ex:
parts.append(f"10{cls._format_superscript(ex)}")
return cls._times.join(parts)
@classmethod
def _format_fraction(cls, scale, numerator, denominator, fraction="multiline"):
if fraction != "multiline":
return super()._format_fraction(
scale, numerator, denominator, fraction=fraction
)
fraclength = max(len(numerator), len(denominator))
f = f"{{0:<{len(scale)}s}}{{1:^{fraclength}s}}"
return "\n".join(
(
f.format("", numerator),
f.format(scale, cls._line * fraclength),
f.format("", denominator),
)
)
@classmethod
def to_string(cls, unit, fraction=False):
# Change default of fraction to False, i.e., we typeset
# without a fraction by default.
return super().to_string(unit, fraction=fraction)
|
f7fbf0960d77a63c477d9817093db55b2efb7a5b6994a35ae873abd294cd62a2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "VOUnit" unit format.
"""
import copy
import keyword
import re
import warnings
from . import core, generic, utils
class VOUnit(generic.Generic):
"""
The IVOA standard for units used by the VO.
This is an implementation of `Units in the VO 1.0
<http://www.ivoa.net/documents/VOUnits/>`_.
"""
_explicit_custom_unit_regex = re.compile(r"^[YZEPTGMkhdcmunpfazy]?'((?!\d)\w)+'$")
_custom_unit_regex = re.compile(r"^((?!\d)\w)+$")
_custom_units = {}
_space = "."
_scale_unit_separator = ""
@staticmethod
def _generate_unit_names():
from astropy import units as u
from astropy.units import required_by_vounit as uvo
names = {}
deprecated_names = set()
bases = [
"A", "C", "D", "F", "G", "H", "Hz", "J", "Jy", "K", "N",
"Ohm", "Pa", "R", "Ry", "S", "T", "V", "W", "Wb", "a",
"adu", "arcmin", "arcsec", "barn", "beam", "bin", "cd",
"chan", "count", "ct", "d", "deg", "eV", "erg", "g", "h",
"lm", "lx", "lyr", "m", "mag", "min", "mol", "pc", "ph",
"photon", "pix", "pixel", "rad", "rad", "s", "solLum",
"solMass", "solRad", "sr", "u", "voxel", "yr",
] # fmt: skip
binary_bases = ["bit", "byte", "B"]
simple_units = ["Angstrom", "angstrom", "AU", "au", "Ba", "dB", "mas"]
si_prefixes = [
"y", "z", "a", "f", "p", "n", "u", "m", "c", "d",
"", "da", "h", "k", "M", "G", "T", "P", "E", "Z", "Y"
] # fmt: skip
binary_prefixes = ["Ki", "Mi", "Gi", "Ti", "Pi", "Ei"]
deprecated_units = {
"a", "angstrom", "Angstrom", "au", "Ba", "barn", "ct",
"erg", "G", "ph", "pix",
} # fmt: skip
def do_defines(bases, prefixes, skips=[]):
for base in bases:
for prefix in prefixes:
key = prefix + base
if key in skips:
continue
if keyword.iskeyword(key):
continue
names[key] = getattr(u if hasattr(u, key) else uvo, key)
if base in deprecated_units:
deprecated_names.add(key)
do_defines(bases, si_prefixes, ["pct", "pcount", "yd"])
do_defines(binary_bases, si_prefixes + binary_prefixes, ["dB", "dbyte"])
do_defines(simple_units, [""])
return names, deprecated_names, []
@classmethod
def parse(cls, s, debug=False):
if s in ("unknown", "UNKNOWN"):
return None
if s == "":
return core.dimensionless_unscaled
# Check for excess solidi, but exclude fractional exponents (allowed)
if s.count("/") > 1 and s.count("/") - len(re.findall(r"\(\d+/\d+\)", s)) > 1:
raise core.UnitsError(
f"'{s}' contains multiple slashes, which is "
"disallowed by the VOUnit standard."
)
result = cls._do_parse(s, debug=debug)
if hasattr(result, "function_unit"):
raise ValueError("Function units are not yet supported in VOUnit.")
return result
@classmethod
def _get_unit(cls, t):
try:
return super()._get_unit(t)
except ValueError:
if cls._explicit_custom_unit_regex.match(t.value):
return cls._def_custom_unit(t.value)
if cls._custom_unit_regex.match(t.value):
warnings.warn(
f"Unit {t.value!r} not supported by the VOUnit standard. "
+ utils.did_you_mean_units(
t.value,
cls._units,
cls._deprecated_units,
cls._to_decomposed_alternative,
),
core.UnitsWarning,
)
return cls._def_custom_unit(t.value)
raise
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
raise ValueError()
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], "VOUnit", cls._to_decomposed_alternative
)
return cls._units[unit]
@classmethod
def _get_unit_name(cls, unit):
# The da- and d- prefixes are discouraged. This has the
# effect of adding a scale to value in the result.
if isinstance(unit, core.PrefixUnit):
if unit._represents.scale == 10.0:
raise ValueError(
f"In '{unit}': VOUnit can not represent units with the 'da' "
"(deka) prefix"
)
elif unit._represents.scale == 0.1:
raise ValueError(
f"In '{unit}': VOUnit can not represent units with the 'd' "
"(deci) prefix"
)
name = super()._get_unit_name(unit)
if unit in cls._custom_units.values():
return name
if name not in cls._units:
raise ValueError(f"Unit {name!r} is not part of the VOUnit standard")
if name in cls._deprecated_units:
utils.unit_deprecation_warning(
name, unit, "VOUnit", cls._to_decomposed_alternative
)
return name
@classmethod
def _def_custom_unit(cls, unit):
def def_base(name):
if name in cls._custom_units:
return cls._custom_units[name]
if name.startswith("'"):
return core.def_unit(
[name[1:-1], name],
format={"vounit": name},
namespace=cls._custom_units,
)
else:
return core.def_unit(name, namespace=cls._custom_units)
if unit in cls._custom_units:
return cls._custom_units[unit]
for short, full, factor in core.si_prefixes:
for prefix in short:
if unit.startswith(prefix):
base_name = unit[len(prefix) :]
base_unit = def_base(base_name)
return core.PrefixUnit(
[prefix + x for x in base_unit.names],
core.CompositeUnit(
factor, [base_unit], [1], _error_check=False
),
format={"vounit": prefix + base_unit.names[-1]},
namespace=cls._custom_units,
)
return def_base(unit)
@classmethod
def _format_superscript(cls, number):
return f"({number})" if "/" in number or "." in number else f"**{number}"
@classmethod
def format_exponential_notation(cls, val, format_spec=".8g"):
return super().format_exponential_notation(val, format_spec)
@classmethod
def to_string(cls, unit, fraction=False):
from astropy.units import core
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if unit.physical_type == "dimensionless" and unit.scale != 1:
raise core.UnitScaleError(
"The VOUnit format is not able to "
"represent scale for dimensionless units. "
f"Multiply your data by {unit.scale:e}."
)
return super().to_string(unit, fraction=fraction)
@classmethod
def _to_decomposed_alternative(cls, unit):
from astropy.units import core
try:
s = cls.to_string(unit)
except core.UnitScaleError:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return f"{cls.to_string(unit)} (with data multiplied by {scale})"
return s
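# Hedged usage sketch (illustrative only): VOUnit strings are parsed through
# the public astropy.units API; strings outside the standard that match the
# custom-unit pattern are accepted with a UnitsWarning, as implemented in
# _get_unit/_def_custom_unit above.
def _example_vounit_usage():
    from astropy import units as u
    parsed = u.Unit("km/s", format="vounit")             # standard VOUnit string
    rendered = (u.km / u.s).to_string(format="vounit")   # render in VOUnit notation
    return parsed, rendered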
|
2d5c69726fb990a37a85a683041e8b22485eba5bdc04d707abc7f6a46403d362 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "LaTeX" unit format.
"""
import re
from . import console, utils
class Latex(console.Console):
"""
Output LaTeX to display the unit based on IAU style guidelines.
Attempts to follow the `IAU Style Manual
<https://www.iau.org/static/publications/stylemanual1989.pdf>`_.
"""
_space = r"\,"
_scale_unit_separator = r"\,"
_times = r" \times "
@classmethod
def _get_unit_name(cls, unit):
# Hard-code "latex" rather than use super() (which would pass cls.name),
# so that the latex_inline subclass reuses the latex format names.
name = unit.get_format_name("latex")
if name == unit.name:
# This doesn't escape arbitrary LaTeX strings, but it should
# be good enough for unit names which are required to be alpha
# + "_" anyway.
return name.replace("_", r"\_")
else:
return name
@classmethod
def _format_mantissa(cls, m):
return m.replace("nan", r"{\rm NaN}").replace("inf", r"\infty")
@classmethod
def _format_superscript(cls, number):
return f"^{{{number}}}"
@classmethod
def _format_unit_power(cls, unit, power=1):
name = cls._get_unit_name(unit)
if power != 1:
# If the LaTeX representation of the base unit already ends with
# a superscript, we need to spell out the unit to avoid double
# superscripts. For example, the logic below ensures that
# `u.deg**2` returns `deg^{2}` instead of `{}^{\circ}^{2}`.
if re.match(r".*\^{[^}]*}$", name): # ends w/ superscript?
name = unit.short_names[0]
name += cls._format_superscript(utils.format_power(power))
return name
@classmethod
def _format_fraction(cls, scale, numerator, denominator, *, fraction="multiline"):
if fraction != "multiline":
return super()._format_fraction(
scale, numerator, denominator, fraction=fraction
)
return rf"{scale}\frac{{{numerator}}}{{{denominator}}}"
@classmethod
def to_string(cls, unit, fraction="multiline"):
s = super().to_string(unit, fraction=fraction)
return rf"$\mathrm{{{s}}}$"
class LatexInline(Latex):
"""
Output LaTeX to display the unit based on IAU style guidelines with negative
powers.
Attempts to follow the `IAU Style Manual
<https://www.iau.org/static/publications/stylemanual1989.pdf>`_ and the
`ApJ and AJ style guide
<https://journals.aas.org/manuscript-preparation/>`_.
"""
name = "latex_inline"
@classmethod
def to_string(cls, unit, fraction=False):
return super().to_string(unit, fraction=fraction)
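# Hedged usage sketch (illustrative only): both formatters are reached via
# format names in to_string; typical outputs are a \frac-style fraction for
# "latex" and negative powers (e.g. km\,s^{-1}) for "latex_inline", though the
# exact strings may vary between astropy versions.
def _example_latex_usage():
    from astropy import units as u
    block = (u.km / u.s).to_string(format="latex")           # \frac{km}{s} style
    inline = (u.km / u.s).to_string(format="latex_inline")   # km s^{-1} style
    return block, inline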
|
cf3f53b35624f5d3dd36263a2530b09e776b9351dee9c68366d1ba8fd7ec5936 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "Unicode" unit format.
"""
from . import console
class Unicode(console.Console):
"""
Output-only format to display pretty formatting at the console
using Unicode characters.
For example::
>>> import astropy.units as u
>>> print(u.bar.decompose().to_string('unicode'))
100000 kg m⁻¹ s⁻²
>>> print(u.bar.decompose().to_string('unicode', fraction='multiline'))
kg
100000 ────
m s²
>>> print(u.bar.decompose().to_string('unicode', fraction='inline'))
100000 kg / (m s²)
"""
_times = "×"
_line = "─"
@classmethod
def _format_mantissa(cls, m):
return m.replace("-", "−")
@classmethod
def _format_superscript(cls, number):
mapping = str.maketrans(
{
"0": "⁰",
"1": "¹",
"2": "²",
"3": "³",
"4": "⁴",
"5": "⁵",
"6": "⁶",
"7": "⁷",
"8": "⁸",
"9": "⁹",
"-": "⁻",
"−": "⁻",
# This is actually a "raised omission bracket", but it's
# the closest thing I could find to a superscript solidus.
"/": "⸍",
}
)
return number.translate(mapping)
|
69ccbf67ce1c8e0b0fa3fa9e53bd47e000f689eb93d43dd4e3a39c8442fdd845 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from . import utils
class Base:
"""
The abstract base class of all unit formats.
"""
registry = {}
_space = " "
_scale_unit_separator = " "
def __new__(cls, *args, **kwargs):
# This __new__ is to make it clear that there is no reason to
# instantiate a Formatter--if you try to you'll just get back the
# class
return cls
def __init_subclass__(cls, **kwargs):
# Keep a registry of all formats. Key by the class name unless a name
# is explicitly set (i.e., one *not* inherited from a superclass).
if "name" not in cls.__dict__:
cls.name = cls.__name__.lower()
Base.registry[cls.name] = cls
super().__init_subclass__(**kwargs)
@classmethod
def _get_unit_name(cls, unit):
return unit.get_format_name(cls.name)
@classmethod
def format_exponential_notation(cls, val, format_spec="g"):
"""
Formats a value in exponential notation.
Parameters
----------
val : number
The value to be formatted
format_spec : str, optional
Format used to split up mantissa and exponent
Returns
-------
str
The value in exponential notation in this class's format.
"""
return format(val, format_spec)
@classmethod
def _format_superscript(cls, number):
return f"({number})" if "/" in number or "." in number else number
@classmethod
def _format_unit_power(cls, unit, power=1):
"""Format the unit for this format class raised to the given power.
This is overridden in Latex where the name of the unit can depend on the power
(e.g., for degrees).
"""
name = cls._get_unit_name(unit)
if power != 1:
name += cls._format_superscript(utils.format_power(power))
return name
@classmethod
def _format_unit_list(cls, units):
return cls._space.join(
cls._format_unit_power(base_, power) for base_, power in units
)
@classmethod
def _format_fraction(cls, scale, numerator, denominator, *, fraction="inline"):
if not (fraction is True or fraction == "inline"):
raise ValueError(
"format {cls.name!r} only supports inline fractions,"
f"not fraction={fraction!r}."
)
if cls._space in denominator:
denominator = f"({denominator})"
if scale and numerator == "1":
return f"{scale}/ {denominator}"
return f"{scale}{numerator} / {denominator}"
@classmethod
def to_string(cls, unit, *, fraction=True):
"""Convert a unit to its string representation.
Implementation for `~astropy.units.UnitBase.to_string`.
Parameters
----------
unit : |Unit|
The unit to convert.
fraction : {False|True|'inline'|'multiline'}, optional
Options are as follows:
- `False` : display unit bases with negative powers as they are
(e.g., ``km s-1``);
- 'inline' or `True` : use a single-line fraction (e.g., ``km / s``);
- 'multiline' : use a multiline fraction (available for the
``latex``, ``console`` and ``unicode`` formats only; e.g.,
``$\\mathrm{\\frac{km}{s}}$``).
Raises
------
ValueError
If ``fraction`` is not recognized.
"""
# First the scale. Normally unity, in which case we omit
# it, but non-unity scale can happen, e.g., in decompositions
# like u.Ry.decompose(), which gives "2.17987e-18 kg m2 / s2".
if unit.scale == 1:
s = ""
else:
s = cls.format_exponential_notation(unit.scale)
# Now the unit bases, taking care that dimensionless does not have any
# (but can have a scale; e.g., u.percent.decompose() gives "0.01").
if len(unit.bases):
if s:
s += cls._scale_unit_separator
if fraction:
numerator, denominator = utils.get_grouped_by_powers(
unit.bases, unit.powers
)
else:
numerator = list(zip(unit.bases, unit.powers))
denominator = []
if len(denominator):
if len(numerator):
numerator = cls._format_unit_list(numerator)
else:
numerator = "1"
denominator = cls._format_unit_list(denominator)
s = cls._format_fraction(s, numerator, denominator, fraction=fraction)
else:
s += cls._format_unit_list(numerator)
return s
@classmethod
def parse(cls, s):
"""
Convert a string to a unit object.
"""
raise NotImplementedError(f"Can not parse with {cls.__name__} format")
|
a399ffb9d5aaafeeb33d99e4b205ae584e0eea3a7188fbef6f1606d7a8b77f72 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles units in `Office of Guest Investigator Programs (OGIP)
FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__.
"""
import copy
import keyword
import math
import warnings
from fractions import Fraction
from astropy.utils import parsing
from . import core, generic, utils
class OGIP(generic.Generic):
"""
Support the units in `Office of Guest Investigator Programs (OGIP)
FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__.
"""
_tokens = (
"DIVISION",
"OPEN_PAREN",
"CLOSE_PAREN",
"WHITESPACE",
"STARSTAR",
"STAR",
"SIGN",
"UFLOAT",
"LIT10",
"UINT",
"UNKNOWN",
"UNIT",
)
@staticmethod
def _generate_unit_names():
from astropy import units as u
names = {}
deprecated_names = set()
bases = [
"A", "C", "cd", "eV", "F", "g", "H", "Hz", "J",
"Jy", "K", "lm", "lx", "m", "mol", "N", "ohm", "Pa",
"pc", "rad", "s", "S", "sr", "T", "V", "W", "Wb",
] # fmt: skip
deprecated_bases = []
prefixes = [
"y", "z", "a", "f", "p", "n", "u", "m", "c", "d",
"", "da", "h", "k", "M", "G", "T", "P", "E", "Z", "Y",
] # fmt: skip
for base in bases + deprecated_bases:
for prefix in prefixes:
key = prefix + base
if keyword.iskeyword(key):
continue
names[key] = getattr(u, key)
for base in deprecated_bases:
for prefix in prefixes:
deprecated_names.add(prefix + base)
simple_units = [
"angstrom", "arcmin", "arcsec", "AU", "barn", "bin",
"byte", "chan", "count", "day", "deg", "erg", "G",
"h", "lyr", "mag", "min", "photon", "pixel",
"voxel", "yr",
] # fmt: skip
for unit in simple_units:
names[unit] = getattr(u, unit)
# Create a separate, disconnected unit for the special case of
# Crab and mCrab, since OGIP doesn't define their quantities.
Crab = u.def_unit(["Crab"], prefixes=False, doc="Crab (X-ray flux)")
mCrab = u.Unit(10**-3 * Crab)
names["Crab"] = Crab
names["mCrab"] = mCrab
deprecated_units = ["Crab", "mCrab"]
for unit in deprecated_units:
deprecated_names.add(unit)
functions = [
"log", "ln", "exp", "sqrt", "sin", "cos", "tan", "asin",
"acos", "atan", "sinh", "cosh", "tanh",
] # fmt: skip
for name in functions:
names[name] = name
return names, deprecated_names, functions
@classmethod
def _make_lexer(cls):
tokens = cls._tokens
t_DIVISION = r"/"
t_OPEN_PAREN = r"\("
t_CLOSE_PAREN = r"\)"
t_WHITESPACE = "[ \t]+"
t_STARSTAR = r"\*\*"
t_STAR = r"\*"
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r"(((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+))|(((\d+\.\d*)|(\.\d+))([eE][+-]?\d+)?)"
t.value = float(t.value)
return t
def t_UINT(t):
r"\d+"
t.value = int(t.value)
return t
def t_SIGN(t):
r"[+-](?=\d)"
t.value = float(t.value + "1")
return t
def t_X(t): # multiplication for factor in front of unit
r"[x×]"
return t
def t_LIT10(t):
r"10"
return 10
def t_UNKNOWN(t):
r"[Uu][Nn][Kk][Nn][Oo][Ww][Nn]"
return None
def t_UNIT(t):
r"[a-zA-Z][a-zA-Z_]*"
t.value = cls._get_unit(t)
return t
# Don't ignore whitespace
t_ignore = ""
# Error handling rule
def t_error(t):
raise ValueError(f"Invalid character at col {t.lexpos}")
return parsing.lex(lextab="ogip_lextab", package="astropy/units")
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the
`Specification of Physical Units within OGIP FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__,
which is not terribly precise. The exact grammar here is
based on the YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
"""
tokens = cls._tokens
def p_main(p):
"""
main : UNKNOWN
| complete_expression
| scale_factor complete_expression
| scale_factor WHITESPACE complete_expression
"""
if len(p) == 4:
p[0] = p[1] * p[3]
elif len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1]
def p_complete_expression(p):
"""
complete_expression : product_of_units
"""
p[0] = p[1]
def p_product_of_units(p):
"""
product_of_units : unit_expression
| division unit_expression
| product_of_units product unit_expression
| product_of_units division unit_expression
"""
if len(p) == 4:
if p[2] == "DIVISION":
p[0] = p[1] / p[3]
else:
p[0] = p[1] * p[3]
elif len(p) == 3:
p[0] = p[2] ** -1
else:
p[0] = p[1]
def p_unit_expression(p):
"""
unit_expression : unit
| UNIT OPEN_PAREN complete_expression CLOSE_PAREN
| OPEN_PAREN complete_expression CLOSE_PAREN
| UNIT OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power
| OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power
"""
# If we test `p[1] in cls._functions` directly, the membership check will
# try to parse each item in the list into a unit for comparison, which is
# slow. Since we know that all the items in the list are strings, we can
# simply convert p[1] to a string instead.
p1_str = str(p[1])
if p1_str in cls._functions and p1_str != "sqrt":
raise ValueError(
f"The function '{p[1]}' is valid in OGIP, but not understood "
"by astropy.units."
)
if len(p) == 7:
if p1_str == "sqrt":
p[0] = p[1] * p[3] ** (0.5 * p[6])
else:
p[0] = p[1] * p[3] ** p[6]
elif len(p) == 6:
p[0] = p[2] ** p[5]
elif len(p) == 5:
if p1_str == "sqrt":
p[0] = p[3] ** 0.5
else:
p[0] = p[1] * p[3]
elif len(p) == 4:
p[0] = p[2]
else:
p[0] = p[1]
def p_scale_factor(p):
"""
scale_factor : LIT10 power numeric_power
| LIT10
| signed_float
| signed_float power numeric_power
| signed_int power numeric_power
"""
if len(p) == 4:
p[0] = 10 ** p[3]
else:
p[0] = p[1]
# Can't use np.log10 here, because p[0] may be an arbitrarily large Python int.
if math.log10(p[0]) % 1.0 != 0.0:
from astropy.units.core import UnitsWarning
warnings.warn(
f"'{p[0]}' scale should be a power of 10 in OGIP format",
UnitsWarning,
)
def p_division(p):
"""
division : DIVISION
| WHITESPACE DIVISION
| WHITESPACE DIVISION WHITESPACE
| DIVISION WHITESPACE
"""
p[0] = "DIVISION"
def p_product(p):
"""
product : WHITESPACE
| STAR
| WHITESPACE STAR
| WHITESPACE STAR WHITESPACE
| STAR WHITESPACE
"""
p[0] = "PRODUCT"
def p_power(p):
"""
power : STARSTAR
"""
p[0] = "POWER"
def p_unit(p):
"""
unit : UNIT
| UNIT power numeric_power
"""
if len(p) == 4:
p[0] = p[1] ** p[3]
else:
p[0] = p[1]
def p_numeric_power(p):
"""
numeric_power : UINT
| signed_float
| OPEN_PAREN signed_int CLOSE_PAREN
| OPEN_PAREN signed_float CLOSE_PAREN
| OPEN_PAREN signed_float division UINT CLOSE_PAREN
"""
if len(p) == 6:
p[0] = Fraction(int(p[2]), int(p[4]))
elif len(p) == 4:
p[0] = p[2]
else:
p[0] = p[1]
def p_sign(p):
"""
sign : SIGN
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_signed_int(p):
"""
signed_int : SIGN UINT
"""
p[0] = p[1] * p[2]
def p_signed_float(p):
"""
signed_float : sign UINT
| sign UFLOAT
"""
p[0] = p[1] * p[2]
def p_error(p):
raise ValueError()
return parsing.yacc(tabmodule="ogip_parsetab", package="astropy/units")
@classmethod
def _validate_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
f"Unit '{unit}' not supported by the OGIP standard. "
+ utils.did_you_mean_units(
unit,
cls._units,
cls._deprecated_units,
cls._to_decomposed_alternative,
),
)
else:
raise ValueError()
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], "OGIP", cls._to_decomposed_alternative
)
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
cls._validate_unit(unit, detailed_exception=detailed_exception)
return cls._units[unit]
@classmethod
def parse(cls, s, debug=False):
s = s.strip()
try:
# This is a short circuit for the case where the string is
# just a single unit name
return cls._parse_unit(s, detailed_exception=False)
except ValueError:
try:
return core.Unit(cls._parser.parse(s, lexer=cls._lexer, debug=debug))
except ValueError as e:
if str(e):
raise
else:
raise ValueError(f"Syntax error parsing unit '{s}'")
@classmethod
def _get_unit_name(cls, unit):
name = super()._get_unit_name(unit)
cls._validate_unit(name)
return name
@classmethod
def _format_superscript(cls, number):
return f"**({number})" if "/" in number else f"**{number}"
@classmethod
def to_string(cls, unit, fraction="inline"):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
# Can't use np.log10 here, because the scale may be an arbitrarily large Python int.
if math.log10(unit.scale) % 1.0 != 0.0:
warnings.warn(
f"'{unit.scale}' scale should be a power of 10 in OGIP format",
core.UnitsWarning,
)
return super().to_string(unit, fraction=fraction)
@classmethod
def _to_decomposed_alternative(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
# Can't use np.log10 here, because the scale may be an arbitrarily large Python int.
if math.log10(unit.scale) % 1.0 != 0.0:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return (
f"{generic._to_string(cls, unit)} (with data multiplied by {scale})"
)
return super().to_string(unit)
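# Hedged usage sketch (illustrative only): OGIP unit strings usually come from
# FITS headers and are parsed through the public astropy.units API; whitespace
# and '/' are significant in the grammar above.
def _example_ogip_usage():
    from astropy import units as u
    parsed = u.Unit("count /s", format="ogip")
    rendered = (u.erg / (u.cm**2 * u.s)).to_string(format="ogip")
    return parsed, rendered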
|
c05ff363874c65f9d3f6a2be9d1bd37f73a0d48ee4d660bd6131e0d2fbe7be72 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities shared by the different formats.
"""
import warnings
from astropy.units.utils import maybe_simple_fraction
from astropy.utils.misc import did_you_mean
def get_grouped_by_powers(bases, powers):
"""
Groups the powers and bases in the given
`~astropy.units.CompositeUnit` into positive powers and
negative powers for easy display on either side of a solidus.
Parameters
----------
bases : list of `astropy.units.UnitBase` instances
powers : list of int
Returns
-------
positives, negatives : tuple of lists
Each element in each list is tuple of the form (*base*,
*power*). The negatives have the sign of their power reversed
(i.e. the powers are all positive).
"""
positive = []
negative = []
for base, power in zip(bases, powers):
if power < 0:
negative.append((base, -power))
elif power > 0:
positive.append((base, power))
else:
raise ValueError("Unit with 0 power")
return positive, negative
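# Illustrative example (a sketch of the expected behaviour, assuming the
# astropy units are imported as ``u``):
#
#     >>> get_grouped_by_powers([u.m, u.s], [1, -2])
#     ([(Unit("m"), 1)], [(Unit("s"), 2)])
#
# i.e. the negative power ends up on the "negatives" side with its sign flipped.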
def split_mantissa_exponent(v, format_spec=".8g"):
"""
Given a number, split it into its mantissa and base 10 exponent
parts, each as strings. If the exponent is too small, it may be
returned as the empty string.
Parameters
----------
v : float
format_spec : str, optional
Number representation formatting string
Returns
-------
mantissa, exponent : tuple of strings
"""
x = format(v, format_spec).split("e")
if x[0] != "1." + "0" * (len(x[0]) - 2):
m = x[0]
else:
m = ""
if len(x) == 2:
ex = x[1].lstrip("0+")
if len(ex) > 0 and ex[0] == "-":
ex = "-" + ex[1:].lstrip("0")
else:
ex = ""
return m, ex
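# For example (expected behaviour, shown as a sketch rather than a doctest):
#
#     >>> split_mantissa_exponent(3.5e-10)
#     ('3.5', '-10')
#     >>> split_mantissa_exponent(123.0)
#     ('123', '')
#
# The exponent part is empty when the default ".8g" formatting does not use
# exponential notation.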
def decompose_to_known_units(unit, func):
"""
Partially decomposes a unit so it is only composed of units that
are "known" to a given format.
Parameters
----------
unit : `~astropy.units.UnitBase` instance
func : callable
This function will be called to determine if a given unit is
"known". If the unit is not known, this function should raise a
`ValueError`.
Returns
-------
unit : `~astropy.units.UnitBase` instance
A flattened unit.
"""
from astropy.units import core
if isinstance(unit, core.CompositeUnit):
new_unit = core.Unit(unit.scale)
for base, power in zip(unit.bases, unit.powers):
new_unit = new_unit * decompose_to_known_units(base, func) ** power
return new_unit
elif isinstance(unit, core.NamedUnit):
try:
func(unit)
except ValueError:
if isinstance(unit, core.Unit):
return decompose_to_known_units(unit._represents, func)
raise
return unit
else:
raise TypeError(
f"unit argument must be a 'NamedUnit' or 'CompositeUnit', not {type(unit)}"
)
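# A rough sketch of the intent (``only_si_bases`` is a hypothetical "known
# units" check, not part of this module; astropy units imported as ``u``):
#
#     def only_si_bases(unit):
#         if unit not in (u.kg, u.m, u.s, u.A, u.K, u.mol, u.cd, u.rad):
#             raise ValueError(f"{unit} is not known")
#
#     decompose_to_known_units(u.Jy, only_si_bases)   # roughly 1e-26 kg / s2
#
# Named units the format does not know are recursively replaced by what they
# represent, with the numeric scale carried along in the returned unit.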
def format_power(power):
"""
Converts a value for a power (which may be floating point or a
`fractions.Fraction` object), into a string looking like either
an integer or a fraction, if the power is close to that.
"""
if not hasattr(power, "denominator"):
power = maybe_simple_fraction(power)
if getattr(power, "denonimator", None) == 1:
power = power.numerator
return str(power)
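# For instance (expected behaviour, shown as a sketch):
#
#     >>> from fractions import Fraction
#     >>> format_power(Fraction(1, 3))
#     '1/3'
#     >>> format_power(2)
#     '2'
#     >>> format_power(0.5)
#     '1/2'
#
# A float that is (close to) a simple fraction is first converted via
# maybe_simple_fraction, and integral powers are printed without a denominator.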
def _try_decomposed(unit, format_decomposed):
represents = getattr(unit, "_represents", None)
if represents is not None:
try:
represents_string = format_decomposed(represents)
except ValueError:
pass
else:
return represents_string
decomposed = unit.decompose()
if decomposed is not unit:
try:
decompose_string = format_decomposed(decomposed)
except ValueError:
pass
else:
return decompose_string
return None
def did_you_mean_units(s, all_units, deprecated_units, format_decomposed):
"""
A wrapper around `astropy.utils.misc.did_you_mean` that deals with
the display of deprecated units.
Parameters
----------
s : str
The invalid unit string
all_units : dict
A mapping from valid unit names to unit objects.
deprecated_units : sequence
The deprecated unit names
format_decomposed : callable
A function to turn a decomposed version of the unit into a
string. Should return `None` if not possible
Returns
-------
msg : str
A string message with a list of alternatives, or the empty
string.
"""
def fix_deprecated(x):
if x in deprecated_units:
results = [x + " (deprecated)"]
decomposed = _try_decomposed(all_units[x], format_decomposed)
if decomposed is not None:
results.append(decomposed)
return results
return (x,)
return did_you_mean(s, all_units, fix=fix_deprecated)
def unit_deprecation_warning(s, unit, standard_name, format_decomposed):
"""
Raises a UnitsWarning about a deprecated unit in a given format.
Suggests a decomposed alternative if one is available.
Parameters
----------
s : str
The deprecated unit name.
unit : astropy.units.core.UnitBase
The unit object.
standard_name : str
The name of the format for which the unit is deprecated.
format_decomposed : callable
A function to turn a decomposed version of the unit into a
string. Should return `None` if not possible
"""
from astropy.units.core import UnitsWarning
message = f"The unit '{s}' has been deprecated in the {standard_name} standard."
decomposed = _try_decomposed(unit, format_decomposed)
if decomposed is not None:
message += f" Suggested: {decomposed}."
warnings.warn(message, UnitsWarning)
|
b07d517b548d53e8c12be201a8f598db61bb3bae1b530c9a68d172887e793fe1 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to this file.
# cds_parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'CLOSE_BRACKET CLOSE_PAREN DIMENSIONLESS DIVISION OPEN_BRACKET OPEN_PAREN PRODUCT SIGN UFLOAT UINT UNIT X\n main : factor combined_units\n | combined_units\n | DIMENSIONLESS\n | OPEN_BRACKET combined_units CLOSE_BRACKET\n | OPEN_BRACKET DIMENSIONLESS CLOSE_BRACKET\n | factor\n \n combined_units : product_of_units\n | division_of_units\n \n product_of_units : unit_expression PRODUCT combined_units\n | unit_expression\n \n division_of_units : DIVISION unit_expression\n | combined_units DIVISION unit_expression\n \n unit_expression : unit_with_power\n | OPEN_PAREN combined_units CLOSE_PAREN\n \n factor : signed_float X UINT signed_int\n | UINT X UINT signed_int\n | UINT signed_int\n | UINT\n | signed_float\n \n unit_with_power : UNIT numeric_power\n | UNIT\n \n numeric_power : sign UINT\n \n sign : SIGN\n |\n \n signed_int : SIGN UINT\n \n signed_float : sign UINT\n | sign UFLOAT\n '
_lr_action_items = {'DIMENSIONLESS':([0,5,],[4,20,]),'OPEN_BRACKET':([0,],[5,]),'UINT':([0,10,13,16,21,22,24,31,],[7,25,-23,-24,35,36,37,40,]),'DIVISION':([0,2,3,5,6,7,8,9,11,14,15,16,17,19,23,25,26,27,28,29,30,32,37,38,39,40,41,42,],[12,12,18,12,-19,-18,-7,-8,-10,-13,12,-21,18,18,-17,-26,-27,12,-11,18,-20,-12,-25,18,-14,-22,-15,-16,]),'SIGN':([0,7,16,35,36,],[13,24,13,24,24,]),'UFLOAT':([0,10,13,],[-24,26,-23,]),'OPEN_PAREN':([0,2,5,6,7,12,15,18,23,25,26,27,37,41,42,],[15,15,15,-19,-18,15,15,15,-17,-26,-27,15,-25,-15,-16,]),'UNIT':([0,2,5,6,7,12,15,18,23,25,26,27,37,41,42,],[16,16,16,-19,-18,16,16,16,-17,-26,-27,16,-25,-15,-16,]),'$end':([1,2,3,4,6,7,8,9,11,14,16,17,23,25,26,28,30,32,33,34,37,38,39,40,41,42,],[0,-6,-2,-3,-19,-18,-7,-8,-10,-13,-21,-1,-17,-26,-27,-11,-20,-12,-4,-5,-25,-9,-14,-22,-15,-16,]),'X':([6,7,25,26,],[21,22,-26,-27,]),'CLOSE_BRACKET':([8,9,11,14,16,19,20,28,30,32,38,39,40,],[-7,-8,-10,-13,-21,33,34,-11,-20,-12,-9,-14,-22,]),'CLOSE_PAREN':([8,9,11,14,16,28,29,30,32,38,39,40,],[-7,-8,-10,-13,-21,-11,39,-20,-12,-9,-14,-22,]),'PRODUCT':([11,14,16,30,39,40,],[27,-13,-21,-20,-14,-22,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'main':([0,],[1,]),'factor':([0,],[2,]),'combined_units':([0,2,5,15,27,],[3,17,19,29,38,]),'signed_float':([0,],[6,]),'product_of_units':([0,2,5,15,27,],[8,8,8,8,8,]),'division_of_units':([0,2,5,15,27,],[9,9,9,9,9,]),'sign':([0,16,],[10,31,]),'unit_expression':([0,2,5,12,15,18,27,],[11,11,11,28,11,32,11,]),'unit_with_power':([0,2,5,12,15,18,27,],[14,14,14,14,14,14,14,]),'signed_int':([7,35,36,],[23,41,42,]),'numeric_power':([16,],[30,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> main","S'",1,None,None,None),
('main -> factor combined_units','main',2,'p_main','cds.py',147),
('main -> combined_units','main',1,'p_main','cds.py',148),
('main -> DIMENSIONLESS','main',1,'p_main','cds.py',149),
('main -> OPEN_BRACKET combined_units CLOSE_BRACKET','main',3,'p_main','cds.py',150),
('main -> OPEN_BRACKET DIMENSIONLESS CLOSE_BRACKET','main',3,'p_main','cds.py',151),
('main -> factor','main',1,'p_main','cds.py',152),
('combined_units -> product_of_units','combined_units',1,'p_combined_units','cds.py',166),
('combined_units -> division_of_units','combined_units',1,'p_combined_units','cds.py',167),
('product_of_units -> unit_expression PRODUCT combined_units','product_of_units',3,'p_product_of_units','cds.py',173),
('product_of_units -> unit_expression','product_of_units',1,'p_product_of_units','cds.py',174),
('division_of_units -> DIVISION unit_expression','division_of_units',2,'p_division_of_units','cds.py',183),
('division_of_units -> combined_units DIVISION unit_expression','division_of_units',3,'p_division_of_units','cds.py',184),
('unit_expression -> unit_with_power','unit_expression',1,'p_unit_expression','cds.py',193),
('unit_expression -> OPEN_PAREN combined_units CLOSE_PAREN','unit_expression',3,'p_unit_expression','cds.py',194),
('factor -> signed_float X UINT signed_int','factor',4,'p_factor','cds.py',203),
('factor -> UINT X UINT signed_int','factor',4,'p_factor','cds.py',204),
('factor -> UINT signed_int','factor',2,'p_factor','cds.py',205),
('factor -> UINT','factor',1,'p_factor','cds.py',206),
('factor -> signed_float','factor',1,'p_factor','cds.py',207),
('unit_with_power -> UNIT numeric_power','unit_with_power',2,'p_unit_with_power','cds.py',222),
('unit_with_power -> UNIT','unit_with_power',1,'p_unit_with_power','cds.py',223),
('numeric_power -> sign UINT','numeric_power',2,'p_numeric_power','cds.py',232),
('sign -> SIGN','sign',1,'p_sign','cds.py',238),
('sign -> <empty>','sign',0,'p_sign','cds.py',239),
('signed_int -> SIGN UINT','signed_int',2,'p_signed_int','cds.py',248),
('signed_float -> sign UINT','signed_float',2,'p_signed_float','cds.py',254),
('signed_float -> sign UFLOAT','signed_float',2,'p_signed_float','cds.py',255),
]
|
a3b4ab090120d3d06dd86da152fa7ff015966e13ef976e5637fe0c35c9fbf6bc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "FITS" unit format.
"""
import copy
import keyword
import numpy as np
from . import core, generic, utils
class Fits(generic.Generic):
"""
The FITS standard unit format.
This supports the format defined in the Units section of the `FITS
Standard <https://fits.gsfc.nasa.gov/fits_standard.html>`_.
"""
@staticmethod
def _generate_unit_names():
from astropy import units as u
# add some units up-front for which we don't want to use prefixes
# and that have different names from the astropy default.
names = {
"Celsius": u.deg_C,
"deg C": u.deg_C,
}
deprecated_names = set()
bases = [
"m", "g", "s", "rad", "sr", "K", "A", "mol", "cd",
"Hz", "J", "W", "V", "N", "Pa", "C", "Ohm", "S",
"F", "Wb", "T", "H", "lm", "lx", "a", "yr", "eV",
"pc", "Jy", "mag", "R", "bit", "byte", "G", "barn",
] # fmt: skip
deprecated_bases = []
prefixes = [
"y", "z", "a", "f", "p", "n", "u", "m", "c", "d",
"", "da", "h", "k", "M", "G", "T", "P", "E", "Z", "Y",
] # fmt: skip
special_cases = {"dbyte": u.Unit("dbyte", 0.1 * u.byte)}
for base in bases + deprecated_bases:
for prefix in prefixes:
key = prefix + base
if keyword.iskeyword(key):
continue
elif key in special_cases:
names[key] = special_cases[key]
else:
names[key] = getattr(u, key)
for base in deprecated_bases:
for prefix in prefixes:
deprecated_names.add(prefix + base)
simple_units = [
"deg", "arcmin", "arcsec", "mas", "min", "h", "d", "Ry",
"solMass", "u", "solLum", "solRad", "AU", "lyr", "count",
"ct", "photon", "ph", "pixel", "pix", "D", "Sun", "chan",
"bin", "voxel", "adu", "beam", "erg", "Angstrom", "angstrom",
] # fmt: skip
deprecated_units = []
for unit in simple_units + deprecated_units:
names[unit] = getattr(u, unit)
for unit in deprecated_units:
deprecated_names.add(unit)
return names, deprecated_names, []
@classmethod
def _validate_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
f"Unit '{unit}' not supported by the FITS standard. "
+ utils.did_you_mean_units(
unit,
cls._units,
cls._deprecated_units,
cls._to_decomposed_alternative,
),
)
else:
raise ValueError()
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], "FITS", cls._to_decomposed_alternative
)
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
cls._validate_unit(unit, detailed_exception=detailed_exception)
return cls._units[unit]
@classmethod
def _get_unit_name(cls, unit):
name = super()._get_unit_name(unit)
cls._validate_unit(name)
return name
@classmethod
def to_string(cls, unit, fraction=False):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
parts = []
base = np.log10(unit.scale)
if base % 1.0 != 0.0:
raise core.UnitScaleError(
"The FITS unit format is not able to represent scales "
"that are not powers of 10. Multiply your data by "
f"{unit.scale:e}."
)
elif unit.scale != 1.0:
# We could override format_exponential_notation to set the
# scale factor but that would give the wrong impression that
# all values in FITS are set that way. So, instead do it
# here, and use a unity-scale unit for the rest.
parts.append(f"10**{int(base)}")
unit = core.CompositeUnit(1, unit.bases, unit.powers)
if unit.bases:
parts.append(super().to_string(unit, fraction=fraction))
return cls._scale_unit_separator.join(parts)
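    # For example, a power-of-ten scale is pulled out in front of the unit
    # string (a sketch of the expected output, with astropy units as ``u``):
    #
    #     Fits.to_string(u.Unit("1e3 m"))   # -> "10**3 m"
    #
    # whereas a scale that is not a power of ten raises UnitScaleError, which
    # _to_decomposed_alternative below turns into a human-readable suggestion.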
@classmethod
def _to_decomposed_alternative(cls, unit):
try:
s = cls.to_string(unit)
except core.UnitScaleError:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return f"{cls.to_string(unit)} (with data multiplied by {scale})"
return s
@classmethod
def parse(cls, s, debug=False):
result = super().parse(s, debug)
if hasattr(result, "function_unit"):
raise ValueError("Function units are not yet supported for FITS units.")
return result
|
5e82eeec1fa1a6451d2a4a1962ced9ad1d1805f855fb18502468b2e632326e0a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles a "generic" string format for units
"""
import re
import unicodedata
import warnings
from fractions import Fraction
from astropy.utils import classproperty, deprecated, parsing
from astropy.utils.misc import did_you_mean
from . import core
from .base import Base
class Generic(Base):
"""
A "generic" format.
The syntax of the format is based directly on the FITS standard,
but instead of only supporting the units that FITS knows about, it
supports any unit available in the `astropy.units` namespace.
"""
_tokens = (
"COMMA",
"DOUBLE_STAR",
"STAR",
"PERIOD",
"SOLIDUS",
"CARET",
"OPEN_PAREN",
"CLOSE_PAREN",
"FUNCNAME",
"UNIT",
"SIGN",
"UINT",
"UFLOAT",
)
@classproperty(lazy=True)
def _all_units(cls):
return cls._generate_unit_names()
@classproperty(lazy=True)
def _units(cls):
return cls._all_units[0]
@classproperty(lazy=True)
def _deprecated_units(cls):
return cls._all_units[1]
@classproperty(lazy=True)
def _functions(cls):
return cls._all_units[2]
@classproperty(lazy=True)
def _parser(cls):
return cls._make_parser()
@classproperty(lazy=True)
def _lexer(cls):
return cls._make_lexer()
@classmethod
def _make_lexer(cls):
tokens = cls._tokens
t_COMMA = r"\,"
t_STAR = r"\*"
t_PERIOD = r"\."
t_SOLIDUS = r"/"
t_DOUBLE_STAR = r"\*\*"
t_CARET = r"\^"
t_OPEN_PAREN = r"\("
t_CLOSE_PAREN = r"\)"
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r"((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+)?"
if not re.search(r"[eE\.]", t.value):
t.type = "UINT"
t.value = int(t.value)
elif t.value.endswith("."):
t.type = "UINT"
t.value = int(t.value[:-1])
else:
t.value = float(t.value)
return t
def t_UINT(t):
r"\d+"
t.value = int(t.value)
return t
def t_SIGN(t):
r"[+-](?=\d)"
t.value = int(t.value + "1")
return t
# This needs to be a function so we can force it to happen
# before t_UNIT
def t_FUNCNAME(t):
r"((sqrt)|(ln)|(exp)|(log)|(mag)|(dB)|(dex))(?=\ *\()"
return t
def t_UNIT(t):
"%|([YZEPTGMkhdcmu\N{MICRO SIGN}npfazy]?'((?!\\d)\\w)+')|((?!\\d)\\w)+"
t.value = cls._get_unit(t)
return t
t_ignore = " "
# Error handling rule
def t_error(t):
raise ValueError(f"Invalid character at col {t.lexpos}")
return parsing.lex(
lextab="generic_lextab", package="astropy/units", reflags=int(re.UNICODE)
)
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the `FITS
standard
<http://fits.gsfc.nasa.gov/standard30/fits_standard30aa.pdf>`_,
Section 4.3, which is not terribly precise. The exact grammar
        here is based on the YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
This same grammar is used by the `"fits"` and `"vounit"`
formats, the only difference being the set of available unit
strings.
"""
tokens = cls._tokens
def p_main(p):
"""
main : unit
| structured_unit
| structured_subunit
"""
if isinstance(p[1], tuple):
# Unpack possible StructuredUnit inside a tuple, ie.,
# ignore any set of very outer parentheses.
p[0] = p[1][0]
else:
p[0] = p[1]
def p_structured_subunit(p):
"""
structured_subunit : OPEN_PAREN structured_unit CLOSE_PAREN
"""
# We hide a structured unit enclosed by parentheses inside
# a tuple, so that we can easily distinguish units like
# "(au, au/day), yr" from "au, au/day, yr".
p[0] = (p[2],)
def p_structured_unit(p):
"""
structured_unit : subunit COMMA
| subunit COMMA subunit
"""
from astropy.units.structured import StructuredUnit
inputs = (p[1],) if len(p) == 3 else (p[1], p[3])
units = ()
for subunit in inputs:
if isinstance(subunit, tuple):
# Structured unit that should be its own entry in the
# new StructuredUnit (was enclosed in parentheses).
units += subunit
elif isinstance(subunit, StructuredUnit):
# Structured unit whose entries should be
                    # individually added to the new StructuredUnit.
units += subunit.values()
else:
# Regular unit to be added to the StructuredUnit.
units += (subunit,)
p[0] = StructuredUnit(units)
def p_subunit(p):
"""
subunit : unit
| structured_unit
| structured_subunit
"""
p[0] = p[1]
def p_unit(p):
"""
unit : product_of_units
| factor product_of_units
| factor product product_of_units
| division_product_of_units
| factor division_product_of_units
| factor product division_product_of_units
| inverse_unit
| factor inverse_unit
| factor product inverse_unit
| factor
"""
from astropy.units.core import Unit
if len(p) == 2:
p[0] = Unit(p[1])
elif len(p) == 3:
p[0] = Unit(p[1] * p[2])
elif len(p) == 4:
p[0] = Unit(p[1] * p[3])
def p_division_product_of_units(p):
"""
division_product_of_units : division_product_of_units division product_of_units
| product_of_units
"""
from astropy.units.core import Unit
if len(p) == 4:
p[0] = Unit(p[1] / p[3])
else:
p[0] = p[1]
def p_inverse_unit(p):
"""
inverse_unit : division unit_expression
"""
p[0] = p[2] ** -1
def p_factor(p):
"""
factor : factor_fits
| factor_float
| factor_int
"""
p[0] = p[1]
def p_factor_float(p):
"""
factor_float : signed_float
| signed_float UINT signed_int
| signed_float UINT power numeric_power
"""
if cls.name == "fits":
raise ValueError("Numeric factor not supported by FITS")
if len(p) == 4:
p[0] = p[1] * p[2] ** float(p[3])
elif len(p) == 5:
p[0] = p[1] * p[2] ** float(p[4])
elif len(p) == 2:
p[0] = p[1]
def p_factor_int(p):
"""
factor_int : UINT
| UINT signed_int
| UINT power numeric_power
| UINT UINT signed_int
| UINT UINT power numeric_power
"""
if cls.name == "fits":
raise ValueError("Numeric factor not supported by FITS")
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] ** float(p[2])
elif len(p) == 4:
if isinstance(p[2], int):
p[0] = p[1] * p[2] ** float(p[3])
else:
p[0] = p[1] ** float(p[3])
elif len(p) == 5:
p[0] = p[1] * p[2] ** p[4]
def p_factor_fits(p):
"""
factor_fits : UINT power OPEN_PAREN signed_int CLOSE_PAREN
| UINT power OPEN_PAREN UINT CLOSE_PAREN
| UINT power signed_int
| UINT power UINT
| UINT SIGN UINT
| UINT OPEN_PAREN signed_int CLOSE_PAREN
"""
if p[1] != 10:
if cls.name == "fits":
raise ValueError("Base must be 10")
else:
return
if len(p) == 4:
if p[2] in ("**", "^"):
p[0] = 10 ** p[3]
else:
p[0] = 10 ** (p[2] * p[3])
elif len(p) == 5:
p[0] = 10 ** p[3]
elif len(p) == 6:
p[0] = 10 ** p[4]
def p_product_of_units(p):
"""
product_of_units : unit_expression product product_of_units
| unit_expression product_of_units
| unit_expression
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1] * p[3]
def p_unit_expression(p):
"""
unit_expression : function
| unit_with_power
| OPEN_PAREN product_of_units CLOSE_PAREN
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
def p_unit_with_power(p):
"""
unit_with_power : UNIT power numeric_power
| UNIT numeric_power
| UNIT
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] ** p[2]
else:
p[0] = p[1] ** p[3]
def p_numeric_power(p):
"""
numeric_power : sign UINT
| OPEN_PAREN paren_expr CLOSE_PAREN
"""
if len(p) == 3:
p[0] = p[1] * p[2]
elif len(p) == 4:
p[0] = p[2]
def p_paren_expr(p):
"""
paren_expr : sign UINT
| signed_float
| frac
"""
if len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1]
def p_frac(p):
"""
frac : sign UINT division sign UINT
"""
p[0] = Fraction(p[1] * p[2], p[4] * p[5])
def p_sign(p):
"""
sign : SIGN
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1
def p_product(p):
"""
product : STAR
| PERIOD
"""
pass
def p_division(p):
"""
division : SOLIDUS
"""
pass
def p_power(p):
"""
power : DOUBLE_STAR
| CARET
"""
p[0] = p[1]
def p_signed_int(p):
"""
signed_int : SIGN UINT
"""
p[0] = p[1] * p[2]
def p_signed_float(p):
"""
signed_float : sign UINT
| sign UFLOAT
"""
p[0] = p[1] * p[2]
def p_function_name(p):
"""
function_name : FUNCNAME
"""
p[0] = p[1]
def p_function(p):
"""
function : function_name OPEN_PAREN main CLOSE_PAREN
"""
if p[1] == "sqrt":
p[0] = p[3] ** 0.5
return
elif p[1] in ("mag", "dB", "dex"):
function_unit = cls._parse_unit(p[1])
# In Generic, this is callable, but that does not have to
# be the case in subclasses (e.g., in VOUnit it is not).
if callable(function_unit):
p[0] = function_unit(p[3])
return
raise ValueError(f"'{p[1]}' is not a recognized function")
def p_error(p):
raise ValueError()
return parsing.yacc(tabmodule="generic_parsetab", package="astropy/units")
@classmethod
def _get_unit(cls, t):
try:
return cls._parse_unit(t.value)
except ValueError as e:
registry = core.get_current_unit_registry()
if t.value in registry.aliases:
return registry.aliases[t.value]
raise ValueError(f"At col {t.lexpos}, {str(e)}")
@classmethod
def _parse_unit(cls, s, detailed_exception=True):
registry = core.get_current_unit_registry().registry
if s in cls._unit_symbols:
s = cls._unit_symbols[s]
elif not s.isascii():
if s[0] == "\N{MICRO SIGN}":
s = "u" + s[1:]
if s[-1] in cls._prefixable_unit_symbols:
s = s[:-1] + cls._prefixable_unit_symbols[s[-1]]
elif len(s) > 1 and s[-1] in cls._unit_suffix_symbols:
s = s[:-1] + cls._unit_suffix_symbols[s[-1]]
elif s.endswith("R\N{INFINITY}"):
s = s[:-2] + "Ry"
if s in registry:
return registry[s]
if detailed_exception:
raise ValueError(f"{s} is not a valid unit. {did_you_mean(s, registry)}")
else:
raise ValueError()
_unit_symbols = {
"%": "percent",
"\N{PRIME}": "arcmin",
"\N{DOUBLE PRIME}": "arcsec",
"\N{MODIFIER LETTER SMALL H}": "hourangle",
"e\N{SUPERSCRIPT MINUS}": "electron",
}
_prefixable_unit_symbols = {
"\N{GREEK CAPITAL LETTER OMEGA}": "Ohm",
"\N{LATIN CAPITAL LETTER A WITH RING ABOVE}": "Angstrom",
"\N{SCRIPT SMALL L}": "l",
}
_unit_suffix_symbols = {
"\N{CIRCLED DOT OPERATOR}": "sun",
"\N{SUN}": "sun",
"\N{CIRCLED PLUS}": "earth",
"\N{EARTH}": "earth",
"\N{JUPITER}": "jupiter",
"\N{LATIN SUBSCRIPT SMALL LETTER E}": "_e",
"\N{LATIN SUBSCRIPT SMALL LETTER P}": "_p",
}
_translations = str.maketrans(
{
"\N{GREEK SMALL LETTER MU}": "\N{MICRO SIGN}",
"\N{MINUS SIGN}": "-",
}
)
"""Character translations that should be applied before parsing a string.
    Note that this explicitly does *not* generally translate MICRO SIGN to u,
    since then a string like 'µ' would be interpreted as the mass unit 'u'.
"""
_superscripts = (
"\N{SUPERSCRIPT MINUS}"
"\N{SUPERSCRIPT PLUS SIGN}"
"\N{SUPERSCRIPT ZERO}"
"\N{SUPERSCRIPT ONE}"
"\N{SUPERSCRIPT TWO}"
"\N{SUPERSCRIPT THREE}"
"\N{SUPERSCRIPT FOUR}"
"\N{SUPERSCRIPT FIVE}"
"\N{SUPERSCRIPT SIX}"
"\N{SUPERSCRIPT SEVEN}"
"\N{SUPERSCRIPT EIGHT}"
"\N{SUPERSCRIPT NINE}"
)
_superscript_translations = str.maketrans(_superscripts, "-+0123456789")
_regex_superscript = re.compile(f"[{_superscripts}]?[{_superscripts[2:]}]+")
_regex_deg = re.compile("°([CF])?")
@classmethod
def _convert_superscript(cls, m):
return f"({m.group().translate(cls._superscript_translations)})"
@classmethod
def _convert_deg(cls, m):
if len(m.string) == 1:
return "deg"
return m.string.replace("°", "deg_")
@classmethod
def parse(cls, s, debug=False):
if not isinstance(s, str):
s = s.decode("ascii")
elif not s.isascii():
# common normalization of unicode strings to avoid
# having to deal with multiple representations of
# the same character. This normalizes to "composed" form
# and will e.g. convert OHM SIGN to GREEK CAPITAL LETTER OMEGA
s = unicodedata.normalize("NFC", s)
# Translate some basic unicode items that we'd like to support on
# input but are not standard.
s = s.translate(cls._translations)
# TODO: might the below be better done in the parser/lexer?
# Translate superscripts to parenthesized numbers; this ensures
# that mixes of superscripts and regular numbers fail.
s = cls._regex_superscript.sub(cls._convert_superscript, s)
# Translate possible degrees.
s = cls._regex_deg.sub(cls._convert_deg, s)
result = cls._do_parse(s, debug=debug)
# Check for excess solidi, but exclude fractional exponents (accepted)
n_slashes = s.count("/")
if n_slashes > 1 and (n_slashes - len(re.findall(r"\(\d+/\d+\)", s))) > 1:
warnings.warn(
"'{}' contains multiple slashes, which is "
"discouraged by the FITS standard".format(s),
core.UnitsWarning,
)
return result
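    # A brief sketch of the kinds of strings ``parse`` accepts (illustrative,
    # not exhaustive):
    #
    #     Generic.parse("km s**-1")   # -> Unit("km / s")
    #     Generic.parse("10**3 m")    # -> Unit("1000 m")
    #     Generic.parse("m²")         # superscripts are rewritten to "(2)" first
    #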
@classmethod
def _do_parse(cls, s, debug=False):
try:
# This is a short circuit for the case where the string
# is just a single unit name
return cls._parse_unit(s, detailed_exception=False)
except ValueError as e:
try:
return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
except ValueError as e:
if str(e):
raise
else:
raise ValueError(f"Syntax error parsing unit '{s}'")
# 2023-02-18: The statement in the docstring is no longer true, the class is not used
# anywhere so can be safely removed in 6.0.
@deprecated("5.3", alternative="Generic")
class Unscaled(Generic):
"""
A format that doesn't display the scale part of the unit, other
than that, it is identical to the `Generic` format.
This is used in some error messages where the scale is irrelevant.
"""
@classmethod
def to_string(cls, unit):
if unit.scale != 1:
unit = core.Unit(unit / unit.scale)
return super().to_string(unit)
|
9456e35b7d78b28f8b4aaba78f2b0b129fd6b887db67004fd0b31f67e5ad051a | # Licensed under a 3-clause BSD style license. See LICENSE.rst except
# for parts explicitly labelled as being (largely) copies of numpy
# implementations; for those, see licenses/NUMPY_LICENSE.rst.
"""Helpers for overriding numpy functions.
We override numpy functions in `~astropy.units.Quantity.__array_function__`.
In this module, the numpy functions are split in four groups, each of
which has an associated `set` or `dict`:
1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
supports Quantity; we pass on to ndarray.__array_function__.
2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
after converting quantities to arrays with suitable units,
and possibly setting units on the result.
3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
requires a Quantity-specific implementation
4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
For the FUNCTION_HELPERS `dict`, the value is a function that does the
unit conversion. It should take the same arguments as the numpy
function would (though one can use ``*args`` and ``**kwargs``) and
return a tuple of ``args, kwargs, unit, out``, where ``args`` and
``kwargs`` will be passed on to the numpy implementation,
``unit`` is a possible unit of the result (`None` if it should not be
converted to Quantity), and ``out`` is a possible output Quantity passed
in, which will be filled in-place.
For the DISPATCHED_FUNCTIONS `dict`, the value is a function that
implements the numpy functionality for Quantity input. It should
return a tuple of ``result, unit, out``, where ``result`` is generally
a plain array with the result, and ``unit`` and ``out`` are as above.
If unit is `None`, result gets returned directly, so one can also
return a Quantity directly using ``quantity_result, None, None``.
"""
import functools
import operator
import numpy as np
from numpy.lib import recfunctions as rfn
from astropy.units.core import (
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
)
from astropy.utils import isiterable
from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_23
# In 1.17, overrides are enabled by default, but it is still possible to
# turn them off using an environment variable. We use getattr since it
# is planned to remove that possibility in later numpy versions.
ARRAY_FUNCTION_ENABLED = getattr(np.core.overrides, "ENABLE_ARRAY_FUNCTION", True)
SUBCLASS_SAFE_FUNCTIONS = set()
"""Functions with implementations supporting subclasses like Quantity."""
FUNCTION_HELPERS = {}
"""Functions with implementations usable with proper unit conversion."""
DISPATCHED_FUNCTIONS = {}
"""Functions for which we provide our own implementation."""
UNSUPPORTED_FUNCTIONS = set()
"""Functions that cannot sensibly be used with quantities."""
SUBCLASS_SAFE_FUNCTIONS |= {
np.shape, np.size, np.ndim,
np.reshape, np.ravel, np.moveaxis, np.rollaxis, np.swapaxes,
np.transpose, np.atleast_1d, np.atleast_2d, np.atleast_3d,
np.expand_dims, np.squeeze, np.broadcast_to, np.broadcast_arrays,
np.flip, np.fliplr, np.flipud, np.rot90,
np.argmin, np.argmax, np.argsort, np.lexsort, np.searchsorted,
np.nonzero, np.argwhere, np.flatnonzero,
np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
np.real, np.imag, np.diagonal, np.diagflat, np.empty_like,
np.compress, np.extract, np.delete, np.trim_zeros, np.roll, np.take,
np.put, np.fill_diagonal, np.tile, np.repeat,
np.split, np.array_split, np.hsplit, np.vsplit, np.dsplit,
np.stack, np.column_stack, np.hstack, np.vstack, np.dstack,
np.max, np.min, np.amax, np.amin, np.ptp, np.sum, np.cumsum,
np.prod, np.product, np.cumprod, np.cumproduct,
np.round, np.around,
np.round_, # Alias for np.round in NUMPY_LT_1_25, but deprecated since.
np.fix, np.angle, np.i0, np.clip,
np.isposinf, np.isneginf, np.isreal, np.iscomplex,
np.average, np.mean, np.std, np.var, np.trace,
np.nanmax, np.nanmin, np.nanargmin, np.nanargmax, np.nanmean,
np.nanmedian, np.nansum, np.nancumsum, np.nanstd, np.nanvar,
np.nanprod, np.nancumprod,
np.einsum_path, np.trapz, np.linspace,
np.sort, np.msort, np.partition, np.meshgrid,
np.common_type, np.result_type, np.can_cast, np.min_scalar_type,
np.iscomplexobj, np.isrealobj,
np.shares_memory, np.may_share_memory,
np.apply_along_axis, np.take_along_axis, np.put_along_axis,
np.linalg.cond, np.linalg.multi_dot,
} # fmt: skip
if not NUMPY_LT_1_22:
SUBCLASS_SAFE_FUNCTIONS |= {np.median}
# Implemented as methods on Quantity:
# np.ediff1d is from setops, but we support it anyway; the others
# currently return NotImplementedError.
# TODO: move latter to UNSUPPORTED? Would raise TypeError instead.
SUBCLASS_SAFE_FUNCTIONS |= {np.ediff1d}
UNSUPPORTED_FUNCTIONS |= {
np.packbits, np.unpackbits, np.unravel_index,
np.ravel_multi_index, np.ix_, np.cov, np.corrcoef,
np.busday_count, np.busday_offset, np.datetime_as_string,
np.is_busday, np.all, np.any, np.sometrue, np.alltrue,
} # fmt: skip
# Could be supported if we had a natural logarithm unit.
UNSUPPORTED_FUNCTIONS |= {np.linalg.slogdet}
TBD_FUNCTIONS = {
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.apply_along_fields, rfn.assign_fields_by_name,
rfn.find_duplicates, rfn.recursive_fill_fields, rfn.require_fields,
rfn.repack_fields, rfn.stack_arrays,
} # fmt: skip
UNSUPPORTED_FUNCTIONS |= TBD_FUNCTIONS
IGNORED_FUNCTIONS = {
# I/O - useless for Quantity, since no way to store the unit.
np.save, np.savez, np.savetxt, np.savez_compressed,
# Polynomials
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
# functions taking record arrays (which are deprecated)
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
} # fmt: skip
if NUMPY_LT_1_23:
IGNORED_FUNCTIONS |= {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
class FunctionAssigner:
def __init__(self, assignments):
self.assignments = assignments
def __call__(self, f=None, helps=None, module=np):
"""Add a helper to a numpy function.
Normally used as a decorator.
If ``helps`` is given, it should be the numpy function helped (or an
iterable of numpy functions helped).
If ``helps`` is not given, it is assumed the function helped is the
numpy function with the same name as the decorated function.
"""
if f is not None:
if helps is None:
helps = getattr(module, f.__name__)
if not isiterable(helps):
helps = (helps,)
for h in helps:
self.assignments[h] = f
return f
elif helps is not None or module is not np:
return functools.partial(self.__call__, helps=helps, module=module)
else: # pragma: no cover
raise ValueError("function_helper requires at least one argument.")
function_helper = FunctionAssigner(FUNCTION_HELPERS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
# fmt: off
@function_helper(
helps={
np.copy, np.asfarray, np.real_if_close, np.sort_complex, np.resize,
np.fft.fft, np.fft.ifft, np.fft.rfft, np.fft.irfft,
np.fft.fft2, np.fft.ifft2, np.fft.rfft2, np.fft.irfft2,
np.fft.fftn, np.fft.ifftn, np.fft.rfftn, np.fft.irfftn,
np.fft.hfft, np.fft.ihfft,
np.linalg.eigvals, np.linalg.eigvalsh,
}
)
# fmt: on
def invariant_a_helper(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, a.unit, None
@function_helper(helps={np.tril, np.triu})
def invariant_m_helper(m, *args, **kwargs):
return (m.view(np.ndarray),) + args, kwargs, m.unit, None
@function_helper(helps={np.fft.fftshift, np.fft.ifftshift})
def invariant_x_helper(x, *args, **kwargs):
return (x.view(np.ndarray),) + args, kwargs, x.unit, None
# Note that ones_like does *not* work by default since if one creates an empty
# array with a unit, one cannot just fill it with unity. Indeed, in this
# respect, it is a bit of an odd function for Quantity. On the other hand, it
# matches the idea that a unit is the same as the quantity with that unit and
# value of 1. Also, it used to work without __array_function__.
# zeros_like does work by default for regular quantities, because numpy first
# creates an empty array with the unit and then fills it with 0 (which can have
# any unit), but for structured dtype this fails (0 cannot have an arbitrary
# structured unit), so we include it here too.
@function_helper(helps={np.ones_like, np.zeros_like})
def like_helper(a, *args, **kwargs):
subok = args[2] if len(args) > 2 else kwargs.pop("subok", True)
unit = a.unit if subok else None
return (a.view(np.ndarray),) + args, kwargs, unit, None
@function_helper
def sinc(x):
from astropy.units.si import radian
try:
x = x.to_value(radian)
except UnitsError:
raise UnitTypeError(
"Can only apply 'sinc' function to quantities with angle units"
)
return (x,), {}, dimensionless_unscaled, None
@dispatched_function
def unwrap(p, discont=None, axis=-1):
from astropy.units.si import radian
if discont is None:
discont = np.pi << radian
p, discont = _as_quantities(p, discont)
result = np.unwrap.__wrapped__(
p.to_value(radian), discont.to_value(radian), axis=axis
)
result = radian.to(p.unit, result)
return result, p.unit, None
@function_helper
def argpartition(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, None, None
@function_helper
def full_like(a, fill_value, *args, **kwargs):
unit = a.unit if kwargs.get("subok", True) else None
return (a.view(np.ndarray), a._to_own_unit(fill_value)) + args, kwargs, unit, None
@function_helper
def putmask(a, mask, values):
from astropy.units import Quantity
if isinstance(a, Quantity):
return (a.view(np.ndarray), mask, a._to_own_unit(values)), {}, a.unit, None
elif isinstance(values, Quantity):
return (a, mask, values.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def place(arr, mask, vals):
from astropy.units import Quantity
if isinstance(arr, Quantity):
return (arr.view(np.ndarray), mask, arr._to_own_unit(vals)), {}, arr.unit, None
elif isinstance(vals, Quantity):
return (arr, mask, vals.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def copyto(dst, src, *args, **kwargs):
from astropy.units import Quantity
if isinstance(dst, Quantity):
return (dst.view(np.ndarray), dst._to_own_unit(src)) + args, kwargs, None, None
elif isinstance(src, Quantity):
return (dst, src.to_value(dimensionless_unscaled)) + args, kwargs, None, None
else:
raise NotImplementedError
@function_helper
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
nan = x._to_own_unit(nan)
if posinf is not None:
posinf = x._to_own_unit(posinf)
if neginf is not None:
neginf = x._to_own_unit(neginf)
return (
(x.view(np.ndarray),),
dict(copy=True, nan=nan, posinf=posinf, neginf=neginf),
x.unit,
None,
)
def _as_quantity(a):
"""Convert argument to a Quantity (or raise NotImplementedError)."""
from astropy.units import Quantity
try:
return Quantity(a, copy=False, subok=True)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _as_quantities(*args):
"""Convert arguments to Quantity (or raise NotImplentedError)."""
from astropy.units import Quantity
try:
# Note: this should keep the dtype the same
return tuple(Quantity(a, copy=False, subok=True, dtype=None) for a in args)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _quantities2arrays(*args, unit_from_first=False):
"""Convert to arrays in units of the first argument that has a unit.
    If unit_from_first, take the unit of the first argument regardless of
    whether it actually defined a unit (e.g., dimensionless for arrays).
"""
# Turn first argument into a quantity.
q = _as_quantity(args[0])
if len(args) == 1:
return (q.value,), q.unit
# If we care about the unit being explicit, then check whether this
# argument actually had a unit, or was likely inferred.
if not unit_from_first and (
q.unit is q._default_unit and not hasattr(args[0], "unit")
):
        # Here, the argument could still be something like [10*u.one, 11.*u.one],
        # i.e., properly dimensionless. So, we only override with anything
        # that has a unit not equivalent to dimensionless (fine to let other
        # dimensionless units pass, even if explicitly given).
for arg in args[1:]:
trial = _as_quantity(arg)
if not trial.unit.is_equivalent(q.unit):
# Use any explicit unit not equivalent to dimensionless.
q = trial
break
# We use the private _to_own_unit method here instead of just
# converting everything to quantity and then do .to_value(qs0.unit)
# as we want to allow arbitrary unit for 0, inf, and nan.
try:
arrays = tuple((q._to_own_unit(arg)) for arg in args)
except TypeError:
raise NotImplementedError
return arrays, q.unit
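# Sketch of the resulting contract (hypothetical values, with astropy units as
# ``u``): ``_quantities2arrays(1 * u.km, 500 * u.m)`` returns roughly
# ``((1.0, 0.5), Unit("km"))`` -- both values expressed in the unit of the
# first argument that carries an explicit unit.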
def _iterable_helper(*args, out=None, **kwargs):
"""Convert arguments to Quantity, and treat possible 'out'."""
from astropy.units import Quantity
if out is not None:
if isinstance(out, Quantity):
kwargs["out"] = out.view(np.ndarray)
else:
# TODO: for an ndarray output, we could in principle
# try converting all Quantity to dimensionless.
raise NotImplementedError
arrays, unit = _quantities2arrays(*args)
return arrays, kwargs, unit, out
if NUMPY_LT_1_22:
@function_helper
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
kwargs = {"overwrite_input": overwrite_input, "keepdims": keepdims}
if out is not None:
from astropy.units import Quantity
if not isinstance(out, Quantity):
raise NotImplementedError
# We may get here just because of out, so ensure input is Quantity.
a = _as_quantity(a)
kwargs["out"] = out.view(np.ndarray)
return (a.value, axis), kwargs, a.unit, out
@function_helper
def concatenate(arrays, axis=0, out=None, **kwargs):
# TODO: make this smarter by creating an appropriately shaped
# empty output array and just filling it.
arrays, kwargs, unit, out = _iterable_helper(*arrays, out=out, axis=axis, **kwargs)
return (arrays,), kwargs, unit, out
@dispatched_function
def block(arrays):
# We need to override block since the numpy implementation can take two
# different paths, one for concatenation, one for creating a large empty
# result array in which parts are set. Each assumes array input and
# cannot be used directly. Since it would be very costly to inspect all
# arrays and then turn them back into a nested list, we just copy here the
# second implementation, np.core.shape_base._block_slicing, since it is
# shortest and easiest.
(arrays, list_ndim, result_ndim, final_size) = np.core.shape_base._block_setup(
arrays
)
shape, slices, arrays = np.core.shape_base._block_info_recursion(
arrays, list_ndim, result_ndim
)
# Here, one line of difference!
arrays, unit = _quantities2arrays(*arrays)
# Back to _block_slicing
dtype = np.result_type(*[arr.dtype for arr in arrays])
F_order = all(arr.flags["F_CONTIGUOUS"] for arr in arrays)
C_order = all(arr.flags["C_CONTIGUOUS"] for arr in arrays)
order = "F" if F_order and not C_order else "C"
result = np.empty(shape=shape, dtype=dtype, order=order)
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result, unit, None
@function_helper
def choose(a, choices, out=None, **kwargs):
choices, kwargs, unit, out = _iterable_helper(*choices, out=out, **kwargs)
return (a, choices), kwargs, unit, out
@function_helper
def select(condlist, choicelist, default=0):
choicelist, kwargs, unit, out = _iterable_helper(*choicelist)
if default != 0:
default = (1 * unit)._to_own_unit(default)
return (condlist, choicelist, default), kwargs, unit, out
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
from astropy.units import Quantity
# Copied implementation from numpy.lib.function_base.piecewise,
# taking care of units of function outputs.
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if np.isscalar(condlist) or (
not isinstance(condlist[0], (list, np.ndarray)) and x.ndim != 0
):
condlist = [condlist]
if any(isinstance(c, Quantity) for c in condlist):
raise NotImplementedError
condlist = np.array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
f"with {n} condition(s), either {n} or {n + 1} functions are expected"
)
y = np.zeros(x.shape, x.dtype)
where = []
what = []
for k in range(n):
item = funclist[k]
if not callable(item):
where.append(condlist[k])
what.append(item)
else:
vals = x[condlist[k]]
if vals.size > 0:
where.append(condlist[k])
what.append(item(vals, *args, **kw))
what, unit = _quantities2arrays(*what)
for item, value in zip(where, what):
y[item] = value
return y, unit, None
@function_helper
def append(arr, values, *args, **kwargs):
arrays, unit = _quantities2arrays(arr, values, unit_from_first=True)
return arrays + args, kwargs, unit, None
@function_helper
def insert(arr, obj, values, *args, **kwargs):
from astropy.units import Quantity
if isinstance(obj, Quantity):
raise NotImplementedError
(arr, values), unit = _quantities2arrays(arr, values, unit_from_first=True)
return (arr, obj, values) + args, kwargs, unit, None
@function_helper
def pad(array, pad_width, mode="constant", **kwargs):
# pad dispatches only on array, so that must be a Quantity.
for key in "constant_values", "end_values":
value = kwargs.pop(key, None)
if value is None:
continue
if not isinstance(value, tuple):
value = (value,)
new_value = []
for v in value:
new_value.append(
tuple(array._to_own_unit(_v) for _v in v)
if isinstance(v, tuple)
else array._to_own_unit(v)
)
kwargs[key] = new_value
return (array.view(np.ndarray), pad_width, mode), kwargs, array.unit, None
@function_helper
def where(condition, *args):
from astropy.units import Quantity
if isinstance(condition, Quantity) or len(args) != 2:
raise NotImplementedError
args, unit = _quantities2arrays(*args)
return (condition,) + args, {}, unit, None
@function_helper(helps=({np.quantile, np.nanquantile}))
def quantile(a, q, *args, _q_unit=dimensionless_unscaled, **kwargs):
if len(args) >= 2:
out = args[1]
args = args[:1] + args[2:]
else:
out = kwargs.pop("out", None)
from astropy.units import Quantity
if isinstance(q, Quantity):
q = q.to_value(_q_unit)
(a,), kwargs, unit, out = _iterable_helper(a, out=out, **kwargs)
return (a, q) + args, kwargs, unit, out
@function_helper(helps={np.percentile, np.nanpercentile})
def percentile(a, q, *args, **kwargs):
from astropy.units import percent
return quantile(a, q, *args, _q_unit=percent, **kwargs)
@function_helper
def count_nonzero(a, *args, **kwargs):
return (a.value,) + args, kwargs, None, None
@function_helper(helps={np.isclose, np.allclose})
def close(a, b, rtol=1e-05, atol=1e-08, *args, **kwargs):
from astropy.units import Quantity
(a, b), unit = _quantities2arrays(a, b, unit_from_first=True)
# Allow number without a unit as having the unit.
atol = Quantity(atol, unit).value
return (a, b, rtol, atol) + args, kwargs, None, None
@dispatched_function
def array_equal(a1, a2, equal_nan=False):
try:
args, unit = _quantities2arrays(a1, a2)
except UnitConversionError:
return False, None, None
return np.array_equal(*args, equal_nan=equal_nan), None, None
@dispatched_function
def array_equiv(a1, a2):
try:
args, unit = _quantities2arrays(a1, a2)
except UnitConversionError:
return False, None, None
return np.array_equiv(*args), None, None
@function_helper(helps={np.dot, np.outer})
def dot_like(a, b, out=None):
from astropy.units import Quantity
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
return tuple(x.view(np.ndarray) for x in (a, b, out)), {}, unit, out
else:
return (a.view(np.ndarray), b.view(np.ndarray)), {}, unit, None
@function_helper(
helps={
np.cross,
np.inner,
np.vdot,
np.tensordot,
np.kron,
np.correlate,
np.convolve,
}
)
def cross_like(a, b, *args, **kwargs):
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
return (a.view(np.ndarray), b.view(np.ndarray)) + args, kwargs, unit, None
@function_helper
def einsum(subscripts, *operands, out=None, **kwargs):
from astropy.units import Quantity
if not isinstance(subscripts, str):
raise ValueError('only "subscripts" string mode supported for einsum.')
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
else:
kwargs["out"] = out.view(np.ndarray)
qs = _as_quantities(*operands)
unit = functools.reduce(operator.mul, (q.unit for q in qs), dimensionless_unscaled)
arrays = tuple(q.view(np.ndarray) for q in qs)
return (subscripts,) + arrays, kwargs, unit, out
@function_helper
def bincount(x, weights=None, minlength=0):
from astropy.units import Quantity
if isinstance(x, Quantity):
raise NotImplementedError
return (x, weights.value, minlength), {}, weights.unit, None
@function_helper
def digitize(x, bins, *args, **kwargs):
arrays, unit = _quantities2arrays(x, bins, unit_from_first=True)
return arrays + args, kwargs, None, None
def _check_bins(bins, unit):
from astropy.units import Quantity
check = _as_quantity(bins)
if check.ndim > 0:
return check.to_value(unit)
elif isinstance(bins, Quantity):
# bins should be an integer (or at least definitely not a Quantity).
raise NotImplementedError
else:
return bins
def _check_range(range, unit):
range = _as_quantity(range)
range = range.to_value(unit)
return range
@function_helper
def histogram(a, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
if range is not None:
range = _check_range(range, a.unit)
if density:
unit = (unit or 1) / a.unit
return (
(a.value, bins, range),
{"weights": weights, "density": density},
(unit, a.unit),
None,
)
@function_helper(helps=np.histogram_bin_edges)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
# weights is currently unused
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
if range is not None:
range = _check_range(range, a.unit)
return (a.value, bins, range, weights), {}, a.unit, None
@function_helper
def histogram2d(x, y, bins=10, range=None, weights=None, density=None):
from astropy.units import Quantity
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
x, y = _as_quantities(x, y)
try:
n = len(bins)
except TypeError:
# bins should be an integer (or at least definitely not a Quantity).
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if n == 1:
raise NotImplementedError
elif n == 2 and not isinstance(bins, Quantity):
bins = [_check_bins(b, unit) for (b, unit) in zip(bins, (x.unit, y.unit))]
else:
bins = _check_bins(bins, x.unit)
y = y.to(x.unit)
if range is not None:
range = tuple(
_check_range(r, unit) for (r, unit) in zip(range, (x.unit, y.unit))
)
if density:
unit = (unit or 1) / x.unit / y.unit
return (
(x.value, y.value, bins, range),
{"weights": weights, "density": density},
(unit, x.unit, y.unit),
None,
)
@function_helper
def histogramdd(sample, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
try:
# Sample is an ND-array.
_, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = _as_quantities(*sample)
sample_units = [s.unit for s in sample]
sample = [s.value for s in sample]
D = len(sample)
else:
sample = _as_quantity(sample)
sample_units = [sample.unit] * D
try:
M = len(bins)
except TypeError:
# bins should be an integer
from astropy.units import Quantity
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if M != D:
raise ValueError(
"The dimension of bins must be equal to the dimension of the sample x."
)
bins = [_check_bins(b, unit) for (b, unit) in zip(bins, sample_units)]
if range is not None:
range = tuple(_check_range(r, unit) for (r, unit) in zip(range, sample_units))
if density:
unit = functools.reduce(operator.truediv, sample_units, (unit or 1))
return (
(sample, bins, range),
{"weights": weights, "density": density},
(unit, sample_units),
None,
)
@function_helper
def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
a = _as_quantity(a)
if prepend is not np._NoValue:
prepend = _as_quantity(prepend).to_value(a.unit)
if append is not np._NoValue:
append = _as_quantity(append).to_value(a.unit)
return (a.value, n, axis, prepend, append), {}, a.unit, None
@function_helper
def gradient(f, *varargs, **kwargs):
f = _as_quantity(f)
axis = kwargs.get("axis", None)
if axis is None:
n_axis = f.ndim
elif isinstance(axis, tuple):
n_axis = len(axis)
else:
n_axis = 1
if varargs:
varargs = _as_quantities(*varargs)
if len(varargs) == 1 and n_axis > 1:
varargs = varargs * n_axis
if varargs:
units = [f.unit / q.unit for q in varargs]
varargs = tuple(q.value for q in varargs)
else:
units = [f.unit] * n_axis
if len(units) == 1:
units = units[0]
return (f.value,) + varargs, kwargs, units, None
@function_helper
def logspace(start, stop, *args, **kwargs):
from astropy.units import LogQuantity, dex
if not isinstance(start, LogQuantity) or not isinstance(stop, LogQuantity):
raise NotImplementedError
# Get unit from end point as for linspace.
stop = stop.to(dex(stop.unit.physical_unit))
start = start.to(stop.unit)
unit = stop.unit.physical_unit
return (start.value, stop.value) + args, kwargs, unit, None
@function_helper
def geomspace(start, stop, *args, **kwargs):
# Get unit from end point as for linspace.
(stop, start), unit = _quantities2arrays(stop, start)
return (start, stop) + args, kwargs, unit, None
@function_helper
def interp(x, xp, fp, *args, **kwargs):
from astropy.units import Quantity
(x, xp), _ = _quantities2arrays(x, xp)
if isinstance(fp, Quantity):
unit = fp.unit
fp = fp.value
else:
unit = None
return (x, xp, fp) + args, kwargs, unit, None
@function_helper
def unique(
ar, return_index=False, return_inverse=False, return_counts=False, axis=None
):
unit = ar.unit
n_index = sum(bool(i) for i in (return_index, return_inverse, return_counts))
if n_index:
unit = [unit] + n_index * [None]
return (ar.value, return_index, return_inverse, return_counts, axis), {}, unit, None
@function_helper
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
if return_indices:
unit = [unit, None, None]
return (ar1, ar2, assume_unique, return_indices), {}, unit, None
@function_helper(helps=(np.setxor1d, np.union1d, np.setdiff1d))
def twosetop(ar1, ar2, *args, **kwargs):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
return (ar1, ar2) + args, kwargs, unit, None
@function_helper(helps=(np.isin, np.in1d))
def setcheckop(ar1, ar2, *args, **kwargs):
# This tests whether ar1 is in ar2, so we should change the unit of
# a1 to that of a2.
(ar2, ar1), unit = _quantities2arrays(ar2, ar1)
return (ar1, ar2) + args, kwargs, None, None
@dispatched_function
def apply_over_axes(func, a, axes):
# Copied straight from numpy/lib/shape_base, just to omit its
# val = asarray(a); if only it had been asanyarray, or just not there
# since a is assumed to an an array in the next line...
# Which is what we do here - we can only get here if it is a Quantity.
val = a
N = a.ndim
if np.array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = np.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError(
"function is not returning an array of the correct shape"
)
# Returning unit is None to signal nothing should happen to
# the output.
return val, None, None
@dispatched_function
def array_repr(arr, *args, **kwargs):
# TODO: The addition of "unit='...'" doesn't worry about line
# length. Could copy & adapt _array_repr_implementation from
# numpy.core.arrayprint.py
cls_name = arr.__class__.__name__
fake_name = "_" * len(cls_name)
fake_cls = type(fake_name, (np.ndarray,), {})
no_unit = np.array_repr(arr.view(fake_cls), *args, **kwargs).replace(
fake_name, cls_name
)
unit_part = f"unit='{arr.unit}'"
pre, dtype, post = no_unit.rpartition("dtype")
if dtype:
return f"{pre}{unit_part}, {dtype}{post}", None, None
else:
return f"{no_unit[:-1]}, {unit_part})", None, None
@dispatched_function
def array_str(arr, *args, **kwargs):
# TODO: The addition of the unit doesn't worry about line length.
# Could copy & adapt _array_repr_implementation from
# numpy.core.arrayprint.py
no_unit = np.array_str(arr.value, *args, **kwargs)
return no_unit + arr._unitstr, None, None
@function_helper
def array2string(a, *args, **kwargs):
# array2string breaks on quantities as it tries to turn individual
# items into float, which works only for dimensionless. Since the
# defaults would not keep any unit anyway, this is rather pointless -
# we're better off just passing on the array view. However, one can
# also work around this by passing on a formatter (as is done in Angle).
# So, we do nothing if the formatter argument is present and has the
# relevant formatter for our dtype.
formatter = args[6] if len(args) >= 7 else kwargs.get("formatter", None)
if formatter is None:
a = a.value
else:
# See whether it covers our dtype.
from numpy.core.arrayprint import _get_format_function
with np.printoptions(formatter=formatter) as options:
try:
ff = _get_format_function(a.value, **options)
except Exception:
# Shouldn't happen, but possibly we're just not being smart
# enough, so let's pass things on as is.
pass
else:
# If the selected format function is that of numpy, we know
# things will fail
if "numpy" in ff.__module__:
a = a.value
return (a,) + args, kwargs, None, None
@function_helper
def diag(v, *args, **kwargs):
# Function works for *getting* the diagonal, but not *setting*.
# So, override always.
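    # E.g. (a sketch): np.diag([1., 2.] * u.m) should come back as a 2x2
    # Quantity in m, so we always unwrap the value and re-attach the unit.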
return (v.value,) + args, kwargs, v.unit, None
@function_helper(module=np.linalg)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
unit = a.unit
if compute_uv:
unit = (None, unit, None)
return ((a.view(np.ndarray), full_matrices, compute_uv, hermitian), {}, unit, None)
def _interpret_tol(tol, unit):
from astropy.units import Quantity
return Quantity(tol, unit).value
@function_helper(module=np.linalg)
def matrix_rank(M, tol=None, *args, **kwargs):
if tol is not None:
tol = _interpret_tol(tol, M.unit)
return (M.view(np.ndarray), tol) + args, kwargs, None, None
@function_helper(helps={np.linalg.inv, np.linalg.tensorinv})
def inv(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, 1 / a.unit, None
@function_helper(module=np.linalg)
def pinv(a, rcond=1e-15, *args, **kwargs):
rcond = _interpret_tol(rcond, a.unit)
return (a.view(np.ndarray), rcond) + args, kwargs, 1 / a.unit, None
@function_helper(module=np.linalg)
def det(a):
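    # The determinant of an (n, n) matrix with unit u has unit u**n,
    # hence a.unit ** a.shape[-1] below.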
return (a.view(np.ndarray),), {}, a.unit ** a.shape[-1], None
@function_helper(helps={np.linalg.solve, np.linalg.tensorsolve})
def solve(a, b, *args, **kwargs):
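    # Solving a @ x = b for x means the result carries unit b.unit / a.unit.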
a, b = _as_quantities(a, b)
return (
(a.view(np.ndarray), b.view(np.ndarray)) + args,
kwargs,
b.unit / a.unit,
None,
)
@function_helper(module=np.linalg)
def lstsq(a, b, rcond="warn"):
a, b = _as_quantities(a, b)
if rcond not in (None, "warn", -1):
rcond = _interpret_tol(rcond, a.unit)
return (
(a.view(np.ndarray), b.view(np.ndarray), rcond),
{},
(b.unit / a.unit, b.unit**2, None, a.unit),
None,
)
@function_helper(module=np.linalg)
def norm(x, ord=None, *args, **kwargs):
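    # The 0-"norm" just counts non-zero entries, so the result is a plain
    # number; any other order keeps the unit of x.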
if ord == 0:
from astropy.units import dimensionless_unscaled
unit = dimensionless_unscaled
else:
unit = x.unit
return (x.view(np.ndarray), ord) + args, kwargs, unit, None
@function_helper(module=np.linalg)
def matrix_power(a, n):
return (a.value, n), {}, a.unit**n, None
@function_helper(module=np.linalg)
def cholesky(a):
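    # a = L @ L.T.conj(), so the Cholesky factor L carries the square root
    # of the unit of a.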
return (a.value,), {}, a.unit**0.5, None
@function_helper(module=np.linalg)
def qr(a, mode="reduced"):
if mode.startswith("e"):
units = None
elif mode == "r":
units = a.unit
else:
from astropy.units import dimensionless_unscaled
units = (dimensionless_unscaled, a.unit)
return (a.value, mode), {}, units, None
@function_helper(helps={np.linalg.eig, np.linalg.eigh})
def eig(a, *args, **kwargs):
from astropy.units import dimensionless_unscaled
return (a.value,) + args, kwargs, (a.unit, dimensionless_unscaled), None
# ======================= np.lib.recfunctions =======================
@function_helper(module=np.lib.recfunctions)
def structured_to_unstructured(arr, *args, **kwargs):
"""
Convert a structured quantity to an unstructured one.
This only works if all the units are compatible.
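    Examples
    --------
    A minimal, illustrative sketch (all fields end up in the unit of the
    first field; the exact ``repr`` may differ):
    >>> import numpy as np
    >>> import astropy.units as u
    >>> from numpy.lib import recfunctions as rfn
    >>> q = u.Quantity(np.array([(1., 2.)], dtype=[("p", "f8"), ("v", "f8")]), "km, m")
    >>> rfn.structured_to_unstructured(q)  # doctest: +SKIP
    <Quantity [[1.   , 0.002]] km>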
"""
from astropy.units import StructuredUnit
target_unit = arr.unit.values()[0]
def replace_unit(x):
if isinstance(x, StructuredUnit):
return x._recursively_apply(replace_unit)
else:
return target_unit
to_unit = arr.unit._recursively_apply(replace_unit)
return (arr.to_value(to_unit),) + args, kwargs, target_unit, None
def _build_structured_unit(dtype, unit):
"""Build structured unit from dtype.
Parameters
----------
dtype : `numpy.dtype`
unit : `astropy.units.Unit`
Returns
-------
`astropy.units.Unit` or tuple
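    Examples
    --------
    A rough sketch (hypothetical dtype; output indicative only):
    >>> import numpy as np
    >>> import astropy.units as u
    >>> _build_structured_unit(np.dtype([("p", "f8"), ("v", "f8")]), u.km)  # doctest: +SKIP
    (Unit("km"), Unit("km"))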
"""
if dtype.fields is None:
return unit
return tuple(_build_structured_unit(v[0], unit) for v in dtype.fields.values())
@function_helper(module=np.lib.recfunctions)
def unstructured_to_structured(arr, dtype, *args, **kwargs):
from astropy.units import StructuredUnit
target_unit = StructuredUnit(_build_structured_unit(dtype, arr.unit))
return (arr.to_value(arr.unit), dtype) + args, kwargs, target_unit, None
def _izip_units_flat(iterable):
"""Returns an iterator of collapsing any nested unit structure.
Parameters
----------
iterable : Iterable[StructuredUnit | Unit] or StructuredUnit
A structured unit or iterable thereof.
Yields
------
unit
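    Examples
    --------
    A rough sketch (hypothetical units; output indicative only):
    >>> import astropy.units as u
    >>> list(_izip_units_flat((u.km, (u.m / u.s, u.kg))))  # doctest: +SKIP
    [Unit("km"), Unit("m / s"), Unit("kg")]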
"""
from astropy.units import StructuredUnit
# Make Structured unit (pass-through if it is already).
units = StructuredUnit(iterable)
# Yield from structured unit.
for v in units.values():
if isinstance(v, StructuredUnit):
yield from _izip_units_flat(v)
else:
yield v
@function_helper(helps=rfn.merge_arrays)
def merge_arrays(
seqarrays,
fill_value=-1,
flatten=False,
usemask=False,
asrecarray=False,
):
"""Merge structured Quantities field by field.
Like :func:`numpy.lib.recfunctions.merge_arrays`. Note that ``usemask`` and
``asrecarray`` are not supported at this time and will raise a ValueError if
not `False`.
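    Examples
    --------
    A minimal, illustrative sketch (the exact ``repr`` may differ):
    >>> import astropy.units as u
    >>> from numpy.lib import recfunctions as rfn
    >>> q1 = [1., 2.] * u.km
    >>> q2 = [3., 4.] * u.s
    >>> rfn.merge_arrays((q1, q2))  # doctest: +SKIP
    <Quantity [(1., 3.), (2., 4.)] (km, s)>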
"""
from astropy.units import Quantity, StructuredUnit
if asrecarray:
# TODO? implement if Quantity ever supports rec.array
raise ValueError("asrecarray=True is not supported.")
if usemask:
# TODO: use MaskedQuantity for this case
raise ValueError("usemask=True is not supported.")
# Do we have a single Quantity as input?
if isinstance(seqarrays, Quantity):
seqarrays = (seqarrays,)
# Note: this also converts ndarray -> Quantity[dimensionless]
seqarrays = _as_quantities(*seqarrays)
arrays = tuple(q.value for q in seqarrays)
units = tuple(q.unit for q in seqarrays)
if flatten:
unit = StructuredUnit(tuple(_izip_units_flat(units)))
elif len(arrays) == 1:
unit = StructuredUnit(units[0])
else:
unit = StructuredUnit(units)
return (
(arrays,),
dict(
fill_value=fill_value,
flatten=flatten,
usemask=usemask,
asrecarray=asrecarray,
),
unit,
None,
)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import typing as T
# THIRD PARTY
import pytest
# LOCAL
from astropy import units as u
from astropy.units import Quantity
from astropy.units._typing import HAS_ANNOTATED
def test_ignore_generic_type_annotations():
"""Test annotations that are not unit related are ignored.
This test passes if the function works.
"""
# one unit, one not (should be ignored)
@u.quantity_input
def func(x: u.m, y: str):
return x, y
i_q, i_str = 2 * u.m, "cool string"
o_q, o_str = func(i_q, i_str) # if this doesn't fail, it worked.
assert i_q == o_q
assert i_str == o_str
@pytest.mark.skipif(not HAS_ANNOTATED, reason="need `Annotated`")
class TestQuantityUnitAnnotations:
"""Test Quantity[Unit] type annotation."""
def test_simple_annotation(self):
@u.quantity_input
def func(x: Quantity[u.m], y: str):
return x, y
i_q, i_str = 2 * u.m, "cool string"
o_q, o_str = func(i_q, i_str)
assert i_q == o_q
assert i_str == o_str
# checks the input on the 1st arg
with pytest.raises(u.UnitsError):
func(1 * u.s, i_str)
# but not the second
o_q, o_str = func(i_q, {"not": "a string"})
assert i_q == o_q
assert i_str != o_str
def test_multiple_annotation(self):
@u.quantity_input
def multi_func(a: Quantity[u.km]) -> Quantity[u.m]:
return a
i_q = 2 * u.km
o_q = multi_func(i_q)
assert o_q == i_q
assert o_q.unit == u.m
@pytest.mark.skipif(not HAS_ANNOTATED, reason="need `Annotated`")
def test_optional_and_annotated(self):
@u.quantity_input
def opt_func(x: T.Optional[Quantity[u.m]] = None) -> Quantity[u.km]:
if x is None:
return 1 * u.km
return x
i_q = 250 * u.m
o_q = opt_func(i_q)
assert o_q.unit == u.km
assert o_q == i_q
i_q = None
o_q = opt_func(i_q)
assert o_q == 1 * u.km
@pytest.mark.skipif(not HAS_ANNOTATED, reason="need `Annotated`")
def test_union_and_annotated(self):
# Union and Annotated
@u.quantity_input
def union_func(x: T.Union[Quantity[u.m], Quantity[u.s], None]):
if x is None:
return None
else:
return 2 * x
i_q = 1 * u.m
o_q = union_func(i_q)
assert o_q == 2 * i_q
i_q = 1 * u.s
o_q = union_func(i_q)
assert o_q == 2 * i_q
i_q = None
o_q = union_func(i_q)
assert o_q is None
def test_not_unit_or_ptype(self):
with pytest.raises(TypeError, match="unit annotation is not"):
Quantity["definitely not a unit"]
@pytest.mark.skipif(HAS_ANNOTATED, reason="requires py3.8 behavior")
def test_not_unit_or_ptype():
"""
    Same as the test above, but with different behavior for Python 3.8, because
    it passes Quantity right through.
"""
with pytest.warns(Warning):
annot = Quantity[u.km]
assert annot == u.km
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.arcsec), ("angle", "angle")]
)
def test_args3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
solarx, solary = myfunc_args(1 * u.arcsec, 1 * u.arcsec)
assert isinstance(solarx, Quantity)
assert isinstance(solary, Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.arcsec
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.arcsec), ("angle", "angle")]
)
def test_args_noconvert3(solarx_unit, solary_unit):
@u.quantity_input()
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
solarx, solary = myfunc_args(1 * u.deg, 1 * u.arcmin)
assert isinstance(solarx, Quantity)
assert isinstance(solary, Quantity)
assert solarx.unit == u.deg
assert solary.unit == u.arcmin
@pytest.mark.parametrize("solarx_unit", [u.arcsec, "angle"])
def test_args_nonquantity3(solarx_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary):
return solarx, solary
solarx, solary = myfunc_args(1 * u.arcsec, 100)
assert isinstance(solarx, Quantity)
assert isinstance(solary, int)
assert solarx.unit == u.arcsec
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.eV), ("angle", "energy")]
)
def test_arg_equivalencies3(solarx_unit, solary_unit):
@u.quantity_input(equivalencies=u.mass_energy())
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary + (10 * u.J) # Add an energy to check equiv is working
solarx, solary = myfunc_args(1 * u.arcsec, 100 * u.gram)
assert isinstance(solarx, Quantity)
assert isinstance(solary, Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.gram
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_wrong_unit3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
with pytest.raises(
u.UnitsError,
match=(
"Argument 'solary' to function 'myfunc_args' must be in units "
f"convertible to '{str(solary_unit)}'."
),
):
solarx, solary = myfunc_args(1 * u.arcsec, 100 * u.km)
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_not_quantity3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
with pytest.raises(
TypeError,
match=(
"Argument 'solary' to function 'myfunc_args' has no 'unit' "
"attribute. You should pass in an astropy Quantity instead."
),
):
solarx, solary = myfunc_args(1 * u.arcsec, 100)
def test_decorator_override():
@u.quantity_input(solarx=u.arcsec)
def myfunc_args(solarx: u.km, solary: u.arcsec):
return solarx, solary
solarx, solary = myfunc_args(1 * u.arcsec, 1 * u.arcsec)
assert isinstance(solarx, Quantity)
assert isinstance(solary, Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.arcsec
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwargs3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary, myk: solary_unit = 1 * u.arcsec):
return solarx, solary, myk
solarx, solary, myk = myfunc_args(1 * u.arcsec, 100, myk=100 * u.deg)
assert isinstance(solarx, Quantity)
assert isinstance(solary, int)
assert isinstance(myk, Quantity)
assert myk.unit == u.deg
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_unused_kwargs3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(
solarx: solarx_unit, solary, myk: solary_unit = 1 * u.arcsec, myk2=1000
):
return solarx, solary, myk, myk2
solarx, solary, myk, myk2 = myfunc_args(1 * u.arcsec, 100, myk=100 * u.deg, myk2=10)
assert isinstance(solarx, Quantity)
assert isinstance(solary, int)
assert isinstance(myk, Quantity)
assert isinstance(myk2, int)
assert myk.unit == u.deg
assert myk2 == 10
@pytest.mark.parametrize("solarx_unit,energy", [(u.arcsec, u.eV), ("angle", "energy")])
def test_kwarg_equivalencies3(solarx_unit, energy):
@u.quantity_input(equivalencies=u.mass_energy())
def myfunc_args(solarx: solarx_unit, energy: energy = 10 * u.eV):
return solarx, energy + (10 * u.J) # Add an energy to check equiv is working
solarx, energy = myfunc_args(1 * u.arcsec, 100 * u.gram)
assert isinstance(solarx, Quantity)
assert isinstance(energy, Quantity)
assert solarx.unit == u.arcsec
assert energy.unit == u.gram
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwarg_wrong_unit3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit = 10 * u.deg):
return solarx, solary
with pytest.raises(
u.UnitsError,
match=(
"Argument 'solary' to function 'myfunc_args' must be in "
f"units convertible to '{str(solary_unit)}'."
),
):
solarx, solary = myfunc_args(1 * u.arcsec, solary=100 * u.km)
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwarg_not_quantity3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit = 10 * u.deg):
return solarx, solary
with pytest.raises(
TypeError,
match=(
"Argument 'solary' to function 'myfunc_args' has no 'unit' attribute. "
"You should pass in an astropy Quantity instead."
),
):
solarx, solary = myfunc_args(1 * u.arcsec, solary=100)
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwarg_default3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit = 10 * u.deg):
return solarx, solary
solarx, solary = myfunc_args(1 * u.arcsec)
def test_return_annotation():
@u.quantity_input
def myfunc_args(solarx: u.arcsec) -> u.deg:
return solarx
solarx = myfunc_args(1 * u.arcsec)
assert solarx.unit is u.deg
def test_return_annotation_none():
@u.quantity_input
def myfunc_args(solarx: u.arcsec) -> None:
pass
solarx = myfunc_args(1 * u.arcsec)
assert solarx is None
def test_return_annotation_notUnit():
@u.quantity_input
def myfunc_args(solarx: u.arcsec) -> int:
return 0
solarx = myfunc_args(1 * u.arcsec)
assert solarx == 0
def test_enum_annotation():
# Regression test for gh-9932
from enum import Enum, auto
class BasicEnum(Enum):
AnOption = auto()
@u.quantity_input
def myfunc_args(a: BasicEnum, b: u.arcsec) -> None:
pass
myfunc_args(BasicEnum.AnOption, 1 * u.arcsec)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test the propagation of info on Quantity during operations."""
import copy
import numpy as np
from astropy import units as u
def assert_info_equal(a, b, ignore=set()):
a_info = a.info
b_info = b.info
for attr in (a_info.attr_names | b_info.attr_names) - ignore:
if attr == "unit":
assert a_info.unit.is_equivalent(b_info.unit)
else:
assert getattr(a_info, attr, None) == getattr(b_info, attr, None)
def assert_no_info(a):
assert "info" not in a.__dict__
class TestQuantityInfo:
@classmethod
def setup_class(self):
self.q = u.Quantity(np.arange(1.0, 5.0), "m/s")
self.q.info.name = "v"
        self.q.info.description = "air speed of an African swallow"
def test_copy(self):
q_copy1 = self.q.copy()
assert_info_equal(q_copy1, self.q)
q_copy2 = copy.copy(self.q)
assert_info_equal(q_copy2, self.q)
q_copy3 = copy.deepcopy(self.q)
assert_info_equal(q_copy3, self.q)
def test_slice(self):
q_slice = self.q[1:3]
assert_info_equal(q_slice, self.q)
q_take = self.q.take([0, 1])
assert_info_equal(q_take, self.q)
def test_item(self):
# Scalars do not get info set (like for Column); TODO: is this OK?
q1 = self.q[1]
assert_no_info(q1)
q_item = self.q.item(1)
assert_no_info(q_item)
def test_iter(self):
# Scalars do not get info set.
for q in self.q:
assert_no_info(q)
for q in iter(self.q):
assert_no_info(q)
def test_change_to_equivalent_unit(self):
q1 = self.q.to(u.km / u.hr)
assert_info_equal(q1, self.q)
q2 = self.q.si
assert_info_equal(q2, self.q)
q3 = self.q.cgs
assert_info_equal(q3, self.q)
q4 = self.q.decompose()
assert_info_equal(q4, self.q)
def test_reshape(self):
q = self.q.reshape(-1, 1, 2)
assert_info_equal(q, self.q)
q2 = q.squeeze()
assert_info_equal(q2, self.q)
def test_insert(self):
q = self.q.copy()
q.insert(1, 1 * u.cm / u.hr)
assert_info_equal(q, self.q)
def test_unary_op(self):
q = -self.q
assert_no_info(q)
def test_binary_op(self):
q = self.q + self.q
assert_no_info(q)
def test_unit_change(self):
q = self.q * u.s
assert_no_info(q)
        q2 = u.s / self.q
        assert_no_info(q2)
def test_inplace_unit_change(self):
# Not sure if it is logical to keep info here!
q = self.q.copy()
q *= u.s
assert_info_equal(q, self.q, ignore={"unit"})
class TestStructuredQuantity:
@classmethod
def setup_class(self):
value = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=[("p", "f8"), ("v", "f8")])
self.q = u.Quantity(value, "m, m/s")
self.q.info.name = "pv"
self.q.info.description = "Location and speed"
def test_keying(self):
q_p = self.q["p"]
assert_no_info(q_p)
def test_slicing(self):
q = self.q[:1]
assert_info_equal(q, self.q)
def test_item(self):
# Scalars do not get info set.
q = self.q[1]
assert_no_info(q)
class TestQuantitySubclass:
"""Regression test for gh-14514: _new_view should __array_finalize__.
But info should be propagated only for slicing, etc.
"""
@classmethod
def setup_class(self):
class MyQuantity(u.Quantity):
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if hasattr(obj, "swallow"):
self.swallow = obj.swallow
self.my_q = MyQuantity([10.0, 20.0], u.m / u.s)
self.my_q.swallow = "African"
self.my_q_w_info = self.my_q.copy()
self.my_q_w_info.info.name = "swallow"
def test_setup(self):
assert_no_info(self.my_q)
assert self.my_q_w_info.swallow == self.my_q.swallow
assert self.my_q_w_info.info.name == "swallow"
def test_slice(self):
slc1 = self.my_q[:1]
assert slc1.swallow == self.my_q.swallow
assert_no_info(slc1)
slc2 = self.my_q_w_info[1:]
assert slc2.swallow == self.my_q.swallow
assert_info_equal(slc2, self.my_q_w_info)
def test_op(self):
square1 = self.my_q**2
assert square1.swallow == self.my_q.swallow
assert_no_info(square1)
square2 = self.my_q_w_info**2
assert square2.swallow == self.my_q.swallow
assert_no_info(square2)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Regression tests for the units.format package
"""
import warnings
from contextlib import nullcontext
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.constants import si
from astropy.units import PrefixUnit, Unit, UnitBase, UnitsWarning, dex
from astropy.units import format as u_format
from astropy.units.utils import is_effectively_unity
@pytest.mark.parametrize(
"strings, unit",
[
(["m s", "m*s", "m.s"], u.m * u.s),
(["m/s", "m*s**-1", "m /s", "m / s", "m/ s"], u.m / u.s),
(["m**2", "m2", "m**(2)", "m**+2", "m+2", "m^(+2)"], u.m**2),
(["m**-3", "m-3", "m^(-3)", "/m3"], u.m**-3),
(["m**(1.5)", "m(3/2)", "m**(3/2)", "m^(3/2)"], u.m**1.5),
(["2.54 cm"], u.Unit(u.cm * 2.54)),
(["10+8m"], u.Unit(u.m * 1e8)),
# This is the VOUnits documentation, but doesn't seem to follow the
# unity grammar (["3.45 10**(-4)Jy"], 3.45 * 1e-4 * u.Jy)
(["sqrt(m)"], u.m**0.5),
(["dB(mW)", "dB (mW)"], u.DecibelUnit(u.mW)),
(["mag"], u.mag),
(["mag(ct/s)"], u.MagUnit(u.ct / u.s)),
(["dex"], u.dex),
(["dex(cm s**-2)", "dex(cm/s2)"], u.DexUnit(u.cm / u.s**2)),
],
)
def test_unit_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.Generic.parse(s)
assert unit2 == unit
@pytest.mark.parametrize(
"string", ["sin( /pixel /s)", "mag(mag)", "dB(dB(mW))", "dex()"]
)
def test_unit_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.Generic.parse(string)
@pytest.mark.parametrize(
"strings, unit",
[
(["0.1nm"], u.AA),
(["mW/m2"], u.Unit(u.erg / u.cm**2 / u.s)),
(["mW/(m2)"], u.Unit(u.erg / u.cm**2 / u.s)),
(["km/s", "km.s-1"], u.km / u.s),
(["km/s/Mpc"], u.km / u.s / u.Mpc),
(["km/(s.Mpc)"], u.km / u.s / u.Mpc),
(["10+3J/m/s/kpc2"], u.Unit(1e3 * u.W / (u.m * u.kpc**2))),
(["10pix/nm"], u.Unit(10 * u.pix / u.nm)),
(["1.5x10+11m"], u.Unit(1.5e11 * u.m)),
(["1.5×10+11/m"], u.Unit(1.5e11 / u.m)),
(["/s"], u.s**-1),
(["m2"], u.m**2),
(["10+21m"], u.Unit(u.m * 1e21)),
(["2.54cm"], u.Unit(u.cm * 2.54)),
(["20%"], 0.20 * u.dimensionless_unscaled),
(["10+9"], 1.0e9 * u.dimensionless_unscaled),
(["2x10-9"], 2.0e-9 * u.dimensionless_unscaled),
(["---"], u.dimensionless_unscaled),
(["ma"], u.ma),
(["mAU"], u.mAU),
(["uarcmin"], u.uarcmin),
(["uarcsec"], u.uarcsec),
(["kbarn"], u.kbarn),
(["Gbit"], u.Gbit),
(["Gibit"], 2**30 * u.bit),
(["kbyte"], u.kbyte),
(["mRy"], 0.001 * u.Ry),
(["mmag"], u.mmag),
(["Mpc"], u.Mpc),
(["Gyr"], u.Gyr),
(["°"], u.degree),
(["°/s"], u.degree / u.s),
(["Å"], u.AA),
(["Å/s"], u.AA / u.s),
(["\\h"], si.h),
(["[cm/s2]"], dex(u.cm / u.s**2)),
(["[K]"], dex(u.K)),
(["[-]"], dex(u.dimensionless_unscaled)),
],
)
def test_cds_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.CDS.parse(s)
assert unit2 == unit
@pytest.mark.parametrize(
"string",
[
"0.1 nm",
"solMass(3/2)",
"km / s",
"km s-1",
"km/s.Mpc-1",
"/s.Mpc",
"pix0.1nm",
"pix/(0.1nm)",
"km*s",
"km**2",
"5x8+3m",
"0.1---",
"---m",
"m---",
"--",
"0.1-",
"-m",
"m-",
"mag(s-1)",
"dB(mW)",
"dex(cm s-2)",
"[--]",
],
)
def test_cds_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.CDS.parse(string)
def test_cds_dimensionless():
assert u.Unit("---", format="cds") == u.dimensionless_unscaled
assert u.dimensionless_unscaled.to_string(format="cds") == "---"
def test_cds_log10_dimensionless():
assert u.Unit("[-]", format="cds") == u.dex(u.dimensionless_unscaled)
assert u.dex(u.dimensionless_unscaled).to_string(format="cds") == "[-]"
# These examples are taken from the EXAMPLES section of
# https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/
@pytest.mark.parametrize(
"strings, unit",
[
(
["count /s", "count/s", "count s**(-1)", "count / s", "count /s "],
u.count / u.s,
),
(
["/pixel /s", "/(pixel * s)"],
(u.pixel * u.s) ** -1,
),
(
[
"count /m**2 /s /eV",
"count m**(-2) * s**(-1) * eV**(-1)",
"count /(m**2 * s * eV)",
],
u.count * u.m**-2 * u.s**-1 * u.eV**-1,
),
(
["erg /pixel /s /GHz", "erg /s /GHz /pixel", "erg /pixel /(s * GHz)"],
u.erg / (u.s * u.GHz * u.pixel),
),
(
["keV**2 /yr /angstrom", "10**(10) keV**2 /yr /m"],
# Though this is given as an example, it seems to violate the rules
# of not raising scales to powers, so I'm just excluding it
# "(10**2 MeV)**2 /yr /m"
u.keV**2 / (u.yr * u.angstrom),
),
(
[
"10**(46) erg /s",
"10**46 erg /s",
"10**(39) J /s",
"10**(39) W",
"10**(15) YW",
"YJ /fs",
],
10**46 * u.erg / u.s,
),
(
[
"10**(-7) J /cm**2 /MeV",
"10**(-9) J m**(-2) eV**(-1)",
"nJ m**(-2) eV**(-1)",
"nJ /m**2 /eV",
],
10**-7 * u.J * u.cm**-2 * u.MeV**-1,
),
(
[
"sqrt(erg /pixel /s /GHz)",
"(erg /pixel /s /GHz)**(0.5)",
"(erg /pixel /s /GHz)**(1/2)",
"erg**(0.5) pixel**(-0.5) s**(-0.5) GHz**(-0.5)",
],
(u.erg * u.pixel**-1 * u.s**-1 * u.GHz**-1) ** 0.5,
),
(
[
"(count /s) (/pixel /s)",
"(count /s) * (/pixel /s)",
"count /pixel /s**2",
],
(u.count / u.s) * (1.0 / (u.pixel * u.s)),
),
],
)
def test_ogip_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.OGIP.parse(s)
assert unit2 == unit
@pytest.mark.parametrize(
"string",
[
"log(photon /m**2 /s /Hz)",
"sin( /pixel /s)",
"log(photon /cm**2 /s /Hz) /(sin( /pixel /s))",
"log(photon /cm**2 /s /Hz) (sin( /pixel /s))**(-1)",
"dB(mW)",
"dex(cm/s**2)",
],
)
def test_ogip_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.OGIP.parse(string)
class RoundtripBase:
deprecated_units = set()
def check_roundtrip(self, unit, output_format=None):
if output_format is None:
output_format = self.format_
with warnings.catch_warnings():
warnings.simplefilter("ignore") # Same warning shows up multiple times
s = unit.to_string(output_format)
if s in self.deprecated_units:
with pytest.warns(UnitsWarning, match="deprecated") as w:
a = Unit(s, format=self.format_)
assert len(w) == 1
else:
a = Unit(s, format=self.format_) # No warning
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-9)
def check_roundtrip_decompose(self, unit):
ud = unit.decompose()
s = ud.to_string(self.format_)
assert " " not in s
a = Unit(s, format=self.format_)
assert_allclose(a.decompose().scale, ud.scale, rtol=1e-5)
class TestRoundtripGeneric(RoundtripBase):
format_ = "generic"
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u.__dict__.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
)
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
self.check_roundtrip(unit, output_format="unicode")
self.check_roundtrip_decompose(unit)
class TestRoundtripVOUnit(RoundtripBase):
format_ = "vounit"
deprecated_units = u_format.VOUnit._deprecated_units
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u_format.VOUnit._units.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
)
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
if unit not in (u.mag, u.dB):
self.check_roundtrip_decompose(unit)
class TestRoundtripFITS(RoundtripBase):
format_ = "fits"
deprecated_units = u_format.Fits._deprecated_units
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u_format.Fits._units.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
)
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
class TestRoundtripCDS(RoundtripBase):
format_ = "cds"
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u_format.CDS._units.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
)
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
if unit == u.mag:
# Skip mag: decomposes into dex, which is unknown to CDS.
return
self.check_roundtrip_decompose(unit)
@pytest.mark.parametrize(
"unit", [u.dex(unit) for unit in (u.cm / u.s**2, u.K, u.Lsun)]
)
def test_roundtrip_dex(self, unit):
string = unit.to_string(format="cds")
recovered = u.Unit(string, format="cds")
assert recovered == unit
class TestRoundtripOGIP(RoundtripBase):
format_ = "ogip"
deprecated_units = u_format.OGIP._deprecated_units | {"d"}
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u_format.OGIP._units.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
)
def test_roundtrip(self, unit):
if str(unit) in ("d", "0.001 Crab"):
# Special-case day, which gets auto-converted to hours, and mCrab,
# which the default check does not recognize as a deprecated unit.
with pytest.warns(UnitsWarning):
s = unit.to_string(self.format_)
a = Unit(s, format=self.format_)
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-9)
else:
self.check_roundtrip(unit)
if str(unit) in ("mag", "byte", "Crab"):
# Skip mag and byte, which decompose into dex and bit, resp.,
# both of which are unknown to OGIP, as well as Crab, which does
# not decompose, and thus gives a deprecated unit warning.
return
power_of_ten = np.log10(unit.decompose().scale)
if abs(power_of_ten - round(power_of_ten)) > 1e-3:
ctx = pytest.warns(UnitsWarning, match="power of 10")
elif str(unit) == "0.001 Crab":
ctx = pytest.warns(UnitsWarning, match="deprecated")
else:
ctx = nullcontext()
with ctx:
self.check_roundtrip_decompose(unit)
def test_fits_units_available():
u_format.Fits._units
def test_vo_units_available():
u_format.VOUnit._units
def test_cds_units_available():
u_format.CDS._units
def test_cds_non_ascii_unit():
"""Regression test for #5350. This failed with a decoding error as
μas could not be represented in ascii."""
from astropy.units import cds
with cds.enable():
u.radian.find_equivalent_units(include_prefix_units=True)
def test_latex():
fluxunit = u.erg / (u.cm**2 * u.s)
assert fluxunit.to_string("latex") == r"$\mathrm{\frac{erg}{s\,cm^{2}}}$"
def test_new_style_latex():
fluxunit = u.erg / (u.cm**2 * u.s)
assert f"{fluxunit:latex}" == r"$\mathrm{\frac{erg}{s\,cm^{2}}}$"
def test_latex_scale():
fluxunit = u.Unit(1.0e-24 * u.erg / (u.cm**2 * u.s * u.Hz))
latex = r"$\mathrm{1 \times 10^{-24}\,\frac{erg}{Hz\,s\,cm^{2}}}$"
assert fluxunit.to_string("latex") == latex
def test_latex_inline_scale():
fluxunit = u.Unit(1.0e-24 * u.erg / (u.cm**2 * u.s * u.Hz))
latex_inline = r"$\mathrm{1 \times 10^{-24}\,erg\,Hz^{-1}\,s^{-1}\,cm^{-2}}$"
assert fluxunit.to_string("latex_inline") == latex_inline
@pytest.mark.parametrize(
"format_spec, string, decomposed",
[
("generic", "erg / (Angstrom s cm2)", "1e+07 kg / (m s3)"),
("s", "erg / (Angstrom s cm2)", "1e+07 kg / (m s3)"),
("console", "erg Angstrom^-1 s^-1 cm^-2", "10000000 kg m^-1 s^-3"),
(
"latex",
r"$\mathrm{\frac{erg}{\mathring{A}\,s\,cm^{2}}}$",
r"$\mathrm{10000000\,\frac{kg}{m\,s^{3}}}$",
),
(
"latex_inline",
r"$\mathrm{erg\,\mathring{A}^{-1}\,s^{-1}\,cm^{-2}}$",
r"$\mathrm{10000000\,kg\,m^{-1}\,s^{-3}}$",
),
("unicode", "erg Å⁻¹ s⁻¹ cm⁻²", "10000000 kg m⁻¹ s⁻³"),
(">25s", " erg / (Angstrom s cm2)", " 1e+07 kg / (m s3)"),
("cds", "erg.Angstrom-1.s-1.cm-2", "10000000kg.m-1.s-3"),
("ogip", "10 erg / (nm s cm**2)", "1e+07 kg / (m s**3)"),
("fits", "erg Angstrom-1 s-1 cm-2", "10**7 kg m-1 s-3"),
("vounit", "erg.Angstrom**-1.s**-1.cm**-2", "10000000kg.m**-1.s**-3"),
# TODO: make fits and vounit less awful!
],
)
def test_format_styles(format_spec, string, decomposed):
fluxunit = u.erg / (u.cm**2 * u.s * u.Angstrom)
if format_spec == "vounit":
# erg is deprecated in vounit.
with pytest.warns(UnitsWarning, match="deprecated"):
formatted = format(fluxunit, format_spec)
else:
formatted = format(fluxunit, format_spec)
assert formatted == string
# Decomposed mostly to test that scale factors are dealt with properly
# in the various formats.
assert format(fluxunit.decompose(), format_spec) == decomposed
@pytest.mark.parametrize(
"format_spec, fraction, string, decomposed",
[
("generic", False, "erg s-1 cm-2", "0.001 kg s-3"),
(
"console",
"multiline",
" erg \n------\ns cm^2",
" kg \n0.001 ---\n s^3",
),
("console", "inline", "erg / (s cm^2)", "0.001 kg / s^3"),
("unicode", "multiline", " erg \n─────\ns cm²", " kg\n0.001 ──\n s³"),
("unicode", "inline", "erg / (s cm²)", "0.001 kg / s³"),
(
"latex",
False,
r"$\mathrm{erg\,s^{-1}\,cm^{-2}}$",
r"$\mathrm{0.001\,kg\,s^{-3}}$",
),
(
"latex",
"inline",
r"$\mathrm{erg / (s\,cm^{2})}$",
r"$\mathrm{0.001\,kg / s^{3}}$",
),
# TODO: make generic with fraction=False less awful!
],
)
def test_format_styles_non_default_fraction(format_spec, fraction, string, decomposed):
fluxunit = u.erg / (u.cm**2 * u.s)
assert fluxunit.to_string(format_spec, fraction=fraction) == string
assert fluxunit.decompose().to_string(format_spec, fraction=fraction) == decomposed
@pytest.mark.parametrize("format_spec", ["generic", "cds", "fits", "ogip", "vounit"])
def test_no_multiline_fraction(format_spec):
fluxunit = u.W / u.m**2
with pytest.raises(ValueError, match="only supports.*not fraction='multiline'"):
fluxunit.to_string(format_spec, fraction="multiline")
@pytest.mark.parametrize(
"format_spec",
["generic", "cds", "fits", "ogip", "vounit", "latex", "console", "unicode"],
)
def test_unknown_fraction_style(format_spec):
fluxunit = u.W / u.m**2
with pytest.raises(ValueError, match="only supports.*parrot"):
fluxunit.to_string(format_spec, fraction="parrot")
def test_flatten_to_known():
myunit = u.def_unit("FOOBAR_One", u.erg / u.Hz)
assert myunit.to_string("fits") == "erg Hz-1"
myunit2 = myunit * u.bit**3
assert myunit2.to_string("fits") == "bit3 erg Hz-1"
def test_flatten_impossible():
myunit = u.def_unit("FOOBAR_Two")
with u.add_enabled_units(myunit), pytest.raises(ValueError):
myunit.to_string("fits")
def test_console_out():
"""
Issue #436.
"""
u.Jy.decompose().to_string("console")
@pytest.mark.parametrize(
"format,string",
[
("generic", "10"),
("console", "10"),
("unicode", "10"),
("cds", "10"),
("latex", r"$\mathrm{10}$"),
],
)
def test_scale_only(format, string):
unit = u.Unit(10)
assert unit.to_string(format) == string
def test_flexible_float():
assert u.min._represents.to_string("latex") == r"$\mathrm{60\,s}$"
def test_fits_to_string_function_error():
"""Test function raises TypeError on bad input.
This instead of returning None, see gh-11825.
"""
with pytest.raises(TypeError, match="unit argument must be"):
u_format.Fits.to_string(None)
def test_fraction_repr():
area = u.cm**2.0
assert "." not in area.to_string("latex")
fractional = u.cm**2.5
assert "5/2" in fractional.to_string("latex")
assert fractional.to_string("unicode") == "cm⁵⸍²"
def test_scale_effectively_unity():
"""Scale just off unity at machine precision level is OK.
    Ensures #748 does not recur.
"""
a = (3.0 * u.N).cgs
assert is_effectively_unity(a.unit.scale)
assert len(a.__repr__().split()) == 3
def test_percent():
"""Test that the % unit is properly recognized. Since % is a special
symbol, this goes slightly beyond the round-tripping tested above."""
assert u.Unit("%") == u.percent == u.Unit(0.01)
assert u.Unit("%", format="cds") == u.Unit(0.01)
assert u.Unit(0.01).to_string("cds") == "%"
with pytest.raises(ValueError):
u.Unit("%", format="fits")
with pytest.raises(ValueError):
u.Unit("%", format="vounit")
def test_scaled_dimensionless():
"""Test that scaled dimensionless units are properly recognized in generic
and CDS, but not in fits and vounit."""
assert u.Unit("0.1") == u.Unit(0.1) == 0.1 * u.dimensionless_unscaled
assert u.Unit("1.e-4") == u.Unit(1.0e-4)
assert u.Unit("10-4", format="cds") == u.Unit(1.0e-4)
assert u.Unit("10+8").to_string("cds") == "10+8"
with pytest.raises(ValueError):
u.Unit(0.15).to_string("fits")
assert u.Unit(0.1).to_string("fits") == "10**-1"
with pytest.raises(ValueError):
u.Unit(0.1).to_string("vounit")
def test_deprecated_did_you_mean_units():
with pytest.raises(ValueError) as exc_info:
u.Unit("ANGSTROM", format="fits")
assert "Did you mean Angstrom or angstrom?" in str(exc_info.value)
with pytest.raises(ValueError) as exc_info:
u.Unit("crab", format="ogip")
assert "Crab (deprecated)" in str(exc_info.value)
assert "mCrab (deprecated)" in str(exc_info.value)
with pytest.warns(
UnitsWarning,
match=r".* Did you mean 0\.1nm, Angstrom "
r"\(deprecated\) or angstrom \(deprecated\)\?",
) as w:
u.Unit("ANGSTROM", format="vounit")
assert len(w) == 1
assert str(w[0].message).count("0.1nm") == 1
with pytest.warns(UnitsWarning, match=r".* 0\.1nm\.") as w:
u.Unit("angstrom", format="vounit")
assert len(w) == 1
@pytest.mark.parametrize("string", ["mag(ct/s)", "dB(mW)", "dex(cm s**-2)"])
def test_fits_function(string):
# Function units cannot be written, so ensure they're not parsed either.
with pytest.raises(ValueError):
print(string)
u_format.Fits().parse(string)
@pytest.mark.parametrize("string", ["mag(ct/s)", "dB(mW)", "dex(cm s**-2)"])
def test_vounit_function(string):
# Function units cannot be written, so ensure they're not parsed either.
with pytest.raises(ValueError), warnings.catch_warnings():
# ct, dex also raise warnings - irrelevant here.
warnings.simplefilter("ignore")
u_format.VOUnit().parse(string)
def test_vounit_binary_prefix():
u.Unit("KiB", format="vounit") == u.Unit("1024 B")
u.Unit("Kibyte", format="vounit") == u.Unit("1024 B")
u.Unit("Kibit", format="vounit") == u.Unit("1024 B")
with pytest.warns(UnitsWarning) as w:
u.Unit("kibibyte", format="vounit")
assert len(w) == 1
def test_vounit_unknown():
assert u.Unit("unknown", format="vounit") is None
assert u.Unit("UNKNOWN", format="vounit") is None
assert u.Unit("", format="vounit") is u.dimensionless_unscaled
def test_vounit_details():
with pytest.warns(UnitsWarning, match="deprecated") as w:
assert u.Unit("Pa", format="vounit") is u.Pascal
assert len(w) == 1
# The da- prefix is not allowed, and the d- prefix is discouraged
assert u.dam.to_string("vounit") == "10m"
assert u.Unit("dam dag").to_string("vounit") == "100g.m"
# Parse round-trip
with pytest.warns(UnitsWarning, match="deprecated"):
flam = u.erg / u.cm / u.cm / u.s / u.AA
x = u.format.VOUnit.to_string(flam)
assert x == "erg.Angstrom**-1.s**-1.cm**-2"
new_flam = u.format.VOUnit.parse(x)
assert new_flam == flam
@pytest.mark.parametrize(
"unit, vounit, number, scale, voscale",
[
("nm", "nm", 0.1, "10^-1", "0.1"),
("fm", "fm", 100.0, "10+2", "100"),
("m^2", "m**2", 100.0, "100.0", "100"),
("cm", "cm", 2.54, "2.54", "2.54"),
("kg", "kg", 1.898124597e27, "1.898124597E27", "1.8981246e+27"),
("m/s", "m.s**-1", 299792458.0, "299792458", "2.9979246e+08"),
("cm2", "cm**2", 1.0e-20, "10^(-20)", "1e-20"),
],
)
def test_vounit_scale_factor(unit, vounit, number, scale, voscale):
x = u.Unit(f"{scale} {unit}")
assert x == number * u.Unit(unit)
assert x.to_string(format="vounit") == voscale + vounit
def test_vounit_custom():
x = u.Unit("'foo' m", format="vounit")
x_vounit = x.to_string("vounit")
assert x_vounit == "'foo'.m"
x_string = x.to_string()
assert x_string == "foo m"
x = u.Unit("m'foo' m", format="vounit")
assert x.bases[1]._represents.scale == 0.001
x_vounit = x.to_string("vounit")
assert x_vounit == "m.m'foo'"
x_string = x.to_string()
assert x_string == "m mfoo"
def test_vounit_implicit_custom():
# Yikes, this becomes "femto-urlong"... But at least there's a warning.
with pytest.warns(UnitsWarning) as w:
x = u.Unit("furlong/week", format="vounit")
assert x.bases[0]._represents.scale == 1e-15
assert x.bases[0]._represents.bases[0].name == "urlong"
assert len(w) == 2
assert "furlong" in str(w[0].message)
assert "week" in str(w[1].message)
@pytest.mark.parametrize(
"scale, number, string",
[
("10+2", 100, "10**2"),
("10(+2)", 100, "10**2"),
("10**+2", 100, "10**2"),
("10**(+2)", 100, "10**2"),
("10^+2", 100, "10**2"),
("10^(+2)", 100, "10**2"),
("10**2", 100, "10**2"),
("10**(2)", 100, "10**2"),
("10^2", 100, "10**2"),
("10^(2)", 100, "10**2"),
("10-20", 10 ** (-20), "10**-20"),
("10(-20)", 10 ** (-20), "10**-20"),
("10**-20", 10 ** (-20), "10**-20"),
("10**(-20)", 10 ** (-20), "10**-20"),
("10^-20", 10 ** (-20), "10**-20"),
("10^(-20)", 10 ** (-20), "10**-20"),
],
)
def test_fits_scale_factor(scale, number, string):
x = u.Unit(scale + " erg/(s cm**2 Angstrom)", format="fits")
assert x == number * (u.erg / u.s / u.cm**2 / u.Angstrom)
assert x.to_string(format="fits") == string + " erg Angstrom-1 s-1 cm-2"
x = u.Unit(scale + "*erg/(s cm**2 Angstrom)", format="fits")
assert x == number * (u.erg / u.s / u.cm**2 / u.Angstrom)
assert x.to_string(format="fits") == string + " erg Angstrom-1 s-1 cm-2"
def test_fits_scale_factor_errors():
with pytest.raises(ValueError):
x = u.Unit("1000 erg/(s cm**2 Angstrom)", format="fits")
with pytest.raises(ValueError):
x = u.Unit("12 erg/(s cm**2 Angstrom)", format="fits")
x = u.Unit(1.2 * u.erg)
with pytest.raises(ValueError):
x.to_string(format="fits")
x = u.Unit(100.0 * u.erg)
assert x.to_string(format="fits") == "10**2 erg"
def test_double_superscript():
"""Regression test for #5870, #8699, #9218; avoid double superscripts."""
assert (u.deg).to_string("latex") == r"$\mathrm{{}^{\circ}}$"
assert (u.deg**2).to_string("latex") == r"$\mathrm{deg^{2}}$"
assert (u.arcmin).to_string("latex") == r"$\mathrm{{}^{\prime}}$"
assert (u.arcmin**2).to_string("latex") == r"$\mathrm{arcmin^{2}}$"
assert (u.arcsec).to_string("latex") == r"$\mathrm{{}^{\prime\prime}}$"
assert (u.arcsec**2).to_string("latex") == r"$\mathrm{arcsec^{2}}$"
assert (u.hourangle).to_string("latex") == r"$\mathrm{{}^{h}}$"
assert (u.hourangle**2).to_string("latex") == r"$\mathrm{hourangle^{2}}$"
assert (u.electron).to_string("latex") == r"$\mathrm{e^{-}}$"
assert (u.electron**2).to_string("latex") == r"$\mathrm{electron^{2}}$"
def test_no_prefix_superscript():
"""Regression test for gh-911 and #14419."""
assert u.mdeg.to_string("latex") == r"$\mathrm{mdeg}$"
assert u.narcmin.to_string("latex") == r"$\mathrm{narcmin}$"
assert u.parcsec.to_string("latex") == r"$\mathrm{parcsec}$"
assert u.mdeg.to_string("unicode") == "mdeg"
assert u.narcmin.to_string("unicode") == "narcmin"
assert u.parcsec.to_string("unicode") == "parcsec"
@pytest.mark.parametrize(
"power,expected",
(
(1.0, "m"),
(2.0, "m2"),
(-10, "1 / m10"),
(1.5, "m(3/2)"),
(2 / 3, "m(2/3)"),
(7 / 11, "m(7/11)"),
(-1 / 64, "1 / m(1/64)"),
(1 / 100, "m(1/100)"),
(2 / 101, "m(0.019801980198019802)"),
(Fraction(2, 101), "m(2/101)"),
),
)
def test_powers(power, expected):
"""Regression test for #9279 - powers should not be oversimplified."""
unit = u.m**power
s = unit.to_string()
assert s == expected
assert unit == s
@pytest.mark.parametrize(
"string,unit",
[
("\N{MICRO SIGN}g", u.microgram),
("\N{GREEK SMALL LETTER MU}g", u.microgram),
("g\N{MINUS SIGN}1", u.g ** (-1)),
("m\N{SUPERSCRIPT MINUS}\N{SUPERSCRIPT ONE}", 1 / u.m),
("m s\N{SUPERSCRIPT MINUS}\N{SUPERSCRIPT ONE}", u.m / u.s),
("m\N{SUPERSCRIPT TWO}", u.m**2),
("m\N{SUPERSCRIPT PLUS SIGN}\N{SUPERSCRIPT TWO}", u.m**2),
("m\N{SUPERSCRIPT THREE}", u.m**3),
("m\N{SUPERSCRIPT ONE}\N{SUPERSCRIPT ZERO}", u.m**10),
("\N{GREEK CAPITAL LETTER OMEGA}", u.ohm),
("\N{OHM SIGN}", u.ohm), # deprecated but for compatibility
("\N{MICRO SIGN}\N{GREEK CAPITAL LETTER OMEGA}", u.microOhm),
("\N{ANGSTROM SIGN}", u.Angstrom),
("\N{ANGSTROM SIGN} \N{OHM SIGN}", u.Angstrom * u.Ohm),
("\N{LATIN CAPITAL LETTER A WITH RING ABOVE}", u.Angstrom),
("\N{LATIN CAPITAL LETTER A}\N{COMBINING RING ABOVE}", u.Angstrom),
("m\N{ANGSTROM SIGN}", u.milliAngstrom),
("°C", u.deg_C),
("°", u.deg),
("M⊙", u.Msun), # \N{CIRCLED DOT OPERATOR}
("L☉", u.Lsun), # \N{SUN}
("M⊕", u.Mearth), # normal earth symbol = \N{CIRCLED PLUS}
("M♁", u.Mearth), # be generous with \N{EARTH}
("R♃", u.Rjup), # \N{JUPITER}
("′", u.arcmin), # \N{PRIME}
("R∞", u.Ry),
("Mₚ", u.M_p),
],
)
def test_unicode(string, unit):
assert u_format.Generic.parse(string) == unit
assert u.Unit(string) == unit
@pytest.mark.parametrize(
"string",
[
"g\N{MICRO SIGN}",
"g\N{MINUS SIGN}",
"m\N{SUPERSCRIPT MINUS}1",
"m+\N{SUPERSCRIPT ONE}",
"m\N{MINUS SIGN}\N{SUPERSCRIPT ONE}",
"k\N{ANGSTROM SIGN}",
],
)
def test_unicode_failures(string):
with pytest.raises(ValueError):
u.Unit(string)
@pytest.mark.parametrize("format_", ("unicode", "latex", "latex_inline"))
def test_parse_error_message_for_output_only_format(format_):
with pytest.raises(NotImplementedError, match="not parse"):
u.Unit("m", format=format_)
def test_unknown_parser():
with pytest.raises(ValueError, match=r"Unknown.*unicode'\] for output only"):
u.Unit("m", format="foo")
def test_celsius_fits():
assert u.Unit("Celsius", format="fits") == u.deg_C
assert u.Unit("deg C", format="fits") == u.deg_C
# check that compounds do what we expect: what do we expect?
assert u.Unit("deg C kg-1", format="fits") == u.C * u.deg / u.kg
assert u.Unit("Celsius kg-1", format="fits") == u.deg_C / u.kg
assert u.deg_C.to_string("fits") == "Celsius"
@pytest.mark.parametrize(
"format_spec, string",
[
("generic", "dB(1 / m)"),
("latex", r"$\mathrm{dB}$$\mathrm{\left( \mathrm{\frac{1}{m}} \right)}$"),
("latex_inline", r"$\mathrm{dB}$$\mathrm{\left( \mathrm{m^{-1}} \right)}$"),
("console", "dB(m^-1)"),
("unicode", "dB(m⁻¹)"),
],
)
def test_function_format_styles(format_spec, string):
dbunit = u.decibel(u.m**-1)
assert dbunit.to_string(format_spec) == string
assert f"{dbunit:{format_spec}}" == string
@pytest.mark.parametrize(
"format_spec, fraction, string",
[
("console", "multiline", " 1\ndB(-)\n m"),
("console", "inline", "dB(1 / m)"),
("unicode", "multiline", " 1\ndB(─)\n m"),
("unicode", "inline", "dB(1 / m)"),
("latex", False, r"$\mathrm{dB}$$\mathrm{\left( \mathrm{m^{-1}} \right)}$"),
("latex", "inline", r"$\mathrm{dB}$$\mathrm{\left( \mathrm{1 / m} \right)}$"),
],
)
def test_function_format_styles_non_default_fraction(format_spec, fraction, string):
dbunit = u.decibel(u.m**-1)
assert dbunit.to_string(format_spec, fraction=fraction) == string
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Regression tests for the units package."""
import pickle
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import constants as c
from astropy import units as u
from astropy.units import utils
def test_initialisation():
assert u.Unit(u.m) is u.m
ten_meter = u.Unit(10.0 * u.m)
assert ten_meter == u.CompositeUnit(10.0, [u.m], [1])
assert u.Unit(ten_meter) is ten_meter
assert u.Unit(10.0 * ten_meter) == u.CompositeUnit(100.0, [u.m], [1])
foo = u.Unit("foo", (10.0 * ten_meter) ** 2, namespace=locals())
assert foo == u.CompositeUnit(10000.0, [u.m], [2])
assert u.Unit("m") == u.m
assert u.Unit("") == u.dimensionless_unscaled
assert u.one == u.dimensionless_unscaled
assert u.Unit("10 m") == ten_meter
assert u.Unit(10.0) == u.CompositeUnit(10.0, [], [])
assert u.Unit() == u.dimensionless_unscaled
def test_invalid_power():
x = u.m ** Fraction(1, 3)
assert isinstance(x.powers[0], Fraction)
x = u.m ** Fraction(1, 2)
assert isinstance(x.powers[0], float)
# Test the automatic conversion to a fraction
x = u.m ** (1.0 / 3.0)
assert isinstance(x.powers[0], Fraction)
def test_invalid_compare():
assert not (u.m == u.s)
def test_convert():
assert u.h._get_converter(u.s)(1) == 3600
def test_convert_fail():
with pytest.raises(u.UnitsError):
u.cm.to(u.s, 1)
with pytest.raises(u.UnitsError):
(u.cm / u.s).to(u.m, 1)
def test_composite():
assert (u.cm / u.s * u.h)._get_converter(u.m)(1) == 36
assert u.cm * u.cm == u.cm**2
assert u.cm * u.cm * u.cm == u.cm**3
assert u.Hz.to(1000 * u.Hz, 1) == 0.001
def test_str():
assert str(u.cm) == "cm"
def test_repr():
assert repr(u.cm) == 'Unit("cm")'
def test_represents():
assert u.m.represents is u.m
assert u.km.represents.scale == 1000.0
assert u.km.represents.bases == [u.m]
assert u.Ry.scale == 1.0 and u.Ry.bases == [u.Ry]
assert_allclose(u.Ry.represents.scale, 13.605692518464949)
assert u.Ry.represents.bases == [u.eV]
bla = u.def_unit("bla", namespace=locals())
assert bla.represents is bla
blabla = u.def_unit("blabla", 10 * u.hr, namespace=locals())
assert blabla.represents.scale == 10.0
assert blabla.represents.bases == [u.hr]
assert blabla.decompose().scale == 10 * 3600
assert blabla.decompose().bases == [u.s]
def test_units_conversion():
assert_allclose(u.kpc.to(u.Mpc), 0.001)
assert_allclose(u.Mpc.to(u.kpc), 1000)
assert_allclose(u.yr.to(u.Myr), 1.0e-6)
assert_allclose(u.AU.to(u.pc), 4.84813681e-6)
assert_allclose(u.cycle.to(u.rad), 6.283185307179586)
assert_allclose(u.spat.to(u.sr), 12.56637061435917)
def test_units_manipulation():
# Just do some manipulation and check it's happy
(u.kpc * u.yr) ** Fraction(1, 3) / u.Myr
(u.AA * u.erg) ** 9
def test_decompose():
assert u.Ry == u.Ry.decompose()
def test_dimensionless_to_si():
"""
Issue #1150: Test for conversion of dimensionless quantities
to the SI system
"""
testunit = (1.0 * u.kpc) / (1.0 * u.Mpc)
assert testunit.unit.physical_type == "dimensionless"
assert_allclose(testunit.si, 0.001)
def test_dimensionless_to_cgs():
"""
Issue #1150: Test for conversion of dimensionless quantities
to the CGS system
"""
testunit = (1.0 * u.m) / (1.0 * u.km)
assert testunit.unit.physical_type == "dimensionless"
assert_allclose(testunit.cgs, 0.001)
def test_unknown_unit():
with pytest.warns(u.UnitsWarning, match="FOO"):
u.Unit("FOO", parse_strict="warn")
def test_multiple_solidus():
with pytest.warns(
u.UnitsWarning,
match="'m/s/kg' contains multiple slashes, which is discouraged",
):
assert u.Unit("m/s/kg").to_string() == "m / (kg s)"
with pytest.raises(ValueError):
u.Unit("m/s/kg", format="vounit")
# Regression test for #9000: solidi in exponents do not count towards this.
x = u.Unit("kg(3/10) * m(5/2) / s", format="vounit")
assert x.to_string() == "m(5/2) kg(3/10) / s"
def test_unknown_unit3():
unit = u.Unit("FOO", parse_strict="silent")
assert isinstance(unit, u.UnrecognizedUnit)
assert unit.name == "FOO"
unit2 = u.Unit("FOO", parse_strict="silent")
assert unit == unit2
assert unit.is_equivalent(unit2)
unit3 = u.Unit("BAR", parse_strict="silent")
assert unit != unit3
assert not unit.is_equivalent(unit3)
# Also test basic (in)equalities.
assert unit == "FOO"
assert unit != u.m
# next two from gh-7603.
assert unit != None
assert unit not in (None, u.m)
with pytest.raises(ValueError):
unit._get_converter(unit3)
_ = unit.to_string("latex")
_ = unit2.to_string("cgs")
with pytest.raises(ValueError):
u.Unit("BAR", parse_strict="strict")
with pytest.raises(TypeError):
u.Unit(None)
def test_invalid_scale():
with pytest.raises(TypeError):
["a", "b", "c"] * u.m
def test_cds_power():
unit = u.Unit("10+22/cm2", format="cds", parse_strict="silent")
assert unit.scale == 1e22
def test_register():
foo = u.def_unit("foo", u.m**3, namespace=locals())
assert "foo" in locals()
with u.add_enabled_units(foo):
assert "foo" in u.get_current_unit_registry().registry
assert "foo" not in u.get_current_unit_registry().registry
def test_in_units():
speed_unit = u.cm / u.s
_ = speed_unit.in_units(u.pc / u.hour, 1)
def test_null_unit():
assert (u.m / u.m) == u.Unit(1)
def test_unrecognized_equivalency():
assert u.m.is_equivalent("foo") is False
assert u.m.is_equivalent("pc") is True
def test_convertible_exception():
with pytest.raises(u.UnitsError, match=r"length.+ are not convertible"):
u.AA.to(u.h * u.s**2)
def test_convertible_exception2():
with pytest.raises(u.UnitsError, match=r"length. and .+time.+ are not convertible"):
u.m.to(u.s)
def test_invalid_type():
class A:
pass
with pytest.raises(TypeError):
u.Unit(A())
def test_steradian():
"""
Issue #599
"""
assert u.sr.is_equivalent(u.rad * u.rad)
results = u.sr.compose(units=u.cgs.bases)
assert results[0].bases[0] is u.rad
results = u.sr.compose(units=u.cgs.__dict__)
assert results[0].bases[0] is u.sr
def test_decompose_bases():
"""
From issue #576
"""
from astropy.constants import e
from astropy.units import cgs
d = e.esu.unit.decompose(bases=cgs.bases)
assert d._bases == [u.cm, u.g, u.s]
assert d._powers == [Fraction(3, 2), 0.5, -1]
assert d._scale == 1.0
def test_complex_compose():
complex = u.cd * u.sr * u.Wb
composed = complex.compose()
assert set(composed[0]._bases) == {u.lm, u.Wb}
def test_equiv_compose():
composed = u.m.compose(equivalencies=u.spectral())
assert any([u.Hz] == x.bases for x in composed)
def test_empty_compose():
with pytest.raises(u.UnitsError):
u.m.compose(units=[])
def _unit_as_str(unit):
# This function serves two purposes - it is used to sort the units to
    # test alphabetically, and it is also used to allow pytest to show the unit
# in the [] when running the parametrized tests.
return str(unit)
# We use a set to make sure we don't have any duplicates.
COMPOSE_ROUNDTRIP = set()
for val in u.__dict__.values():
if isinstance(val, u.UnitBase) and not isinstance(val, u.PrefixUnit):
COMPOSE_ROUNDTRIP.add(val)
@pytest.mark.parametrize(
"unit", sorted(COMPOSE_ROUNDTRIP, key=_unit_as_str), ids=_unit_as_str
)
def test_compose_roundtrip(unit):
composed_list = unit.decompose().compose()
found = False
for composed in composed_list:
if len(composed.bases):
if composed.bases[0] is unit:
found = True
break
elif len(unit.bases) == 0:
found = True
break
assert found
# We use a set to make sure we don't have any duplicates.
COMPOSE_CGS_TO_SI = set()
for val in u.cgs.__dict__.values():
# Can't decompose Celsius
if (
isinstance(val, u.UnitBase)
and not isinstance(val, u.PrefixUnit)
and val != u.cgs.deg_C
):
COMPOSE_CGS_TO_SI.add(val)
@pytest.mark.parametrize(
"unit", sorted(COMPOSE_CGS_TO_SI, key=_unit_as_str), ids=_unit_as_str
)
def test_compose_cgs_to_si(unit):
si = unit.to_system(u.si)
assert [x.is_equivalent(unit) for x in si]
assert si[0] == unit.si
# We use a set to make sure we don't have any duplicates.
COMPOSE_SI_TO_CGS = set()
for val in u.si.__dict__.values():
# Can't decompose Celsius
if (
isinstance(val, u.UnitBase)
and not isinstance(val, u.PrefixUnit)
and val != u.si.deg_C
):
COMPOSE_SI_TO_CGS.add(val)
@pytest.mark.parametrize(
"unit", sorted(COMPOSE_SI_TO_CGS, key=_unit_as_str), ids=_unit_as_str
)
def test_compose_si_to_cgs(unit):
# Can't convert things with Ampere to CGS without more context
try:
cgs = unit.to_system(u.cgs)
except u.UnitsError:
if u.A in unit.decompose().bases:
pass
else:
raise
else:
assert [x.is_equivalent(unit) for x in cgs]
assert cgs[0] == unit.cgs
def test_to_si():
"""Check units that are not official derived units.
Should not appear on its own or as part of a composite unit.
"""
# TODO: extend to all units not listed in Tables 1--6 of
# https://physics.nist.gov/cuu/Units/units.html
# See gh-10585.
# This was always the case
assert u.bar.si is not u.bar
# But this used to fail.
assert u.bar not in (u.kg / (u.s**2 * u.sr * u.nm)).si._bases
def test_to_cgs():
assert u.Pa.to_system(u.cgs)[1]._bases[0] is u.Ba
assert u.Pa.to_system(u.cgs)[1]._scale == 10.0
def test_decompose_to_cgs():
from astropy.units import cgs
assert u.m.decompose(bases=cgs.bases)._bases[0] is cgs.cm
def test_compose_issue_579():
unit = u.kg * u.s**2 / u.m
result = unit.compose(units=[u.N, u.s, u.m])
assert len(result) == 1
assert result[0]._bases == [u.s, u.N, u.m]
assert result[0]._powers == [4, 1, -2]
def test_compose_prefix_unit():
x = u.m.compose(units=(u.m,))
assert x[0].bases[0] is u.m
assert x[0].scale == 1.0
x = u.m.compose(units=[u.km], include_prefix_units=True)
assert x[0].bases[0] is u.km
assert x[0].scale == 0.001
x = u.m.compose(units=[u.km])
assert x[0].bases[0] is u.km
assert x[0].scale == 0.001
x = (u.km / u.s).compose(units=(u.pc, u.Myr))
assert x[0].bases == [u.pc, u.Myr]
assert_allclose(x[0].scale, 1.0227121650537077)
with pytest.raises(u.UnitsError):
(u.km / u.s).compose(units=(u.pc, u.Myr), include_prefix_units=False)
def test_self_compose():
unit = u.kg * u.s
assert len(unit.compose(units=[u.g, u.s])) == 1
def test_compose_failed():
unit = u.kg
with pytest.raises(u.UnitsError):
unit.compose(units=[u.N])
def test_compose_fractional_powers():
# Warning: with a complicated unit, this test becomes very slow;
# e.g., x = (u.kg / u.s ** 3 * u.au ** 2.5 / u.yr ** 0.5 / u.sr ** 2)
# takes 3 s
x = u.m**0.5 / u.yr**1.5
factored = x.compose()
for unit in factored:
assert x.decompose() == unit.decompose()
factored = x.compose(units=u.cgs)
for unit in factored:
assert x.decompose() == unit.decompose()
factored = x.compose(units=u.si)
for unit in factored:
assert x.decompose() == unit.decompose()
def test_compose_best_unit_first():
results = u.l.compose()
assert len(results[0].bases) == 1
assert results[0].bases[0] is u.l
results = (u.s**-1).compose()
assert results[0].bases[0] in (u.Hz, u.Bq)
results = (u.Ry.decompose()).compose()
assert results[0].bases[0] is u.Ry
def test_compose_no_duplicates():
new = u.kg / u.s**3 * u.au**2.5 / u.yr**0.5 / u.sr**2
composed = new.compose(units=u.cgs.bases)
assert len(composed) == 1
def test_long_int():
"""
Issue #672
"""
sigma = 10**21 * u.M_p / u.cm**2
sigma.to(u.M_sun / u.pc**2)
def test_endian_independence():
"""
Regression test for #744
A logic issue in the units code meant that big endian arrays could not be
converted because the dtype is '>f4', not 'float32', and the code was
looking for the strings 'float' or 'int'.
"""
for endian in ["<", ">"]:
for ntype in ["i", "f"]:
for byte in ["4", "8"]:
x = np.array([1, 2, 3], dtype=(endian + ntype + byte))
u.m.to(u.cm, x)
def test_radian_base():
"""
Issue #863
"""
assert (1 * u.degree).si.unit == u.rad
def test_no_as():
# We don't define 'as', since it is a keyword, but we
# do want to define the long form (`attosecond`).
assert not hasattr(u, "as")
assert hasattr(u, "attosecond")
def test_no_duplicates_in_names():
# Regression test for #5036
assert u.ct.names == ["ct", "count"]
assert u.ct.short_names == ["ct", "count"]
assert u.ct.long_names == ["count"]
assert set(u.ph.names) == set(u.ph.short_names) | set(u.ph.long_names)
def test_pickling():
p = pickle.dumps(u.m)
other = pickle.loads(p)
assert other is u.m
new_unit = u.IrreducibleUnit(["foo"], format={"baz": "bar"})
# This is local, so the unit should not be registered.
assert "foo" not in u.get_current_unit_registry().registry
# Test pickling of this unregistered unit.
p = pickle.dumps(new_unit)
new_unit_copy = pickle.loads(p)
assert new_unit_copy is not new_unit
assert new_unit_copy.names == ["foo"]
assert new_unit_copy.get_format_name("baz") == "bar"
# It should still not be registered.
assert "foo" not in u.get_current_unit_registry().registry
# Now try the same with a registered unit.
with u.add_enabled_units([new_unit]):
p = pickle.dumps(new_unit)
assert "foo" in u.get_current_unit_registry().registry
new_unit_copy = pickle.loads(p)
assert new_unit_copy is new_unit
# Check that a registered unit can be loaded and that it gets re-enabled.
with u.add_enabled_units([]):
assert "foo" not in u.get_current_unit_registry().registry
new_unit_copy = pickle.loads(p)
assert new_unit_copy is not new_unit
assert new_unit_copy.names == ["foo"]
assert new_unit_copy.get_format_name("baz") == "bar"
assert "foo" in u.get_current_unit_registry().registry
# And just to be sure, that it gets removed outside of the context.
assert "foo" not in u.get_current_unit_registry().registry
def test_pickle_between_sessions():
"""We cannot really test between sessions easily, so fake it.
This test can be changed if the pickle protocol or the code
changes enough that it no longer works.
"""
hash_m = hash(u.m)
unit = pickle.loads(
b"\x80\x04\x95\xd6\x00\x00\x00\x00\x00\x00\x00\x8c\x12"
b"astropy.units.core\x94\x8c\x1a_recreate_irreducible_unit"
b"\x94\x93\x94h\x00\x8c\x0fIrreducibleUnit\x94\x93\x94]\x94"
b"(\x8c\x01m\x94\x8c\x05meter\x94e\x88\x87\x94R\x94}\x94(\x8c\x06"
b"_names\x94]\x94(h\x06h\x07e\x8c\x0c_short_names"
b"\x94]\x94h\x06a\x8c\x0b_long_names\x94]\x94h\x07a\x8c\x07"
b"_format\x94}\x94\x8c\x07__doc__\x94\x8c "
b"meter: base unit of length in SI\x94ub."
)
assert unit is u.m
assert hash(u.m) == hash_m
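# Note (assumption, for context): the hard-coded byte string above is
# essentially what ``pickle.dumps(u.m)`` produces under pickle protocol 4 in a
# separate interpreter session; it is baked in so the test does not depend on
# the pickling behaviour of the current session.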
@pytest.mark.parametrize(
"unit",
[u.IrreducibleUnit(["foo"], format={"baz": "bar"}), u.Unit("m_per_s", u.m / u.s)],
)
def test_pickle_does_not_keep_memoized_hash(unit):
"""
Tests private attribute since the problem with _hash being pickled
and restored only appeared if the unpickling was done in another
session, for which the hash no longer was valid, and it is difficult
to mimic separate sessions in a simple test. See gh-11872.
"""
unit_hash = hash(unit)
assert unit._hash is not None
unit_copy = pickle.loads(pickle.dumps(unit))
# unit is not registered so we get a copy.
assert unit_copy is not unit
assert unit_copy._hash is None
assert hash(unit_copy) == unit_hash
with u.add_enabled_units([unit]):
# unit is registered, so we get a reference.
unit_ref = pickle.loads(pickle.dumps(unit))
if isinstance(unit, u.IrreducibleUnit):
assert unit_ref is unit
else:
assert unit_ref is not unit
# pickle.load used to override the hash, although in this case
# it would be the same anyway, so not clear this tests much.
assert hash(unit) == unit_hash
def test_pickle_unrecognized_unit():
"""
Issue #2047
"""
a = u.Unit("asdf", parse_strict="silent")
pickle.loads(pickle.dumps(a))
def test_duplicate_define():
with pytest.raises(ValueError):
u.def_unit("m", namespace=u.__dict__)
def test_all_units():
from astropy.units.core import get_current_unit_registry
registry = get_current_unit_registry()
assert len(registry.all_units) > len(registry.non_prefix_units)
def test_repr_latex():
assert u.m._repr_latex_() == u.m.to_string("latex")
def test_operations_with_strings():
assert u.m / "5s" == (u.m / (5.0 * u.s))
assert u.m * "5s" == (5.0 * u.m * u.s)
def test_comparison():
assert u.m > u.cm
assert u.m >= u.cm
assert u.cm < u.m
assert u.cm <= u.m
with pytest.raises(u.UnitsError):
u.m > u.kg
def test_compose_into_arbitrary_units():
# Issue #1438
from astropy.constants import G
G.decompose([u.kg, u.km, u.Unit("15 s")])
def test_unit_multiplication_with_string():
"""Check that multiplication with strings produces the correct unit."""
u1 = u.cm
us = "kg"
assert us * u1 == u.Unit(us) * u1
assert u1 * us == u1 * u.Unit(us)
def test_unit_division_by_string():
"""Check that multiplication with strings produces the correct unit."""
u1 = u.cm
us = "kg"
assert us / u1 == u.Unit(us) / u1
assert u1 / us == u1 / u.Unit(us)
def test_sorted_bases():
"""See #1616."""
assert (u.m * u.Jy).bases == (u.Jy * u.m).bases
def test_megabit():
"""See #1543"""
assert u.Mbit is u.Mb
assert u.megabit is u.Mb
assert u.Mbyte is u.MB
assert u.megabyte is u.MB
def test_composite_unit_get_format_name():
"""See #1576"""
unit1 = u.Unit("nrad/s")
unit2 = u.Unit("Hz(1/2)")
assert str(u.CompositeUnit(1, [unit1, unit2], [1, -1])) == "nrad / (Hz(1/2) s)"
def test_unicode_policy():
from astropy.tests.helper import assert_follows_unicode_guidelines
assert_follows_unicode_guidelines(u.degree, roundtrip=u.__dict__)
def test_suggestions():
for search, matches in [
("microns", "micron"),
("s/microns", "micron"),
("M", "m"),
("metre", "meter"),
("angstroms", "Angstrom or angstrom"),
("milimeter", "millimeter"),
("ångström", "Angstrom, angstrom, mAngstrom or mangstrom"),
("kev", "EV, eV, kV or keV"),
]:
with pytest.raises(ValueError, match=f"Did you mean {matches}"):
u.Unit(search)
def test_fits_hst_unit():
"""See #1911."""
with pytest.warns(u.UnitsWarning, match="multiple slashes") as w:
x = u.Unit("erg /s /cm**2 /angstrom")
assert x == u.erg * u.s**-1 * u.cm**-2 * u.angstrom**-1
assert len(w) == 1
def test_barn_prefixes():
"""Regression test for https://github.com/astropy/astropy/issues/3753"""
assert u.fbarn is u.femtobarn
assert u.pbarn is u.picobarn
def test_fractional_powers():
"""See #2069"""
m = 1e9 * u.Msun
tH = 1.0 / (70.0 * u.km / u.s / u.Mpc)
vc = 200 * u.km / u.s
x = (c.G**2 * m**2 * tH.cgs) ** Fraction(1, 3) / vc
v1 = x.to("pc")
x = (c.G**2 * m**2 * tH) ** Fraction(1, 3) / vc
v2 = x.to("pc")
x = (c.G**2 * m**2 * tH.cgs) ** (1.0 / 3.0) / vc
v3 = x.to("pc")
x = (c.G**2 * m**2 * tH) ** (1.0 / 3.0) / vc
v4 = x.to("pc")
assert_allclose(v1, v2)
assert_allclose(v2, v3)
assert_allclose(v3, v4)
x = u.m ** (1.0 / 101.0)
assert isinstance(x.powers[0], float)
x = u.m ** (3.0 / 7.0)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0].numerator == 3
assert x.powers[0].denominator == 7
x = u.cm ** Fraction(1, 2) * u.cm ** Fraction(2, 3)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0] == Fraction(7, 6)
# Regression test for #9258.
x = (u.TeV ** (-2.2)) ** (1 / -2.2)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0] == Fraction(1, 1)
def test_sqrt_mag():
sqrt_mag = u.mag**0.5
assert hasattr(sqrt_mag.decompose().scale, "imag")
assert (sqrt_mag.decompose()) ** 2 == u.mag
def test_composite_compose():
# Issue #2382
composite_unit = u.s.compose(units=[u.Unit("s")])[0]
u.s.compose(units=[composite_unit])
def test_data_quantities():
assert u.byte.is_equivalent(u.bit)
def test_compare_with_none():
# Ensure that equality comparisons with `None` work, and don't
# raise exceptions. We are deliberately not using `is None` here
# because that doesn't trigger the bug. See #3108.
assert not (u.m == None)
assert u.m != None
def test_validate_power_detect_fraction():
frac = utils.validate_power(1.1666666666666665)
assert isinstance(frac, Fraction)
assert frac.numerator == 7
assert frac.denominator == 6
def test_complex_fractional_rounding_errors():
# See #3788
kappa = 0.34 * u.cm**2 / u.g
r_0 = 886221439924.7849 * u.cm
q = 1.75
rho_0 = 5e-10 * u.solMass / u.solRad**3
y = 0.5
beta = 0.19047619047619049
a = 0.47619047619047628
m_h = 1e6 * u.solMass
t1 = 2 * c.c / (kappa * np.sqrt(np.pi))
t2 = (r_0**-q) / (rho_0 * y * beta * (a * c.G * m_h) ** 0.5)
result = (t1 * t2) ** -0.8
assert result.unit.physical_type == "length"
result.to(u.solRad)
def test_fractional_rounding_errors_simple():
x = (u.m**1.5) ** Fraction(4, 5)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0].numerator == 6
assert x.powers[0].denominator == 5
def test_enable_unit_groupings():
from astropy.units import cds
with cds.enable():
assert cds.geoMass in u.kg.find_equivalent_units()
from astropy.units import imperial
with imperial.enable():
assert imperial.inch in u.m.find_equivalent_units()
def test_unit_summary_prefixes():
"""
Test for a few units that the unit summary table correctly reports
whether or not that unit supports prefixes.
Regression test for https://github.com/astropy/astropy/issues/3835
"""
from astropy.units import astrophys
for summary in utils._iter_unit_summary(astrophys.__dict__):
unit, _, _, _, prefixes = summary
if unit.name == "lyr":
assert prefixes
elif unit.name == "pc":
assert prefixes
elif unit.name == "barn":
assert prefixes
elif unit.name == "cycle":
assert prefixes == "No"
elif unit.name == "spat":
assert prefixes == "No"
elif unit.name == "vox":
assert prefixes == "Yes"
def test_raise_to_negative_power():
"""Test that order of bases is changed when raising to negative power.
Regression test for https://github.com/astropy/astropy/issues/8260
"""
m2s2 = u.m**2 / u.s**2
spm = m2s2 ** (-1 / 2)
assert spm.bases == [u.s, u.m]
assert spm.powers == [1, -1]
assert spm == u.s / u.m
@pytest.mark.parametrize(
"name, symbol, multiplying_factor",
[
("quetta", "Q", 1e30),
("ronna", "R", 1e27),
("yotta", "Y", 1e24),
("zetta", "Z", 1e21),
("exa", "E", 1e18),
("peta", "P", 1e15),
("tera", "T", 1e12),
("giga", "G", 1e9),
("mega", "M", 1e6),
("kilo", "k", 1e3),
("deca", "da", 1e1),
("deci", "d", 1e-1),
("centi", "c", 1e-2),
("milli", "m", 1e-3),
("micro", "u", 1e-6),
("nano", "n", 1e-9),
("pico", "p", 1e-12),
("femto", "f", 1e-15),
("atto", "a", 1e-18),
("zepto", "z", 1e-21),
("yocto", "y", 1e-24),
("ronto", "r", 1e-27),
("quecto", "q", 1e-30),
],
)
def test_si_prefixes(name, symbol, multiplying_factor):
base = 1 * u.g
quantity_from_symbol = base.to(f"{symbol}g")
quantity_from_name = base.to(f"{name}gram")
assert u.isclose(quantity_from_name, base)
assert u.isclose(quantity_from_symbol, base)
value_ratio = base.value / quantity_from_symbol.value
assert u.isclose(value_ratio, multiplying_factor)
a5eed8ad7c1d9397b558eb03dd6104a69c62e3182e6d9145519d616a81b3c5ab
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import itertools
import numpy as np
import numpy.lib.recfunctions as rfn
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.units.quantity_helper.function_helpers import (
ARRAY_FUNCTION_ENABLED,
DISPATCHED_FUNCTIONS,
FUNCTION_HELPERS,
IGNORED_FUNCTIONS,
SUBCLASS_SAFE_FUNCTIONS,
TBD_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from astropy.utils.compat import NUMPY_LT_1_23, NUMPY_LT_1_24
needs_array_function = pytest.mark.xfail(
not ARRAY_FUNCTION_ENABLED, reason="Needs __array_function__ support"
)
# To get the functions that could be covered, we look for those that
# are wrapped. Of course, this does not give a full list pre-1.17.
def get_wrapped_functions(*modules):
wrapped_functions = {}
for mod in modules:
for name, f in mod.__dict__.items():
if f is np.printoptions or name.startswith("_"):
continue
if callable(f) and hasattr(f, "__wrapped__"):
wrapped_functions[name] = f
return wrapped_functions
all_wrapped_functions = get_wrapped_functions(
np, np.fft, np.linalg, np.lib.recfunctions
)
all_wrapped = set(all_wrapped_functions.values())
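# ``all_wrapped_functions`` maps names to the numpy functions that expose a
# ``__wrapped__`` attribute, i.e., that go through numpy's override machinery;
# exactly which functions qualify depends on the installed numpy version, so
# the code below only ever looks names up in it rather than assuming
# membership.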
class CoverageMeta(type):
"""Meta class that tracks which functions are covered by tests.
Assumes that a test is called 'test_<function_name>'.
"""
covered = set()
def __new__(mcls, name, bases, members):
for k, v in members.items():
if inspect.isfunction(v) and k.startswith("test"):
f = k.replace("test_", "")
if f in all_wrapped_functions:
mcls.covered.add(all_wrapped_functions[f])
return super().__new__(mcls, name, bases, members)
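# Illustrative sketch (not part of the test suite): the bookkeeping above makes
# it possible to ask which wrapped numpy functions still lack a dedicated test,
# e.g. via simple set arithmetic once all test classes have been defined:
#
#     untested = all_wrapped - CoverageMeta.covered
#
# A completeness check along these lines is the reason the metaclass records
# covered functions at class-creation time.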
class BasicTestSetup(metaclass=CoverageMeta):
"""Test setup for functions that should not change the unit.
Also provides a default Quantity with shape (3, 3) and units of m.
"""
def setup_method(self):
self.q = np.arange(9.0).reshape(3, 3) / 4.0 * u.m
class InvariantUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
o = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs) * self.q.unit
assert o.shape == expected.shape
assert np.all(o == expected)
class NoUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
        expected = func(self.q.value, *args, **kwargs)
assert type(out) is type(expected)
if isinstance(expected, tuple):
assert all(np.all(o == x) for o, x in zip(out, expected))
else:
assert np.all(out == expected)
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.q) == (3, 3)
def test_size(self):
assert np.size(self.q) == 9
def test_ndim(self):
assert np.ndim(self.q) == 2
class TestShapeManipulation(InvariantUnitTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (9, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
q = 1.0 * u.m
o, so = np.atleast_1d(q, self.q)
assert o.shape == (1,)
assert o == q
expected = np.atleast_1d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_2d(self):
q = 1.0 * u.m
o, so = np.atleast_2d(q, self.q)
assert o.shape == (1, 1)
assert o == q
expected = np.atleast_2d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_3d(self):
q = 1.0 * u.m
o, so = np.atleast_3d(q, self.q)
assert o.shape == (1, 1, 1)
assert o == q
expected = np.atleast_3d(self.q.value) * u.m
assert np.all(so == expected)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.q[:, np.newaxis, :])
assert o.shape == (3, 3)
assert np.all(o == self.q)
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
self.check(np.broadcast_to, (3, 3, 3), subok=True)
out = np.broadcast_to(self.q, (3, 3, 3))
assert type(out) is np.ndarray # NOT Quantity
def test_broadcast_arrays(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
q2 = np.ones((3, 3, 3)) / u.s
o1, o2 = np.broadcast_arrays(self.q, q2, subok=True)
assert isinstance(o1, u.Quantity)
assert isinstance(o2, u.Quantity)
assert o1.shape == o2.shape == (3, 3, 3)
assert np.all(o1 == self.q)
assert np.all(o2 == q2)
a1, a2 = np.broadcast_arrays(self.q, q2)
assert type(a1) is np.ndarray
assert type(a2) is np.ndarray
class TestArgFunctions(NoUnitTestSetup):
def test_argmin(self):
self.check(np.argmin)
def test_argmax(self):
self.check(np.argmax)
def test_argsort(self):
self.check(np.argsort)
def test_lexsort(self):
self.check(np.lexsort)
def test_searchsorted(self):
q = self.q.ravel()
q2 = np.array([150.0, 350.0]) * u.cm
out = np.searchsorted(q, q2)
expected = np.searchsorted(q.value, q2.to_value(q.unit))
assert np.all(out == expected)
def test_nonzero(self):
self.check(np.nonzero)
def test_argwhere(self):
self.check(np.argwhere)
@needs_array_function
def test_argpartition(self):
self.check(np.argpartition, 2)
def test_flatnonzero(self):
self.check(np.flatnonzero)
class TestAlongAxis(BasicTestSetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
out = np.take_along_axis(self.q, indices, axis=0)
expected = np.take_along_axis(self.q.value, indices, axis=0) * self.q.unit
assert np.all(out == expected)
def test_put_along_axis(self):
q = self.q.copy()
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
np.put_along_axis(q, indices, axis=0, values=-100 * u.cm)
expected = q.value.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
expected = expected * q.unit
assert np.all(q == expected)
@pytest.mark.parametrize("axis", (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.q)
expected = np.apply_along_axis(np.square, axis, self.q.value) * self.q.unit**2
assert_array_equal(out, expected)
@needs_array_function
@pytest.mark.parametrize("axes", ((1,), (0,), (0, 1)))
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.sum(np.square(x), axis)
out = np.apply_over_axes(function, self.q, axes)
expected = np.apply_over_axes(function, self.q.value, axes)
expected = expected * self.q.unit ** (2 * len(axes))
assert_array_equal(out, expected)
class TestIndicesFrom(NoUnitTestSetup):
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantUnitTestSetup):
def setup_method(self):
self.q = (np.arange(9.0).reshape(3, 3) + 1j) * u.m
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantUnitTestSetup):
@needs_array_function
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.q)
assert_array_equal(copy, self.q)
@needs_array_function
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.q)
assert_array_equal(farray, self.q)
def test_empty_like(self):
o = np.empty_like(self.q)
assert o.shape == (3, 3)
assert isinstance(o, u.Quantity)
assert o.unit == self.q.unit
o2 = np.empty_like(prototype=self.q)
assert o2.shape == (3, 3)
assert isinstance(o2, u.Quantity)
assert o2.unit == self.q.unit
o3 = np.empty_like(self.q, subok=False)
assert type(o3) is np.ndarray
def test_zeros_like(self):
self.check(np.zeros_like)
o2 = np.zeros_like(a=self.q)
assert_array_equal(o2, self.q * 0.0)
def test_ones_like(self):
self.check(np.ones_like)
@needs_array_function
def test_full_like(self):
o = np.full_like(self.q, 0.5 * u.km)
expected = np.empty_like(self.q.value) * u.m
expected[...] = 0.5 * u.km
assert np.all(o == expected)
with pytest.raises(u.UnitsError):
np.full_like(self.q, 0.5 * u.s)
class TestAccessingParts(InvariantUnitTestSetup):
def test_diag(self):
self.check(np.diag)
@needs_array_function
def test_diag_1d_input(self):
# Also check 1-D case; drops unit w/o __array_function__.
q = self.q.ravel()
o = np.diag(q)
expected = np.diag(q.value) << q.unit
assert o.unit == self.q.unit
assert o.shape == expected.shape
assert_array_equal(o, expected)
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False, True], self.q, axis=0)
expected = np.compress([True, False, True], self.q.value, axis=0) * self.q.unit
assert np.all(o == expected)
def test_extract(self):
o = np.extract([True, False, True], self.q)
expected = np.extract([True, False, True], self.q.value) * self.q.unit
assert np.all(o == expected)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_trim_zeros(self):
q = self.q.ravel()
out = np.trim_zeros(q)
expected = np.trim_zeros(q.value) * u.m
assert np.all(out == expected)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(metaclass=CoverageMeta):
def test_put(self):
q = np.arange(3.0) * u.m
np.put(q, [0, 2], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
@needs_array_function
def test_putmask(self):
q = np.arange(3.0) * u.m
mask = [True, False, True]
values = [50, 0, 150] * u.cm
np.putmask(q, mask, values)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
with pytest.raises(u.UnitsError):
np.putmask(q, mask, values.value)
with pytest.raises(u.UnitsError):
np.putmask(q.value, mask, values)
a = np.arange(3.0)
values = [50, 0, 150] * u.percent
np.putmask(a, mask, values)
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
@needs_array_function
def test_place(self):
q = np.arange(3.0) * u.m
np.place(q, [True, False, True], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.0)
np.place(a, [True, False, True], [50, 150] * u.percent)
assert type(a) is np.ndarray
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
@needs_array_function
def test_copyto(self):
q = np.arange(3.0) * u.m
np.copyto(q, [50, 0, 150] * u.cm, where=[True, False, True])
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.0)
np.copyto(a, [50, 0, 150] * u.percent, where=[True, False, True])
assert type(a) is np.ndarray
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
def test_fill_diagonal(self):
q = np.arange(9.0).reshape(3, 3) * u.m
expected = q.value.copy()
np.fill_diagonal(expected, 0.25)
expected = expected * u.m
np.fill_diagonal(q, 25.0 * u.cm)
assert q.unit == u.m
assert np.all(q == expected)
class TestRepeat(InvariantUnitTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
@needs_array_function
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(metaclass=CoverageMeta):
def setup_method(self):
self.q1 = np.arange(6.0).reshape(2, 3) * u.m
self.q2 = self.q1.to(u.cm)
def check(self, func, *args, **kwargs):
q_list = kwargs.pop("q_list", [self.q1, self.q2])
q_ref = kwargs.pop("q_ref", q_list[0])
o = func(q_list, *args, **kwargs)
v_list = [q_ref._to_own_unit(q) for q in q_list]
expected = func(v_list, *args, **kwargs) * q_ref.unit
assert o.shape == expected.shape
assert np.all(o == expected)
@needs_array_function
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
# regression test for gh-13322.
self.check(np.concatenate, dtype="f4")
self.check(
np.concatenate,
q_list=[np.zeros(self.q1.shape), self.q1, self.q2],
q_ref=self.q1,
)
out = np.empty((4, 3)) * u.dimensionless_unscaled
result = np.concatenate([self.q1, self.q2], out=out)
assert out is result
assert out.unit == self.q1.unit
expected = (
np.concatenate([self.q1.value, self.q2.to_value(self.q1.unit)])
* self.q1.unit
)
assert np.all(result == expected)
with pytest.raises(TypeError):
np.concatenate([self.q1, object()])
@needs_array_function
def test_stack(self):
self.check(np.stack)
@needs_array_function
def test_column_stack(self):
self.check(np.column_stack)
@needs_array_function
def test_hstack(self):
self.check(np.hstack)
@needs_array_function
def test_vstack(self):
self.check(np.vstack)
@needs_array_function
def test_dstack(self):
self.check(np.dstack)
@needs_array_function
def test_block(self):
self.check(np.block)
result = np.block([[0.0, 1.0 * u.m], [1.0 * u.cm, 2.0 * u.km]])
assert np.all(result == np.block([[0, 1.0], [0.01, 2000.0]]) << u.m)
@needs_array_function
def test_append(self):
out = np.append(self.q1, self.q2, axis=0)
assert out.unit == self.q1.unit
expected = (
np.append(self.q1.value, self.q2.to_value(self.q1.unit), axis=0)
* self.q1.unit
)
assert np.all(out == expected)
a = np.arange(3.0)
result = np.append(a, 50.0 * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.append(a, 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
@needs_array_function
def test_insert(self):
# Unit of inserted values is not ignored.
q = np.arange(12.0).reshape(6, 2) * u.m
out = np.insert(q, (3, 5), [50.0, 25.0] * u.cm)
assert isinstance(out, u.Quantity)
assert out.unit == q.unit
expected = np.insert(q.value, (3, 5), [0.5, 0.25]) << q.unit
assert np.all(out == expected)
# 0 can have any unit.
out2 = np.insert(q, (3, 5), 0)
expected2 = np.insert(q.value, (3, 5), 0) << q.unit
assert np.all(out2 == expected2)
a = np.arange(3.0)
result = np.insert(a, (2,), 50.0 * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.insert(a, (2,), 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
with pytest.raises(TypeError):
np.insert(q, 3 * u.cm, 50.0 * u.cm)
with pytest.raises(u.UnitsError):
np.insert(q, (3, 5), 0.0 * u.s)
@needs_array_function
def test_pad(self):
q = np.arange(1.0, 6.0) * u.m
out = np.pad(q, (2, 3), "constant", constant_values=(0.0, 150.0 * u.cm))
assert out.unit == q.unit
expected = (
np.pad(q.value, (2, 3), "constant", constant_values=(0.0, 1.5)) * q.unit
)
assert np.all(out == expected)
out2 = np.pad(q, (2, 3), "constant", constant_values=150.0 * u.cm)
assert out2.unit == q.unit
expected2 = np.pad(q.value, (2, 3), "constant", constant_values=1.5) * q.unit
assert np.all(out2 == expected2)
out3 = np.pad(q, (2, 3), "linear_ramp", end_values=(25.0 * u.cm, 0.0))
assert out3.unit == q.unit
expected3 = (
np.pad(q.value, (2, 3), "linear_ramp", end_values=(0.25, 0.0)) * q.unit
)
assert np.all(out3 == expected3)
class TestSplit(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.arange(54.0).reshape(3, 3, 6) * u.m
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
expected = [x * self.q.unit for x in expected]
assert len(out) == len(expected)
assert all(o.shape == x.shape for o, x in zip(out, expected))
assert all(np.all(o == x) for o, x in zip(out, expected))
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestUfuncReductions(InvariantUnitTestSetup):
def test_max(self):
self.check(np.max)
def test_min(self):
self.check(np.min)
def test_amax(self):
self.check(np.amax)
def test_amin(self):
self.check(np.amin)
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
with pytest.raises(TypeError):
np.any(self.q)
def test_all(self):
with pytest.raises(TypeError):
np.all(self.q)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`sometrue` is deprecated as of NumPy 1.25.0")
def test_sometrue(self):
with pytest.raises(TypeError):
np.sometrue(self.q)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`alltrue` is deprecated as of NumPy 1.25.0")
def test_alltrue(self):
with pytest.raises(TypeError):
np.alltrue(self.q)
def test_prod(self):
with pytest.raises(u.UnitsError):
np.prod(self.q)
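    # prod and cumprod (and their nan-variants further below) raise UnitsError
    # for dimensionful quantities: the unit of the result would depend on how
    # many elements are multiplied, so only dimensionless input has a
    # well-defined result.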
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`product` is deprecated as of NumPy 1.25.0")
def test_product(self):
with pytest.raises(u.UnitsError):
np.product(self.q)
def test_cumprod(self):
with pytest.raises(u.UnitsError):
np.cumprod(self.q)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`cumproduct` is deprecated as of NumPy 1.25.0")
def test_cumproduct(self):
with pytest.raises(u.UnitsError):
np.cumproduct(self.q)
class TestUfuncLike(InvariantUnitTestSetup):
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round(self):
self.check(np.round)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`round_` is deprecated as of NumPy 1.25.0")
def test_round_(self):
self.check(np.round_)
def test_around(self):
self.check(np.around)
def test_fix(self):
self.check(np.fix)
def test_angle(self):
q = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j]) * u.m
out = np.angle(q)
expected = np.angle(q.value) * u.radian
assert np.all(out == expected)
def test_i0(self):
q = np.array([0.0, 10.0, 20.0]) * u.percent
out = np.i0(q)
expected = np.i0(q.to_value(u.one)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.i0(self.q)
def test_clip(self):
qmin = 200 * u.cm
qmax = [270, 280, 290] * u.cm
out = np.clip(self.q, qmin, qmax)
unit = self.q.unit
expected = (
np.clip(self.q.value, qmin.to_value(unit), qmax.to_value(unit)) * unit
)
assert np.all(out == expected)
@needs_array_function
def test_sinc(self):
q = [0.0, 3690.0, -270.0, 690.0] * u.deg
out = np.sinc(q)
expected = np.sinc(q.to_value(u.radian)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.sinc(1.0 * u.one)
@needs_array_function
def test_where(self):
out = np.where([True, False, True], self.q, 1.0 * u.km)
expected = np.where([True, False, True], self.q.value, 1000.0) * self.q.unit
assert np.all(out == expected)
@needs_array_function
def test_choose(self):
# from np.choose docstring
a = np.array([0, 1]).reshape((2, 1, 1))
q1 = np.array([1, 2, 3]).reshape((1, 3, 1)) * u.cm
q2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5)) * u.m
out = np.choose(a, (q1, q2))
        # result is 2x3x5: res[0,:,:] comes from q1, res[1,:,:] from q2 (in cm)
expected = np.choose(a, (q1.value, q2.to_value(q1.unit))) * u.cm
assert np.all(out == expected)
@needs_array_function
def test_select(self):
q = self.q
out = np.select(
[q < 0.55 * u.m, q > 1.0 * u.m], [q, q.to(u.cm)], default=-1.0 * u.km
)
expected = (
np.select([q.value < 0.55, q.value > 1], [q.value, q.value], default=-1000)
* u.m
)
assert np.all(out == expected)
@needs_array_function
def test_real_if_close(self):
q = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j]) * u.m
out = np.real_if_close(q)
expected = np.real_if_close(q.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_tril(self):
self.check(np.tril)
@needs_array_function
def test_triu(self):
self.check(np.triu)
@needs_array_function
def test_unwrap(self):
q = [0.0, 3690.0, -270.0, 690.0] * u.deg
out = np.unwrap(q)
expected = (np.unwrap(q.to_value(u.rad)) * u.rad).to(q.unit)
assert out.unit == expected.unit
assert np.allclose(out, expected, atol=1 * u.urad, rtol=0)
with pytest.raises(u.UnitsError):
np.unwrap([1.0, 2.0] * u.m)
with pytest.raises(u.UnitsError):
np.unwrap(q, discont=1.0 * u.m)
def test_nan_to_num(self):
q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
out = np.nan_to_num(q)
expected = np.nan_to_num(q.value) * q.unit
assert np.all(out == expected)
@needs_array_function
def test_nan_to_num_complex(self):
q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
out = np.nan_to_num(q, nan=1.0 * u.km, posinf=2.0 * u.km, neginf=-2 * u.km)
expected = [-2000.0, 2000.0, 1000.0, 3.0, 4.0] * u.m
assert np.all(out == expected)
class TestUfuncLikeTests(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
def check(self, func):
out = func(self.q)
expected = func(self.q.value)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
assert not np.isreal([1.0 + 1j] * u.m)
def test_iscomplex(self):
self.check(np.iscomplex)
assert np.iscomplex([1.0 + 1j] * u.m)
def test_isclose(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 102.0, 199.0]) * u.cm
atol = 1.5 * u.cm
rtol = 1.0 * u.percent
out = np.isclose(q1, q2, atol=atol)
expected = np.isclose(
q1.value, q2.to_value(q1.unit), atol=atol.to_value(q1.unit)
)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
out = np.isclose(q1, q2, atol=0, rtol=rtol)
expected = np.isclose(q1.value, q2.to_value(q1.unit), atol=0, rtol=0.01)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
@needs_array_function
def test_allclose_atol_default_unit(self):
q_cm = self.q.to(u.cm)
out = np.isclose(self.q, q_cm)
expected = np.isclose(self.q.value, q_cm.to_value(u.m))
assert np.all(out == expected)
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 198.0]) * u.cm
out = np.isclose(q1, q2, atol=0.011, rtol=0)
expected = np.isclose(q1.value, q2.to_value(q1.unit), atol=0.011, rtol=0)
assert np.all(out == expected)
out2 = np.isclose(q2, q1, atol=0.011, rtol=0)
expected2 = np.isclose(q2.value, q1.to_value(q2.unit), atol=0.011, rtol=0)
assert np.all(out2 == expected2)
class TestReductionLikeFunctions(InvariantUnitTestSetup):
def test_average(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
q2 = np.eye(3) / u.s
o = np.average(q1, weights=q2)
expected = np.average(q1.value, weights=q2.value) * u.m
assert np.all(o == expected)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
o = np.var(self.q)
expected = np.var(self.q.value) * self.q.unit**2
assert np.all(o == expected)
def test_median(self):
self.check(np.median)
def test_median_nan_scalar(self):
# See gh-12165; this dropped the unit in numpy < 1.22
data = [1.0, 2, np.nan, 3, 4] << u.km
result = np.median(data)
assert_array_equal(result, np.nan * u.km)
@needs_array_function
def test_quantile(self):
self.check(np.quantile, 0.5)
o = np.quantile(self.q, 50 * u.percent)
expected = np.quantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
# For ndarray input, we return a Quantity.
o2 = np.quantile(self.q.value, 50 * u.percent)
assert o2.unit == u.dimensionless_unscaled
assert np.all(o2 == expected.value)
o3 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, out=o3)
assert result is o3
assert np.all(o3 == expected)
o4 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, None, o4)
assert result is o4
assert np.all(o4 == expected)
@needs_array_function
def test_percentile(self):
self.check(np.percentile, 0.5)
o = np.percentile(self.q, 0.5 * u.one)
expected = np.percentile(self.q.value, 50) * u.m
assert np.all(o == expected)
def test_trace(self):
self.check(np.trace)
@needs_array_function
def test_count_nonzero(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.count_nonzero(q1)
assert type(o) is not u.Quantity
assert o == 8
o = np.count_nonzero(q1, axis=1)
        # With an axis, a plain ndarray of counts is returned, not a Quantity.
assert type(o) is np.ndarray
assert np.all(o == np.array([2, 3, 3]))
def test_allclose(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
atol = 2 * u.cm
rtol = 1.0 * u.percent
assert np.allclose(q1, q2, atol=atol)
assert np.allclose(q1, q2, atol=0.0, rtol=rtol)
@needs_array_function
def test_allclose_atol_default_unit(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
assert np.allclose(q1, q2, atol=0.011, rtol=0)
assert not np.allclose(q2, q1, atol=0.011, rtol=0)
def test_allclose_failures(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=2 * u.s, rtol=0)
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=0, rtol=1.0 * u.s)
@needs_array_function
def test_array_equal(self):
q1 = np.arange(3.0) * u.m
q2 = q1.to(u.cm)
assert np.array_equal(q1, q2)
q3 = q1.value * u.cm
assert not np.array_equal(q1, q3)
@pytest.mark.parametrize("equal_nan", [False, True])
def test_array_equal_nan(self, equal_nan):
q1 = np.linspace(0, 1, num=11) * u.m
q1[0] = np.nan
q2 = q1.to(u.cm)
result = np.array_equal(q1, q2, equal_nan=equal_nan)
assert result == equal_nan
def test_array_equal_incompatible_units(self):
assert not np.array_equal([1, 2] * u.m, [1, 2] * u.s)
@needs_array_function
def test_array_equiv(self):
q1 = np.array([[0.0, 1.0, 2.0]] * 3) * u.m
q2 = q1[0].to(u.cm)
assert np.array_equiv(q1, q2)
q3 = q1[0].value * u.cm
assert not np.array_equiv(q1, q3)
def test_array_equiv_incompatible_units(self):
assert not np.array_equiv([1, 1] * u.m, [1] * u.s)
class TestNanFunctions(InvariantUnitTestSetup):
def setup_method(self):
super().setup_method()
self.q[1, 1] = np.nan
def test_nanmax(self):
self.check(np.nanmax)
def test_nanmin(self):
self.check(np.nanmin)
def test_nanargmin(self):
out = np.nanargmin(self.q)
expected = np.nanargmin(self.q.value)
assert out == expected
def test_nanargmax(self):
out = np.nanargmax(self.q)
expected = np.nanargmax(self.q.value)
assert out == expected
def test_nanmean(self):
self.check(np.nanmean)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nansum(self):
self.check(np.nansum)
def test_nancumsum(self):
self.check(np.nancumsum)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanvar(self):
out = np.nanvar(self.q)
expected = np.nanvar(self.q.value) * self.q.unit**2
assert np.all(out == expected)
def test_nanprod(self):
with pytest.raises(u.UnitsError):
np.nanprod(self.q)
def test_nancumprod(self):
with pytest.raises(u.UnitsError):
np.nancumprod(self.q)
@needs_array_function
def test_nanquantile(self):
self.check(np.nanquantile, 0.5)
o = np.nanquantile(self.q, 50 * u.percent)
expected = np.nanquantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
@needs_array_function
def test_nanpercentile(self):
self.check(np.nanpercentile, 0.5)
o = np.nanpercentile(self.q, 0.5 * u.one)
expected = np.nanpercentile(self.q.value, 50) * u.m
assert np.all(o == expected)
class TestVariousProductFunctions(metaclass=CoverageMeta):
"""
Test functions that are similar to gufuncs
"""
@needs_array_function
def test_cross(self):
q1 = np.arange(6.0).reshape(2, 3) * u.m
q2 = np.array([4.0, 5.0, 6.0]) / u.s
o = np.cross(q1, q2)
expected = np.cross(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_outer(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([1, 2]) / u.s
o = np.outer(q1, q2)
assert np.all(o == np.array([[1, 2], [2, 4], [3, 6]]) * u.m / u.s)
o2 = 0 * o
result = np.outer(q1, q2, out=o2)
assert result is o2
assert np.all(o2 == o)
with pytest.raises(TypeError):
np.outer(q1, q2, out=object())
@needs_array_function
def test_inner(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([4, 5, 6]) / u.s
o = np.inner(q1, q2)
assert o == 32 * u.m / u.s
@needs_array_function
def test_dot(self):
q1 = np.array([1.0, 2.0, 3.0]) * u.m
q2 = np.array([4.0, 5.0, 6.0]) / u.s
o = np.dot(q1, q2)
assert o == 32.0 * u.m / u.s
@needs_array_function
def test_vdot(self):
q1 = np.array([1j, 2j, 3j]) * u.m
q2 = np.array([4j, 5j, 6j]) / u.s
o = np.vdot(q1, q2)
assert o == (32.0 + 0j) * u.m / u.s
@needs_array_function
def test_tensordot(self):
# From the docstring example
a = np.arange(60.0).reshape(3, 4, 5) * u.m
b = np.arange(24.0).reshape(4, 3, 2) / u.s
c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
expected = np.tensordot(a.value, b.value, axes=([1, 0], [0, 1])) * u.m / u.s
assert np.all(c == expected)
@needs_array_function
def test_kron(self):
q1 = np.eye(2) * u.m
q2 = np.ones(2) / u.s
o = np.kron(q1, q2)
expected = np.kron(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_einsum(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.einsum("...i", q1)
assert np.all(o == q1)
o = np.einsum("ii", q1)
expected = np.einsum("ii", q1.value) * u.m
assert np.all(o == expected)
q2 = np.eye(3) / u.s
o2 = np.einsum("ij,jk", q1, q2)
assert np.all(o2 == q1 / u.s)
o3 = 0 * o2
result = np.einsum("ij,jk", q1, q2, out=o3)
assert result is o3
assert np.all(o3 == o2)
def test_einsum_path(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.einsum_path("...i", q1)
assert o[0] == ["einsum_path", (0,)]
o = np.einsum_path("ii", q1)
assert o[0] == ["einsum_path", (0,)]
q2 = np.eye(3) / u.s
o = np.einsum_path("ij,jk", q1, q2)
assert o[0] == ["einsum_path", (0, 1)]
class TestIntDiffFunctions(metaclass=CoverageMeta):
def test_trapz(self):
y = np.arange(9.0) * u.m / u.s
out = np.trapz(y)
expected = np.trapz(y.value) * y.unit
assert np.all(out == expected)
dx = 10.0 * u.s
out = np.trapz(y, dx=dx)
expected = np.trapz(y.value, dx=dx.value) * y.unit * dx.unit
assert np.all(out == expected)
x = np.arange(9.0) * u.s
out = np.trapz(y, x)
expected = np.trapz(y.value, x.value) * y.unit * x.unit
assert np.all(out == expected)
def test_diff(self):
# Simple diff works out of the box.
x = np.arange(10.0) * u.m
out = np.diff(x)
expected = np.diff(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_diff_prepend_append(self):
x = np.arange(10.0) * u.m
out = np.diff(x, prepend=-12.5 * u.cm, append=1 * u.km)
expected = np.diff(x.value, prepend=-0.125, append=1000.0) * x.unit
assert np.all(out == expected)
x = np.arange(10.0) * u.m
out = np.diff(x, prepend=-12.5 * u.cm, append=1 * u.km, n=2)
expected = np.diff(x.value, prepend=-0.125, append=1000.0, n=2) * x.unit
assert np.all(out == expected)
with pytest.raises(TypeError):
np.diff(x, prepend=object())
def test_gradient(self):
# Simple gradient works out of the box.
x = np.arange(10.0) * u.m
out = np.gradient(x)
expected = np.gradient(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_gradient_spacing(self):
# Simple gradient works out of the box.
x = np.arange(10.0) * u.m
spacing = 10.0 * u.s
out = np.gradient(x, spacing)
expected = np.gradient(x.value, spacing.value) * (x.unit / spacing.unit)
assert np.all(out == expected)
f = np.array([[1, 2, 6], [3, 4, 5]]) * u.m
dx = 2.0 * u.s
y = [1.0, 1.5, 3.5] * u.GHz
dfdx, dfdy = np.gradient(f, dx, y)
exp_dfdx, exp_dfdy = np.gradient(f.value, dx.value, y.value)
exp_dfdx = exp_dfdx * f.unit / dx.unit
exp_dfdy = exp_dfdy * f.unit / y.unit
assert np.all(dfdx == exp_dfdx)
assert np.all(dfdy == exp_dfdy)
dfdx2 = np.gradient(f, dx, axis=0)
assert np.all(dfdx2 == exp_dfdx)
dfdy2 = np.gradient(f, y, axis=(1,))
assert np.all(dfdy2 == exp_dfdy)
class TestSpaceFunctions(metaclass=CoverageMeta):
def test_linspace(self):
        # Note: linspace adopts the unit of the end point, which is not
        # necessarily the most logical choice.
out = np.linspace(1000.0 * u.m, 10.0 * u.km, 5)
expected = np.linspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(6.0).reshape(2, 3) * u.m
q2 = 10000.0 * u.cm
out = np.linspace(q1, q2, 5)
expected = np.linspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
@needs_array_function
def test_logspace(self):
unit = u.m / u.s**2
out = np.logspace(10.0 * u.dex(unit), 20 * u.dex(unit), 10)
expected = np.logspace(10.0, 20.0, 10) * unit
assert np.all(out == expected)
out = np.logspace(10.0 * u.STmag, 20 * u.STmag, 10)
expected = np.logspace(10.0, 20.0, 10, base=10.0 ** (-0.4)) * u.ST
assert u.allclose(out, expected)
@needs_array_function
def test_geomspace(self):
out = np.geomspace(1000.0 * u.m, 10.0 * u.km, 5)
expected = np.geomspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(1.0, 7.0).reshape(2, 3) * u.m
q2 = 10000.0 * u.cm
out = np.geomspace(q1, q2, 5)
expected = np.geomspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
class TestInterpolationFunctions(metaclass=CoverageMeta):
@needs_array_function
def test_interp(self):
x = np.array([1250.0, 2750.0]) * u.m
xp = np.arange(5.0) * u.km
yp = np.arange(5.0) * u.day
out = np.interp(x, xp, yp)
expected = np.interp(x.to_value(xp.unit), xp.value, yp.value) * yp.unit
assert np.all(out == expected)
out = np.interp(x, xp, yp.value)
assert type(out) is np.ndarray
assert np.all(out == expected.value)
@needs_array_function
def test_piecewise(self):
x = np.linspace(-2.5, 2.5, 6) * u.m
out = np.piecewise(x, [x < 0, x >= 0], [-1 * u.s, 1 * u.day])
expected = (
np.piecewise(x.value, [x.value < 0, x.value >= 0], [-1, 24 * 3600]) * u.s
)
assert out.unit == expected.unit
assert np.all(out == expected)
out2 = np.piecewise(
x, [x < 1 * u.m, x >= 0], [-1 * u.s, 1 * u.day, lambda x: 1 * u.hour]
)
expected2 = (
np.piecewise(x.value, [x.value < 1, x.value >= 0], [-1, 24 * 3600, 3600])
* u.s
)
assert out2.unit == expected2.unit
assert np.all(out2 == expected2)
out3 = np.piecewise(
x, [x < 1 * u.m, x >= 0], [0, 1 * u.percent, lambda x: 1 * u.one]
)
expected3 = (
np.piecewise(x.value, [x.value < 1, x.value >= 0], [0, 0.01, 1]) * u.one
)
assert out3.unit == expected3.unit
assert np.all(out3 == expected3)
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x, [x], [0.0])
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x.value, [x], [0.0])
class TestBincountDigitize(metaclass=CoverageMeta):
@needs_array_function
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
weights = np.arange(len(i)) * u.Jy
out = np.bincount(i, weights)
expected = np.bincount(i, weights.value) * weights.unit
assert_array_equal(out, expected)
with pytest.raises(TypeError):
np.bincount(weights)
@needs_array_function
def test_digitize(self):
x = np.array([1500.0, 2500.0, 4500.0]) * u.m
bins = np.arange(10.0) * u.km
out = np.digitize(x, bins)
expected = np.digitize(x.to_value(bins.unit), bins.value)
assert_array_equal(out, expected)
class TestHistogramFunctions(metaclass=CoverageMeta):
def setup_method(self):
self.x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
self.y = np.array([1.2, 2.2, 2.4, 3.0, 4.0]) * u.cm
self.weights = np.arange(len(self.x)) / u.s
def check(
self,
function,
*args,
value_args=None,
value_kwargs=None,
expected_units=None,
**kwargs
):
"""Check quanties are treated correctly in the histogram function.
Test is done by applying ``function(*args, **kwargs)``, where
the argument can be quantities, and comparing the result to
``function(*value_args, **value_kwargs)``, with the outputs
converted to quantities using the ``expected_units`` (where `None`
indicates the output is expected to be a regular array).
For ``**value_kwargs``, any regular ``kwargs`` are treated as
defaults, i.e., non-quantity arguments do not have to be repeated.
"""
if value_kwargs is None:
value_kwargs = kwargs
else:
for k, v in kwargs.items():
value_kwargs.setdefault(k, v)
# Get the result, using the Quantity override.
out = function(*args, **kwargs)
# Get the comparison, with non-Quantity arguments.
expected = function(*value_args, **value_kwargs)
# All histogram functions return a tuple of the actual histogram
# and the bin edges. First, check the actual histogram.
out_h = out[0]
expected_h = expected[0]
if expected_units[0] is not None:
expected_h = expected_h * expected_units[0]
assert_array_equal(out_h, expected_h)
        # Check bin edges. Here, histogramdd returns an iterable of the
# bin edges as the second return argument, while histogram and
# histogram2d return the bin edges directly.
if function is np.histogramdd:
bin_slice = 1
else:
bin_slice = slice(1, None)
for o_bin, e_bin, e_unit in zip(
out[bin_slice], expected[bin_slice], expected_units[bin_slice]
):
if e_unit is not None:
e_bin = e_bin * e_unit
assert_array_equal(o_bin, e_bin)
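    # For example (illustrative only), a call such as
    #     self.check(np.histogram, x, value_args=(x.value,),
    #                expected_units=(None, x.unit))
    # compares np.histogram(x) against np.histogram(x.value), re-attaching
    # x.unit to the returned bin edges; see test_histogram below for the
    # actual invocations.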
@needs_array_function
def test_histogram(self):
x = self.x
weights = self.weights
# Plain histogram.
self.check(
np.histogram, x, value_args=(x.value,), expected_units=(None, x.unit)
)
# With bins.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
value_args=(x.value, [1.25, 2.0]),
expected_units=(None, x.unit),
)
# With density.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
density=True,
value_args=(x.value, [1.25, 2.0]),
expected_units=(1 / x.unit, x.unit),
)
# With weights.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
weights=weights,
value_args=(x.value, [1.25, 2.0]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit),
)
# With weights and density.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
weights=weights,
density=True,
value_args=(x.value, [1.25, 2.0]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit / x.unit, x.unit),
)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram(x.value, [125, 200] * u.s)
@classmethod
def _range_value(cls, range, unit):
if isinstance(range, u.Quantity):
return range.to_value(unit)
else:
return [cls._range_value(r, unit) for r in range]
@pytest.mark.parametrize("range", [[2 * u.m, 500 * u.cm], [2, 5] * u.m])
@needs_array_function
def test_histogram_range(self, range):
self.check(
np.histogram,
self.x,
range=range,
value_args=[self.x.value],
value_kwargs=dict(range=self._range_value(range, self.x.unit)),
expected_units=(None, self.x.unit),
)
@needs_array_function
def test_histogram_bin_edges(self):
x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
out_b = np.histogram_bin_edges(x)
expected_b = np.histogram_bin_edges(x.value) * x.unit
assert np.all(out_b == expected_b)
# With bins
out2_b = np.histogram_bin_edges(x, [125, 200] * u.cm)
expected2_b = np.histogram_bin_edges(x.value, [1.25, 2.0]) * x.unit
assert np.all(out2_b == expected2_b)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x.value, [125, 200] * u.s)
@pytest.mark.parametrize("range", [[2 * u.m, 500 * u.cm], [2, 5] * u.m])
@needs_array_function
def test_histogram_bin_edges_range(self, range):
out_b = np.histogram_bin_edges(self.x, range=range)
expected_b = np.histogram_bin_edges(
self.x.value, range=self._range_value(range, self.x.unit)
)
assert np.all(out_b.value == expected_b)
@needs_array_function
def test_histogram2d(self):
x, y = self.x, self.y
weights = self.weights
# Basic tests with X, Y.
self.check(
np.histogram2d,
x,
y,
value_args=(x.value, y.value),
expected_units=(None, x.unit, y.unit),
)
# Check units with density.
self.check(
np.histogram2d,
x,
y,
density=True,
value_args=(x.value, y.value),
expected_units=(1 / (x.unit * y.unit), x.unit, y.unit),
)
# Check units with weights.
self.check(
np.histogram2d,
x,
y,
weights=weights,
value_args=(x.value, y.value),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit, y.unit),
)
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.0] * u.m
self.check(
np.histogram2d,
x,
y,
[5, inb_y],
value_args=(x.value, y.value, [5, np.array([0, 2.5, 100.0])]),
expected_units=(None, x.unit, y.unit),
)
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.0] * u.percent
self.check(
np.histogram2d,
x.value,
y.value,
bins=[5, inb2_y],
value_args=(x.value, y.value),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.0])]),
expected_units=(None, u.one, u.one),
)
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogram2d(x, y, 125 * u.s)
with pytest.raises(TypeError):
np.histogram2d(x.value, y.value, 125 * u.s)
# Bin units need to match units of x, y.
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogram2d(x.value, y.value, [125, 200] * u.s)
@pytest.mark.parametrize(
argnames="range",
argvalues=[
[[2 * u.m, 500 * u.cm], [1 * u.cm, 40 * u.mm]],
[[200, 500] * u.cm, [10, 40] * u.mm],
[[200, 500], [1, 4]] * u.cm,
],
)
@needs_array_function
def test_histogram2d_range(self, range):
self.check(
np.histogram2d,
self.x,
self.y,
range=range,
value_args=[self.x.value, self.y.value],
value_kwargs=dict(
range=[
self._range_value(r, un)
for (r, un) in zip(range, (self.x.unit, self.y.unit))
]
),
expected_units=(None, self.x.unit, self.y.unit),
)
@needs_array_function
def test_histogramdd(self):
# First replicates of the histogram2d tests, but using the
# histogramdd override. Normally takes the sample as a tuple
# with a given number of dimensions, and returns the histogram
# as well as a tuple of bin edges.
sample = self.x, self.y
sample_units = self.x.unit, self.y.unit
sample_values = (self.x.value, self.y.value)
weights = self.weights
# Basic tests with X, Y
self.check(
np.histogramdd,
sample,
value_args=(sample_values,),
expected_units=(None, sample_units),
)
# Check units with density.
self.check(
np.histogramdd,
sample,
density=True,
value_args=(sample_values,),
expected_units=(1 / (self.x.unit * self.y.unit), sample_units),
)
# Check units with weights.
self.check(
np.histogramdd,
sample,
weights=weights,
value_args=(sample_values,),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, sample_units),
)
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.0] * u.m
self.check(
np.histogramdd,
sample,
[5, inb_y],
value_args=(sample_values, [5, np.array([0, 2.5, 100.0])]),
expected_units=(None, sample_units),
)
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.0] * u.percent
self.check(
np.histogramdd,
sample_values,
bins=[5, inb2_y],
value_args=(sample_values,),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.0])]),
expected_units=(None, (u.one, u.one)),
)
# For quantities, it is probably not that likely one would pass
# in the sample as an array, but check that it works anyway.
# This also gives a 3-D check.
xyz = np.random.normal(size=(10, 3)) * u.m
self.check(
np.histogramdd,
xyz,
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,) * 3),
)
# Passing it in as a tuple should work just as well; note the
# *last* axis contains the sample dimension.
self.check(
np.histogramdd,
(xyz[:, 0], xyz[:, 1], xyz[:, 2]),
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,) * 3),
)
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogramdd(sample, 125 * u.s)
# Sequence of single items should be integer.
with pytest.raises(TypeError):
np.histogramdd(sample, [125, 200] * u.s)
with pytest.raises(TypeError):
np.histogramdd(sample_values, [125, 200] * u.s)
# Units of bins should match.
with pytest.raises(u.UnitsError):
np.histogramdd(sample, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogramdd(sample_values, ([125, 200] * u.s, [125, 200]))
@pytest.mark.parametrize(
argnames="range",
argvalues=[
[[2 * u.m, 500 * u.cm], [1 * u.cm, 40 * u.mm]],
[[200, 500] * u.cm, [10, 40] * u.mm],
[[200, 500], [1, 4]] * u.cm,
],
)
@needs_array_function
def test_histogramdd_range(self, range):
self.check(
np.histogramdd,
(self.x, self.y),
range=range,
value_args=[(self.x.value, self.y.value)],
value_kwargs=dict(
range=[
self._range_value(r, un)
for (r, un) in zip(range, (self.x.unit, self.y.unit))
]
),
expected_units=(None, (self.x.unit, self.y.unit)),
)
@needs_array_function
def test_correlate(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.correlate(x1, x2)
expected = np.correlate(x1.value, x2.value) * u.m**2
assert np.all(out == expected)
@needs_array_function
def test_convolve(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.convolve(x1, x2)
expected = np.convolve(x1.value, x2.value) * u.m**2
assert np.all(out == expected)
@needs_array_function
def test_cov(self):
# Do not see how we can use cov with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.cov(x)
@needs_array_function
def test_corrcoef(self):
        # Do not see how we can use corrcoef with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.corrcoef(x)
class TestSortFunctions(InvariantUnitTestSetup):
def test_sort(self):
self.check(np.sort)
def test_sort_axis(self):
self.check(np.sort, axis=0)
@pytest.mark.skipif(not NUMPY_LT_1_24, reason="np.msort is deprecated")
def test_msort(self):
self.check(np.msort)
@needs_array_function
def test_sort_complex(self):
self.check(np.sort_complex)
def test_partition(self):
self.check(np.partition, 2)
class TestStringFunctions(metaclass=CoverageMeta):
# For these, making behaviour work means deviating only slightly from
# the docstring, and by default they fail miserably. So, might as well.
def setup_method(self):
self.q = np.arange(3.0) * u.Jy
@needs_array_function
def test_array2string(self):
# The default formatters cannot handle units, so if we do not pass
# a relevant formatter, we are better off just treating it as an
# array (which happens for all subtypes).
out0 = np.array2string(self.q)
expected0 = str(self.q.value)
assert out0 == expected0
# Arguments are interpreted as usual.
out1 = np.array2string(self.q, separator=", ")
expected1 = "[0., 1., 2.]"
assert out1 == expected1
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.q, separator=", ", formatter={"all": str})
expected2 = "[0.0 Jy, 1.0 Jy, 2.0 Jy]"
assert out2 == expected2
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(
self.q, None, None, None, ", ", "", np._NoValue, {"float": str}
)
assert out3 == expected2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.q, separator=", ", formatter={"int": str})
assert out4 == expected1
@needs_array_function
def test_array_repr(self):
out = np.array_repr(self.q)
assert out == "Quantity([0., 1., 2.], unit='Jy')"
q2 = self.q.astype("f4")
out2 = np.array_repr(q2)
assert out2 == "Quantity([0., 1., 2.], unit='Jy', dtype=float32)"
@needs_array_function
def test_array_str(self):
out = np.array_str(self.q)
expected = str(self.q)
assert out == expected
class TestBitAndIndexFunctions(metaclass=CoverageMeta):
# Index/bit functions generally fail for floats, so the usual
    # float quantities are safe, but the integer ones are not.
def setup_method(self):
self.q = np.arange(3) * u.m
self.uint_q = u.Quantity(np.arange(3), "m", dtype="u1")
@needs_array_function
def test_packbits(self):
with pytest.raises(TypeError):
np.packbits(self.q)
with pytest.raises(TypeError):
np.packbits(self.uint_q)
@needs_array_function
def test_unpackbits(self):
with pytest.raises(TypeError):
np.unpackbits(self.q)
with pytest.raises(TypeError):
np.unpackbits(self.uint_q)
@needs_array_function
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.q, 3)
with pytest.raises(TypeError):
np.unravel_index(self.uint_q, 3)
@needs_array_function
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.q,), 3)
with pytest.raises(TypeError):
np.ravel_multi_index((self.uint_q,), 3)
@needs_array_function
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.q)
with pytest.raises(TypeError):
np.ix_(self.uint_q)
class TestDtypeFunctions(NoUnitTestSetup):
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.q.dtype)
self.check(np.can_cast, "f4")
def test_min_scalar_type(self):
out = np.min_scalar_type(self.q[0])
expected = np.min_scalar_type(self.q.value[0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(metaclass=CoverageMeta):
def test_meshgrid(self):
q1 = np.arange(3.0) * u.m
q2 = np.arange(5.0) * u.s
o1, o2 = np.meshgrid(q1, q2)
e1, e2 = np.meshgrid(q1.value, q2.value)
assert np.all(o1 == e1 * q1.unit)
assert np.all(o2 == e2 * q2.unit)
class TestMemoryFunctions(NoUnitTestSetup):
def test_shares_memory(self):
self.check(np.shares_memory, self.q.value)
def test_may_share_memory(self):
self.check(np.may_share_memory, self.q.value)
class TestSetOpsFunctions(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.array([[0.0, 1.0, -1.0], [3.0, 5.0, 3.0], [0.0, 1.0, -1]]) * u.m
self.q2 = np.array([0.0, 100.0, 150.0, 200.0]) * u.cm
def check(self, function, qs, *args, **kwargs):
unit = kwargs.pop("unit", self.q.unit)
out = function(*qs, *args, **kwargs)
qv = tuple(q.to_value(self.q.unit) for q in qs)
expected = function(*qv, *args, **kwargs)
if isinstance(expected, tuple):
if unit:
expected = (expected[0] * unit,) + expected[1:]
for o, e in zip(out, expected):
assert_array_equal(o, e)
else:
if unit:
expected = expected * unit
assert_array_equal(out, expected)
def check1(self, function, *args, **kwargs):
self.check(function, (self.q,), *args, **kwargs)
def check2(self, function, *args, **kwargs):
self.check(function, (self.q, self.q2), *args, **kwargs)
@pytest.mark.parametrize(
"kwargs",
(
dict(return_index=True, return_inverse=True),
dict(return_counts=True),
dict(return_index=True, return_inverse=True, return_counts=True),
),
)
def test_unique(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize(
"kwargs",
(
dict(axis=0),
dict(axis=1),
dict(return_counts=True, return_inverse=False, axis=1),
),
)
def test_unique_more_complex(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize("kwargs", (dict(), dict(return_indices=True)))
def test_intersect1d(self, kwargs):
self.check2(np.intersect1d, **kwargs)
@needs_array_function
def test_setxor1d(self):
self.check2(np.setxor1d)
@needs_array_function
def test_union1d(self):
self.check2(np.union1d)
result = np.union1d(np.array([0.0, np.nan]), np.arange(3) << u.m)
assert result.unit is u.m
assert_array_equal(result.value, np.array([0.0, 1.0, 2.0, np.nan]))
@needs_array_function
def test_setdiff1d(self):
self.check2(np.setdiff1d)
@needs_array_function
def test_in1d(self):
self.check2(np.in1d, unit=None)
# Check zero is treated as having any unit.
assert np.in1d(np.zeros(1), self.q2)
with pytest.raises(u.UnitsError):
np.in1d(np.ones(1), self.q2)
@needs_array_function
def test_isin(self):
self.check2(np.isin, unit=None)
def test_ediff1d(self):
# ediff1d works always as it calls the Quantity method.
self.check1(np.ediff1d)
x = np.arange(10.0) * u.m
out = np.ediff1d(x, to_begin=-12.5 * u.cm, to_end=1 * u.km)
expected = np.ediff1d(x.value, to_begin=-0.125, to_end=1000.0) * x.unit
assert_array_equal(out, expected)
class TestDatetimeFunctions(BasicTestSetup):
def test_busday_count(self):
with pytest.raises(TypeError):
np.busday_count(self.q, self.q)
def test_busday_offset(self):
with pytest.raises(TypeError):
np.busday_offset(self.q, self.q)
def test_datetime_as_string(self):
with pytest.raises(TypeError):
np.datetime_as_string(self.q)
def test_is_busday(self):
with pytest.raises(TypeError):
np.is_busday(self.q)
# These functions always worked; ensure they do not regress.
# Note that they are *not* wrapped so no need to check coverage.
@pytest.mark.parametrize("function", [np.fft.fftfreq, np.fft.rfftfreq])
def test_fft_frequencies(function):
out = function(128, d=0.1 * u.s)
expected = function(128, d=0.1) / u.s
assert_array_equal(out, expected)
@needs_array_function
class TestFFT(InvariantUnitTestSetup):
# These are all trivial, just preserve the unit.
def setup_method(self):
# Use real input; gets turned into complex as needed.
self.q = np.arange(128.0).reshape(8, -1) * u.s
def test_fft(self):
self.check(np.fft.fft)
def test_ifft(self):
self.check(np.fft.ifft)
def test_rfft(self):
self.check(np.fft.rfft)
def test_irfft(self):
self.check(np.fft.irfft)
def test_fft2(self):
self.check(np.fft.fft2)
def test_ifft2(self):
self.check(np.fft.ifft2)
def test_rfft2(self):
self.check(np.fft.rfft2)
def test_irfft2(self):
self.check(np.fft.irfft2)
def test_fftn(self):
self.check(np.fft.fftn)
def test_ifftn(self):
self.check(np.fft.ifftn)
def test_rfftn(self):
self.check(np.fft.rfftn)
def test_irfftn(self):
self.check(np.fft.irfftn)
def test_hfft(self):
self.check(np.fft.hfft)
def test_ihfft(self):
self.check(np.fft.ihfft)
def test_fftshift(self):
self.check(np.fft.fftshift)
def test_ifftshift(self):
self.check(np.fft.ifftshift)
class TestLinAlg(metaclass=CoverageMeta):
def setup_method(self):
self.q = (
np.array(
[[ 1.0, -1.0, 2.0],
[ 0.0, 3.0, -1.0],
[-1.0, -1.0, 1.0]]
) << u.m
) # fmt: skip
def test_cond(self):
c = np.linalg.cond(self.q)
expected = np.linalg.cond(self.q.value)
assert c == expected
def test_matrix_rank(self):
r = np.linalg.matrix_rank(self.q)
x = np.linalg.matrix_rank(self.q.value)
assert r == x
@needs_array_function
def test_matrix_rank_with_tol(self):
# Use a matrix that is not so good, so tol=1 and tol=0.01 differ.
q = np.arange(9.0).reshape(3, 3) / 4 * u.m
tol = 1.0 * u.cm
r2 = np.linalg.matrix_rank(q, tol)
x2 = np.linalg.matrix_rank(q.value, tol.to_value(q.unit))
assert r2 == x2
def test_matrix_power(self):
q1 = np.linalg.matrix_power(self.q, 1)
assert_array_equal(q1, self.q)
q2 = np.linalg.matrix_power(self.q, 2)
assert_array_equal(q2, self.q @ self.q)
q2 = np.linalg.matrix_power(self.q, 4)
assert_array_equal(q2, self.q @ self.q @ self.q @ self.q)
@needs_array_function
def test_matrix_inv_power(self):
qinv = np.linalg.inv(self.q.value) / self.q.unit
qm1 = np.linalg.matrix_power(self.q, -1)
assert_array_equal(qm1, qinv)
qm3 = np.linalg.matrix_power(self.q, -3)
assert_array_equal(qm3, qinv @ qinv @ qinv)
@needs_array_function
def test_multi_dot(self):
q2 = np.linalg.multi_dot([self.q, self.q])
q2x = self.q @ self.q
assert_array_equal(q2, q2x)
q3 = np.linalg.multi_dot([self.q, self.q, self.q])
q3x = self.q @ self.q @ self.q
assert_array_equal(q3, q3x)
@needs_array_function
def test_svd(self):
m = np.arange(10.0) * np.arange(5.0)[:, np.newaxis] * u.m
svd_u, svd_s, svd_vt = np.linalg.svd(m, full_matrices=False)
svd_ux, svd_sx, svd_vtx = np.linalg.svd(m.value, full_matrices=False)
svd_sx <<= m.unit
assert_array_equal(svd_u, svd_ux)
assert_array_equal(svd_vt, svd_vtx)
assert_array_equal(svd_s, svd_sx)
assert u.allclose(svd_u @ np.diag(svd_s) @ svd_vt, m)
s2 = np.linalg.svd(m, compute_uv=False)
svd_s2x = np.linalg.svd(m.value, compute_uv=False) << m.unit
assert_array_equal(s2, svd_s2x)
@needs_array_function
def test_inv(self):
inv = np.linalg.inv(self.q)
expected = np.linalg.inv(self.q.value) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_pinv(self):
pinv = np.linalg.pinv(self.q)
expected = np.linalg.pinv(self.q.value) / self.q.unit
assert_array_equal(pinv, expected)
rcond = 0.01 * u.cm
pinv2 = np.linalg.pinv(self.q, rcond)
expected2 = (
np.linalg.pinv(self.q.value, rcond.to_value(self.q.unit)) / self.q.unit
)
assert_array_equal(pinv2, expected2)
@needs_array_function
def test_tensorinv(self):
inv = np.linalg.tensorinv(self.q, ind=1)
expected = np.linalg.tensorinv(self.q.value, ind=1) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_det(self):
det = np.linalg.det(self.q)
expected = np.linalg.det(self.q.value)
expected <<= self.q.unit ** self.q.shape[-1]
assert_array_equal(det, expected)
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[0]) # Not 2-D
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[:-1]) # Not square.
@needs_array_function
def test_slogdet(self):
# TODO: Could be supported if we had a natural logarithm unit.
with pytest.raises(TypeError):
logdet = np.linalg.slogdet(self.q)
assert hasattr(logdet, "unit")
@needs_array_function
def test_solve(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x = np.linalg.solve(self.q, b)
xx = np.linalg.solve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_tensorsolve(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x = np.linalg.tensorsolve(self.q, b)
xx = np.linalg.tensorsolve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_lstsq(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x, residuals, rank, s = np.linalg.lstsq(self.q, b, rcond=None)
xx, residualsx, rankx, sx = np.linalg.lstsq(self.q.value, b.value, rcond=None)
xx <<= b.unit / self.q.unit
residualsx <<= b.unit**2
sx <<= self.q.unit
assert_array_equal(x, xx)
assert_array_equal(residuals, residualsx)
assert_array_equal(s, sx)
assert rank == rankx
assert u.allclose(self.q @ x, b)
# Also do one where we can check the answer...
m = np.eye(3)
b = np.arange(3) * u.m
x, residuals, rank, s = np.linalg.lstsq(m, b, rcond=1.0 * u.percent)
assert_array_equal(x, b)
assert np.all(residuals == 0 * u.m**2)
assert rank == 3
assert_array_equal(s, np.array([1.0, 1.0, 1.0]) << u.one)
with pytest.raises(u.UnitsError):
np.linalg.lstsq(m, b, rcond=1.0 * u.s)
@needs_array_function
def test_norm(self):
n = np.linalg.norm(self.q)
expected = np.linalg.norm(self.q.value) << self.q.unit
assert_array_equal(n, expected)
# Special case: 1-D, ord=0.
n1 = np.linalg.norm(self.q[0], ord=0)
expected1 = np.linalg.norm(self.q[0].value, ord=0) << u.one
assert_array_equal(n1, expected1)
@needs_array_function
def test_cholesky(self):
# Numbers from np.linalg.cholesky docstring.
q = np.array([[1, -2j], [2j, 5]]) * u.m
cd = np.linalg.cholesky(q)
cdx = np.linalg.cholesky(q.value) << q.unit**0.5
assert_array_equal(cd, cdx)
assert u.allclose(cd @ cd.T.conj(), q)
@needs_array_function
def test_qr(self):
# This is not exhaustive...
a = np.array([[1, -2j], [2j, 5]]) * u.m
q, r = np.linalg.qr(a)
qx, rx = np.linalg.qr(a.value)
qx <<= u.one
rx <<= a.unit
assert_array_equal(q, qx)
assert_array_equal(r, rx)
assert u.allclose(q @ r, a)
@needs_array_function
def test_eig(self):
w, v = np.linalg.eig(self.q)
wx, vx = np.linalg.eig(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w, v = np.linalg.eig(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
assert_array_equal(v, np.eye(3))
@needs_array_function
def test_eigvals(self):
w = np.linalg.eigvals(self.q)
wx = np.linalg.eigvals(self.q.value) << self.q.unit
assert_array_equal(w, wx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w = np.linalg.eigvals(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
@needs_array_function
def test_eigh(self):
w, v = np.linalg.eigh(self.q)
wx, vx = np.linalg.eigh(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
@needs_array_function
def test_eigvalsh(self):
w = np.linalg.eigvalsh(self.q)
wx = np.linalg.eigvalsh(self.q.value) << self.q.unit
assert_array_equal(w, wx)
class TestRecFunctions(metaclass=CoverageMeta):
@classmethod
def setup_class(self):
self.pv_dtype = np.dtype([("p", "f8"), ("v", "f8")])
self.pv_t_dtype = np.dtype(
[("pv", np.dtype([("pp", "f8"), ("vv", "f8")])), ("t", "f8")]
)
self.pv = np.array([(1.0, 0.25), (2.0, 0.5), (3.0, 0.75)], self.pv_dtype)
self.pv_t = np.array(
[((4.0, 2.5), 0.0), ((5.0, 5.0), 1.0), ((6.0, 7.5), 2.0)], self.pv_t_dtype
)
self.pv_unit = u.StructuredUnit((u.km, u.km / u.s), ("p", "v"))
self.pv_t_unit = u.StructuredUnit((self.pv_unit, u.s), ("pv", "t"))
self.q_pv = self.pv << self.pv_unit
self.q_pv_t = self.pv_t << self.pv_t_unit
def test_structured_to_unstructured(self):
# can't unstructure something with incompatible units
with pytest.raises(u.UnitConversionError, match="'m'"):
rfn.structured_to_unstructured(u.Quantity((0, 0.6), u.Unit("(eV, m)")))
# it works if all the units are equal
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, eV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 0.6] * u.eV)
# also if the units are convertible
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, keV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 600] * u.eV)
struct = u.Quantity((0, 0, 1.7827e-33), u.Unit("(eV, eV, g)"))
with u.add_enabled_equivalencies(u.mass_energy()):
unstruct = rfn.structured_to_unstructured(struct)
        assert u.allclose(unstruct, [0, 0, 1.0000214] * u.eV)
# and if the dtype is nested
struct = [(5, (400.0, 3e6))] * u.Unit("m, (cm, um)")
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [[5, 4, 3]] * u.m)
# For the other tests of ``structured_to_unstructured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_structured_to_unstructured``
def test_unstructured_to_structured(self):
unstruct = [1, 2, 3] * u.m
dtype = np.dtype([("f1", float), ("f2", float), ("f3", float)])
# It works.
struct = rfn.unstructured_to_structured(unstruct, dtype=dtype)
assert struct.unit == u.Unit("(m, m, m)")
assert_array_equal(rfn.structured_to_unstructured(struct), unstruct)
# Can't structure something that's already structured.
with pytest.raises(ValueError, match="arr must have at least one dimension"):
rfn.unstructured_to_structured(struct, dtype=dtype)
        # For the other tests of ``unstructured_to_structured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_unstructured_to_structured``
def test_merge_arrays_repeat_dtypes(self):
# Cannot merge things with repeat dtypes.
q1 = u.Quantity([(1,)], dtype=[("f1", float)])
q2 = u.Quantity([(1,)], dtype=[("f1", float)])
with pytest.raises(ValueError, match="field 'f1' occurs more than once"):
rfn.merge_arrays((q1, q2))
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays(self, flatten):
"""Test `numpy.lib.recfunctions.merge_arrays`."""
# Merge single normal array.
arr = rfn.merge_arrays(self.q_pv["p"], flatten=flatten)
assert_array_equal(arr["f0"], self.q_pv["p"])
assert arr.unit == (u.km,)
# Merge single structured array.
arr = rfn.merge_arrays(self.q_pv, flatten=flatten)
assert_array_equal(arr, self.q_pv)
assert arr.unit == (u.km, u.km / u.s)
# Merge 1-element tuple.
arr = rfn.merge_arrays((self.q_pv,), flatten=flatten)
assert np.array_equal(arr, self.q_pv)
assert arr.unit == (u.km, u.km / u.s)
@pytest.mark.xfail
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays_nonquantities(self, flatten):
# Fails because cannot create quantity from structured array.
        arr = rfn.merge_arrays((self.q_pv["p"], self.q_pv.value), flatten=flatten)
def test_merge_array_nested_structure(self):
# Merge 2-element tuples without flattening.
arr = rfn.merge_arrays((self.q_pv, self.q_pv_t))
assert_array_equal(arr["f0"], self.q_pv)
assert_array_equal(arr["f1"], self.q_pv_t)
assert arr.unit == ((u.km, u.km / u.s), ((u.km, u.km / u.s), u.s))
def test_merge_arrays_flatten_nested_structure(self):
# Merge 2-element tuple, flattening it.
arr = rfn.merge_arrays((self.q_pv, self.q_pv_t), flatten=True)
assert_array_equal(arr["p"], self.q_pv["p"])
assert_array_equal(arr["v"], self.q_pv["v"])
assert_array_equal(arr["pp"], self.q_pv_t["pv"]["pp"])
assert_array_equal(arr["vv"], self.q_pv_t["pv"]["vv"])
assert_array_equal(arr["t"], self.q_pv_t["t"])
assert arr.unit == (u.km, u.km / u.s, u.km, u.km / u.s, u.s)
def test_merge_arrays_asrecarray(self):
with pytest.raises(ValueError, match="asrecarray=True is not supported."):
rfn.merge_arrays(self.q_pv, asrecarray=True)
def test_merge_arrays_usemask(self):
with pytest.raises(ValueError, match="usemask=True is not supported."):
rfn.merge_arrays(self.q_pv, usemask=True)
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays_str(self, flatten):
with pytest.raises(
TypeError, match="the Quantity implementation cannot handle"
):
rfn.merge_arrays((self.q_pv, np.array(["a", "b", "c"])), flatten=flatten)
untested_functions = set()
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
untested_functions |= poly_functions
rec_functions = {
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.repack_fields, rfn.apply_along_fields, rfn.assign_fields_by_name,
rfn.stack_arrays, rfn.find_duplicates,
rfn.recursive_fill_fields, rfn.require_fields,
} # fmt: skip
untested_functions |= rec_functions
@needs_array_function
def test_testing_completeness():
assert not CoverageMeta.covered.intersection(untested_functions)
assert all_wrapped == (CoverageMeta.covered | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize(
"one, two",
itertools.combinations(
(
SUBCLASS_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(FUNCTION_HELPERS.keys()),
set(DISPATCHED_FUNCTIONS.keys()),
),
2,
),
)
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
@needs_array_function
def test_all_included(self):
included_in_helpers = (
SUBCLASS_SAFE_FUNCTIONS
| UNSUPPORTED_FUNCTIONS
| set(FUNCTION_HELPERS.keys())
| set(DISPATCHED_FUNCTIONS.keys())
)
assert all_wrapped == included_in_helpers
    # untested_functions is created using all_wrapped_functions
@needs_array_function
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS | TBD_FUNCTIONS == untested_functions
# The purpose of these tests is to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.
import concurrent.futures
import dataclasses
import warnings
from collections import namedtuple
import numpy as np
import pytest
from erfa import ufunc as erfa_ufunc
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.units import quantity_helper as qh
from astropy.units.quantity_helper.converters import UfuncHelpers
from astropy.units.quantity_helper.helpers import helper_sqrt
from astropy.utils.compat.numpycompat import NUMPY_LT_1_25
from astropy.utils.compat.optional_deps import HAS_SCIPY
testcase = namedtuple("testcase", ["f", "q_in", "q_out"])
testexc = namedtuple("testexc", ["f", "q_in", "exc", "msg"])
testwarn = namedtuple("testwarn", ["f", "q_in", "wfilter"])
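# The testcase/testexc/testwarn tuples above are consumed by the three driver
# functions that follow.  The drivers are marked with ``pytest.mark.skip`` so
# pytest does not collect them as stand-alone tests; instead, the parametrized
# ``test_testcases``/``test_testexcs``/``test_testwarns`` methods further down
# call them explicitly with each parametrized case.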
@pytest.mark.skip
def test_testcase(tc):
results = tc.f(*tc.q_in)
# careful of the following line, would break on a function returning
# a single tuple (as opposed to tuple of return values)
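    # For example, np.modf returns two separate outputs, which arrive here as a
    # tuple and are handled correctly; a (hypothetical) function whose single
    # return value happened to *be* a tuple would instead be mistaken for
    # multiple outputs by the isinstance check below.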
results = (results,) if not isinstance(results, tuple) else results
for result, expected in zip(results, tc.q_out):
assert result.unit == expected.unit
assert_allclose(result.value, expected.value, atol=1.0e-15)
@pytest.mark.skip
def test_testexc(te):
with pytest.raises(te.exc) as exc:
te.f(*te.q_in)
if te.msg is not None:
assert te.msg in exc.value.args[0]
@pytest.mark.skip
def test_testwarn(tw):
with warnings.catch_warnings():
warnings.filterwarnings(tw.wfilter)
tw.f(*tw.q_in)
class TestUfuncHelpers:
# Note that this test should work even if scipy is present, since
# the scipy.special ufuncs are only loaded on demand.
# The test passes independently of whether erfa is already loaded
# (which will be the case for a full test, since coordinates uses it).
def test_coverage(self):
"""Test that we cover all ufunc's"""
all_np_ufuncs = {
ufunc
for ufunc in np.core.umath.__dict__.values()
if isinstance(ufunc, np.ufunc)
}
all_q_ufuncs = qh.UNSUPPORTED_UFUNCS | set(qh.UFUNC_HELPERS.keys())
# Check that every numpy ufunc is covered.
assert all_np_ufuncs - all_q_ufuncs == set()
# Check that all ufuncs we cover come from numpy or erfa.
# (Since coverage for erfa is incomplete, we do not check
# this the other way).
all_erfa_ufuncs = {
ufunc
for ufunc in erfa_ufunc.__dict__.values()
if isinstance(ufunc, np.ufunc)
}
assert all_q_ufuncs - all_np_ufuncs - all_erfa_ufuncs == set()
def test_scipy_registered(self):
# Should be registered as existing even if scipy is not available.
assert "scipy.special" in qh.UFUNC_HELPERS.modules
def test_removal_addition(self):
assert np.add in qh.UFUNC_HELPERS
assert np.add not in qh.UNSUPPORTED_UFUNCS
qh.UFUNC_HELPERS[np.add] = None
assert np.add not in qh.UFUNC_HELPERS
assert np.add in qh.UNSUPPORTED_UFUNCS
qh.UFUNC_HELPERS[np.add] = qh.UFUNC_HELPERS[np.subtract]
assert np.add in qh.UFUNC_HELPERS
assert np.add not in qh.UNSUPPORTED_UFUNCS
@pytest.mark.slow
def test_thread_safety(self, fast_thread_switching):
def dummy_ufunc(*args, **kwargs):
return np.sqrt(*args, **kwargs)
def register():
return {dummy_ufunc: helper_sqrt}
workers = 8
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
for p in range(10000):
helpers = UfuncHelpers()
helpers.register_module(
"astropy.units.tests.test_quantity_ufuncs",
["dummy_ufunc"],
register,
)
futures = [
executor.submit(lambda: helpers[dummy_ufunc])
for i in range(workers)
]
values = [future.result() for future in futures]
assert values == [helper_sqrt] * workers
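# Illustrative sketch (not exercised by the tests above): the registry used in
# TestUfuncHelpers behaves like a plain mapping from ufunc to helper, so, under
# that assumption, support for a custom sqrt-like ufunc could be toggled with
# simple assignments; ``my_sqrt_like`` is a hypothetical ufunc, not part of
# numpy or astropy.
#
#     qh.UFUNC_HELPERS[my_sqrt_like] = helper_sqrt   # enable unit handling
#     qh.UFUNC_HELPERS[my_sqrt_like] = None          # mark as unsupported again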
class TestQuantityTrigonometricFuncs:
"""
Test trigonometric functions
"""
@pytest.mark.parametrize(
"tc",
(
testcase(
f=np.sin,
q_in=(30.0 * u.degree,),
q_out=(0.5 * u.dimensionless_unscaled,),
),
testcase(
f=np.sin,
q_in=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
q_out=(np.array([0.0, 1.0 / np.sqrt(2.0), 1.0]) * u.one,),
),
testcase(
f=np.arcsin,
q_in=(np.sin(30.0 * u.degree),),
q_out=(np.radians(30.0) * u.radian,),
),
testcase(
f=np.arcsin,
q_in=(np.sin(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian),),
q_out=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
),
testcase(
f=np.cos,
q_in=(np.pi / 3.0 * u.radian,),
q_out=(0.5 * u.dimensionless_unscaled,),
),
testcase(
f=np.cos,
q_in=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
q_out=(np.array([1.0, 1.0 / np.sqrt(2.0), 0.0]) * u.one,),
),
testcase(
f=np.arccos,
q_in=(np.cos(np.pi / 3.0 * u.radian),),
q_out=(np.pi / 3.0 * u.radian,),
),
testcase(
f=np.arccos,
q_in=(np.cos(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian),),
q_out=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
),
testcase(
f=np.tan,
q_in=(np.pi / 3.0 * u.radian,),
q_out=(np.sqrt(3.0) * u.dimensionless_unscaled,),
),
testcase(
f=np.tan,
q_in=(np.array([0.0, 45.0, 135.0, 180.0]) * u.degree,),
q_out=(np.array([0.0, 1.0, -1.0, 0.0]) * u.dimensionless_unscaled,),
),
testcase(
f=np.arctan,
q_in=(np.tan(np.pi / 3.0 * u.radian),),
q_out=(np.pi / 3.0 * u.radian,),
),
testcase(
f=np.arctan,
q_in=(np.tan(np.array([10.0, 30.0, 70.0, 80.0]) * u.degree),),
q_out=(np.radians(np.array([10.0, 30.0, 70.0, 80.0]) * u.degree),),
),
testcase(
f=np.arctan2,
q_in=(np.array([10.0, 30.0, 70.0, 80.0]) * u.m, 2.0 * u.km),
q_out=(
np.arctan2(np.array([10.0, 30.0, 70.0, 80.0]), 2000.0) * u.radian,
),
),
testcase(
f=np.arctan2,
q_in=((np.array([10.0, 80.0]) * u.m / (2.0 * u.km)).to(u.one), 1.0),
q_out=(np.arctan2(np.array([10.0, 80.0]) / 2000.0, 1.0) * u.radian,),
),
testcase(f=np.deg2rad, q_in=(180.0 * u.degree,), q_out=(np.pi * u.radian,)),
testcase(f=np.radians, q_in=(180.0 * u.degree,), q_out=(np.pi * u.radian,)),
testcase(f=np.deg2rad, q_in=(3.0 * u.radian,), q_out=(3.0 * u.radian,)),
testcase(f=np.radians, q_in=(3.0 * u.radian,), q_out=(3.0 * u.radian,)),
testcase(f=np.rad2deg, q_in=(60.0 * u.degree,), q_out=(60.0 * u.degree,)),
testcase(f=np.degrees, q_in=(60.0 * u.degree,), q_out=(60.0 * u.degree,)),
testcase(f=np.rad2deg, q_in=(np.pi * u.radian,), q_out=(180.0 * u.degree,)),
testcase(f=np.degrees, q_in=(np.pi * u.radian,), q_out=(180.0 * u.degree,)),
),
)
def test_testcases(self, tc):
return test_testcase(tc)
@pytest.mark.parametrize(
"te",
(
testexc(f=np.deg2rad, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
testexc(f=np.radians, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
            testexc(f=np.rad2deg, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
            testexc(f=np.degrees, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
testexc(
f=np.sin,
q_in=(3.0 * u.m,),
exc=TypeError,
msg="Can only apply 'sin' function to quantities with angle units",
),
testexc(
f=np.arcsin,
q_in=(3.0 * u.m,),
exc=TypeError,
msg="Can only apply 'arcsin' function to dimensionless quantities",
),
testexc(
f=np.cos,
q_in=(3.0 * u.s,),
exc=TypeError,
msg="Can only apply 'cos' function to quantities with angle units",
),
testexc(
f=np.arccos,
q_in=(3.0 * u.s,),
exc=TypeError,
msg="Can only apply 'arccos' function to dimensionless quantities",
),
testexc(
f=np.tan,
q_in=(np.array([1, 2, 3]) * u.N,),
exc=TypeError,
msg="Can only apply 'tan' function to quantities with angle units",
),
testexc(
f=np.arctan,
q_in=(np.array([1, 2, 3]) * u.N,),
exc=TypeError,
msg="Can only apply 'arctan' function to dimensionless quantities",
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1.0 * u.s),
exc=u.UnitsError,
msg="compatible dimensions",
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1.0),
exc=u.UnitsError,
msg="dimensionless quantities when other arg",
),
),
)
def test_testexcs(self, te):
return test_testexc(te)
@pytest.mark.parametrize(
"tw",
(testwarn(f=np.arcsin, q_in=(27.0 * u.pc / (15 * u.kpc),), wfilter="error"),),
)
def test_testwarns(self, tw):
return test_testwarn(tw)
class TestQuantityMathFuncs:
"""
Test other mathematical functions
"""
def test_multiply_scalar(self):
assert np.multiply(4.0 * u.m, 2.0 / u.s) == 8.0 * u.m / u.s
assert np.multiply(4.0 * u.m, 2.0) == 8.0 * u.m
assert np.multiply(4.0, 2.0 / u.s) == 8.0 / u.s
def test_multiply_array(self):
assert np.all(
np.multiply(np.arange(3.0) * u.m, 2.0 / u.s)
== np.arange(0, 6.0, 2.0) * u.m / u.s
)
@pytest.mark.skipif(
not isinstance(getattr(np, "matmul", None), np.ufunc),
reason="np.matmul is not yet a gufunc",
)
def test_matmul(self):
q = np.arange(3.0) * u.m
r = np.matmul(q, q)
assert r == 5.0 * u.m**2
# less trivial case.
q1 = np.eye(3) * u.m
q2 = np.array(
[[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]],
[[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]]]
) / u.s # fmt: skip
r2 = np.matmul(q1, q2)
assert np.all(r2 == np.matmul(q1.value, q2.value) * q1.unit * q2.unit)
@pytest.mark.parametrize("function", (np.divide, np.true_divide))
def test_divide_scalar(self, function):
assert function(4.0 * u.m, 2.0 * u.s) == function(4.0, 2.0) * u.m / u.s
assert function(4.0 * u.m, 2.0) == function(4.0, 2.0) * u.m
assert function(4.0, 2.0 * u.s) == function(4.0, 2.0) / u.s
@pytest.mark.parametrize("function", (np.divide, np.true_divide))
def test_divide_array(self, function):
assert np.all(
function(np.arange(3.0) * u.m, 2.0 * u.s)
== function(np.arange(3.0), 2.0) * u.m / u.s
)
def test_floor_divide_remainder_and_divmod(self):
inch = u.Unit(0.0254 * u.m)
dividend = np.array([1.0, 2.0, 3.0]) * u.m
divisor = np.array([3.0, 4.0, 5.0]) * inch
quotient = dividend // divisor
remainder = dividend % divisor
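        # With 1 inch = 0.0254 m, the divisors are [0.0762, 0.1016, 0.127] m,
        # so the integer quotients are [13, 19, 23] and the remainders are
        # 1 - 13*0.0762 = 0.0094 m, 2 - 19*0.1016 = 0.0696 m, 3 - 23*0.127 = 0.079 m.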
assert_allclose(quotient.value, [13.0, 19.0, 23.0])
assert quotient.unit == u.dimensionless_unscaled
assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
assert remainder.unit == dividend.unit
quotient2 = np.floor_divide(dividend, divisor)
remainder2 = np.remainder(dividend, divisor)
assert np.all(quotient2 == quotient)
assert np.all(remainder2 == remainder)
quotient3, remainder3 = divmod(dividend, divisor)
assert np.all(quotient3 == quotient)
assert np.all(remainder3 == remainder)
with pytest.raises(TypeError):
divmod(dividend, u.km)
with pytest.raises(TypeError):
dividend // u.km
with pytest.raises(TypeError):
dividend % u.km
quotient4, remainder4 = np.divmod(dividend, divisor)
assert np.all(quotient4 == quotient)
assert np.all(remainder4 == remainder)
with pytest.raises(TypeError):
np.divmod(dividend, u.km)
def test_sqrt_scalar(self):
assert np.sqrt(4.0 * u.m) == 2.0 * u.m**0.5
def test_sqrt_array(self):
assert np.all(
np.sqrt(np.array([1.0, 4.0, 9.0]) * u.m)
== np.array([1.0, 2.0, 3.0]) * u.m**0.5
)
def test_square_scalar(self):
assert np.square(4.0 * u.m) == 16.0 * u.m**2
def test_square_array(self):
assert np.all(
np.square(np.array([1.0, 2.0, 3.0]) * u.m)
== np.array([1.0, 4.0, 9.0]) * u.m**2
)
def test_reciprocal_scalar(self):
assert np.reciprocal(4.0 * u.m) == 0.25 / u.m
def test_reciprocal_array(self):
assert np.all(
np.reciprocal(np.array([1.0, 2.0, 4.0]) * u.m)
== np.array([1.0, 0.5, 0.25]) / u.m
)
def test_heaviside_scalar(self):
assert np.heaviside(0.0 * u.m, 0.5) == 0.5 * u.dimensionless_unscaled
assert (
np.heaviside(0.0 * u.s, 25 * u.percent) == 0.25 * u.dimensionless_unscaled
)
assert np.heaviside(2.0 * u.J, 0.25) == 1.0 * u.dimensionless_unscaled
def test_heaviside_array(self):
values = np.array([-1.0, 0.0, 0.0, +1.0])
halfway = np.array([0.75, 0.25, 0.75, 0.25]) * u.dimensionless_unscaled
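        # np.heaviside(x, h) is 0 for x < 0, h for x == 0, and 1 for x > 0,
        # so with h = [0.75, 0.25, 0.75, 0.25] the expected result below is
        # [0, 0.25, 0.75, 1.0].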
assert np.all(
np.heaviside(values * u.m, halfway * u.dimensionless_unscaled)
== [0, 0.25, 0.75, +1.0] * u.dimensionless_unscaled
)
@pytest.mark.parametrize("function", (np.cbrt,))
def test_cbrt_scalar(self, function):
assert function(8.0 * u.m**3) == 2.0 * u.m
@pytest.mark.parametrize("function", (np.cbrt,))
def test_cbrt_array(self, function):
# Calculate cbrt on both sides since on Windows the cube root of 64
        # does not exactly equal 4. See #4388.
values = np.array([1.0, 8.0, 64.0])
assert np.all(function(values * u.m**3) == function(values) * u.m)
def test_power_scalar(self):
assert np.power(4.0 * u.m, 2.0) == 16.0 * u.m**2
assert np.power(4.0, 200.0 * u.cm / u.m) == u.Quantity(
16.0, u.dimensionless_unscaled
)
# regression check on #1696
assert np.power(4.0 * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
def test_power_array(self):
assert np.all(
np.power(np.array([1.0, 2.0, 3.0]) * u.m, 3.0)
== np.array([1.0, 8.0, 27.0]) * u.m**3
)
# regression check on #1696
assert np.all(
np.power(np.arange(4.0) * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
)
def test_float_power_array(self):
assert np.all(
np.float_power(np.array([1.0, 2.0, 3.0]) * u.m, 3.0)
== np.array([1.0, 8.0, 27.0]) * u.m**3
)
# regression check on #1696
assert np.all(
np.float_power(np.arange(4.0) * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
)
def test_power_array_array(self):
with pytest.raises(ValueError):
np.power(4.0 * u.m, [2.0, 4.0])
def test_power_array_array2(self):
with pytest.raises(ValueError):
np.power([2.0, 4.0] * u.m, [2.0, 4.0])
def test_power_array_array3(self):
# Identical unit fractions are converted automatically to dimensionless
# and should be allowed as base for np.power: #4764
q = [2.0, 4.0] * u.m / u.m
powers = [2.0, 4.0]
res = np.power(q, powers)
assert np.all(res.value == q.value**powers)
assert res.unit == u.dimensionless_unscaled
# The same holds for unit fractions that are scaled dimensionless.
q2 = [2.0, 4.0] * u.m / u.cm
# Test also against different types of exponent
for cls in (list, tuple, np.array, np.ma.array, u.Quantity):
res2 = np.power(q2, cls(powers))
assert np.all(res2.value == q2.to_value(1) ** powers)
assert res2.unit == u.dimensionless_unscaled
# Though for single powers, we keep the composite unit.
res3 = q2**2
assert np.all(res3.value == q2.value**2)
assert res3.unit == q2.unit**2
assert np.all(res3 == q2 ** [2, 2])
def test_power_invalid(self):
with pytest.raises(TypeError, match="raise something to a dimensionless"):
np.power(3.0, 4.0 * u.m)
def test_copysign_scalar(self):
assert np.copysign(3 * u.m, 1.0) == 3.0 * u.m
assert np.copysign(3 * u.m, 1.0 * u.s) == 3.0 * u.m
assert np.copysign(3 * u.m, -1.0) == -3.0 * u.m
assert np.copysign(3 * u.m, -1.0 * u.s) == -3.0 * u.m
def test_copysign_array(self):
assert np.all(
np.copysign(np.array([1.0, 2.0, 3.0]) * u.s, -1.0)
== -np.array([1.0, 2.0, 3.0]) * u.s
)
assert np.all(
np.copysign(np.array([1.0, 2.0, 3.0]) * u.s, -1.0 * u.m)
== -np.array([1.0, 2.0, 3.0]) * u.s
)
assert np.all(
np.copysign(
np.array([1.0, 2.0, 3.0]) * u.s, np.array([-2.0, 2.0, -4.0]) * u.m
)
== np.array([-1.0, 2.0, -3.0]) * u.s
)
q = np.copysign(np.array([1.0, 2.0, 3.0]), -3 * u.m)
assert np.all(q == np.array([-1.0, -2.0, -3.0]))
assert not isinstance(q, u.Quantity)
def test_ldexp_scalar(self):
assert np.ldexp(4.0 * u.m, 2) == 16.0 * u.m
def test_ldexp_array(self):
assert np.all(
np.ldexp(np.array([1.0, 2.0, 3.0]) * u.m, [3, 2, 1])
== np.array([8.0, 8.0, 6.0]) * u.m
)
def test_ldexp_invalid(self):
with pytest.raises(TypeError):
np.ldexp(3.0 * u.m, 4.0)
with pytest.raises(TypeError):
np.ldexp(3.0, u.Quantity(4, u.m, dtype=int))
@pytest.mark.parametrize(
"function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
)
def test_exp_scalar(self, function):
q = function(3.0 * u.m / (6.0 * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(0.5)
@pytest.mark.parametrize(
"function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
)
def test_exp_array(self, function):
q = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == function(np.array([1.0 / 3.0, 1.0 / 2.0, 1.0])))
# should also work on quantities that can be made dimensionless
q2 = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value, function(np.array([100.0 / 3.0, 100.0 / 2.0, 100.0])))
@pytest.mark.parametrize(
"function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
)
def test_exp_invalid_units(self, function):
# Can't use exp() with non-dimensionless quantities
with pytest.raises(
TypeError,
match=(
f"Can only apply '{function.__name__}' function "
"to dimensionless quantities"
),
):
function(3.0 * u.m / u.s)
def test_modf_scalar(self):
q = np.modf(9.0 * u.m / (600.0 * u.cm))
assert q == (0.5 * u.dimensionless_unscaled, 1.0 * u.dimensionless_unscaled)
def test_modf_array(self):
v = np.arange(10.0) * u.m / (500.0 * u.cm)
q = np.modf(v)
n = np.modf(v.to_value(u.dimensionless_unscaled))
assert q[0].unit == u.dimensionless_unscaled
assert q[1].unit == u.dimensionless_unscaled
assert all(q[0].value == n[0])
assert all(q[1].value == n[1])
def test_frexp_scalar(self):
q = np.frexp(3.0 * u.m / (6.0 * u.m))
assert q == (np.array(0.5), np.array(0.0))
def test_frexp_array(self):
q = np.frexp(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m))
assert all(
(_q0, _q1) == np.frexp(_d)
for _q0, _q1, _d in zip(q[0], q[1], [1.0 / 3.0, 1.0 / 2.0, 1.0])
)
def test_frexp_invalid_units(self):
        # Can't use frexp() with non-dimensionless quantities
with pytest.raises(
TypeError,
match=(
"Can only apply 'frexp' function to unscaled dimensionless quantities"
),
):
np.frexp(3.0 * u.m / u.s)
# also does not work on quantities that can be made dimensionless
with pytest.raises(
TypeError,
match=(
"Can only apply 'frexp' function to unscaled dimensionless quantities"
),
):
np.frexp(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm))
@pytest.mark.parametrize("function", (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_array(self, function):
q = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm), 1.0)
assert q.unit == u.dimensionless_unscaled
assert_allclose(
q.value, function(np.array([100.0 / 3.0, 100.0 / 2.0, 100.0]), 1.0)
)
@pytest.mark.parametrize("function", (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_invalid_units(self, function):
with pytest.raises(
TypeError,
match=(
f"Can only apply '{function.__name__}' function to dimensionless"
" quantities"
),
):
function(1.0 * u.km / u.s, 3.0 * u.m / u.s)
class TestInvariantUfuncs:
@pytest.mark.parametrize(
"ufunc",
[
np.absolute,
np.fabs,
np.conj,
np.conjugate,
np.negative,
np.spacing,
np.rint,
np.floor,
np.ceil,
np.positive,
],
)
def test_invariant_scalar(self, ufunc):
q_i = 4.7 * u.m
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert q_o.value == ufunc(q_i.value)
@pytest.mark.parametrize(
"ufunc", [np.absolute, np.conjugate, np.negative, np.rint, np.floor, np.ceil]
)
def test_invariant_array(self, ufunc):
q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert np.all(q_o.value == ufunc(q_i.value))
@pytest.mark.parametrize(
"ufunc",
[
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
],
)
def test_invariant_twoarg_scalar(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.km
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(
"ufunc",
[
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
],
)
def test_invariant_twoarg_array(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.us
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(
("ufunc", "arbitrary"),
[
(np.add, 0.0),
(np.subtract, 0.0),
(np.hypot, 0.0),
(np.maximum, 0.0),
(np.minimum, 0.0),
(np.nextafter, 0.0),
(np.remainder, np.inf),
(np.mod, np.inf),
(np.fmod, np.inf),
],
)
def test_invariant_twoarg_one_arbitrary(self, ufunc, arbitrary):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i1, arbitrary)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary))
@pytest.mark.parametrize(
"ufunc",
[
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
],
)
def test_invariant_twoarg_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError, match="compatible dimensions"):
ufunc(q_i1, q_i2)
class TestComparisonUfuncs:
@pytest.mark.parametrize(
"ufunc",
[np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal],
)
def test_comparison_valid_units(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.Ms
q_o = ufunc(q_i1, q_i2)
assert not isinstance(q_o, u.Quantity)
assert q_o.dtype == bool
assert np.all(q_o == ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
q_o2 = ufunc(q_i1 / q_i2, 2.0)
assert not isinstance(q_o2, u.Quantity)
assert q_o2.dtype == bool
assert np.all(
q_o2 == ufunc((q_i1 / q_i2).to_value(u.dimensionless_unscaled), 2.0)
)
# comparison with 0., inf, nan is OK even for dimensional quantities
# (though ignore numpy runtime warnings for comparisons with nan).
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
for arbitrary_unit_value in (0.0, np.inf, np.nan):
ufunc(q_i1, arbitrary_unit_value)
ufunc(q_i1, arbitrary_unit_value * np.ones(len(q_i1)))
# and just for completeness
ufunc(q_i1, np.array([0.0, np.inf, np.nan]))
@pytest.mark.parametrize(
"ufunc",
[np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal],
)
def test_comparison_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError, match="compatible dimensions"):
ufunc(q_i1, q_i2)
@pytest.mark.parametrize("ufunc", (np.isfinite, np.isinf, np.isnan, np.signbit))
def test_onearg_test_ufuncs(self, ufunc):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
out = ufunc(q)
assert not isinstance(out, u.Quantity)
assert out.dtype == bool
assert np.all(out == ufunc(q.value))
# Ignore RuntimeWarning raised on Windows and s390.
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in sign")
def test_sign(self):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
out = np.sign(q)
assert not isinstance(out, u.Quantity)
assert out.dtype == q.dtype
assert np.all((out == np.sign(q.value)) | (np.isnan(out) & np.isnan(q.value)))
class TestInplaceUfuncs:
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_ufunc_inplace(self, value):
# without scaling
s = value * u.rad
check = s
np.sin(s, out=s)
assert check is s
assert check.unit == u.dimensionless_unscaled
# with scaling
s2 = (value * u.rad).to(u.deg)
check2 = s2
np.sin(s2, out=s2)
assert check2 is s2
assert check2.unit == u.dimensionless_unscaled
assert_allclose(s.value, s2.value)
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_ufunc_inplace_2(self, value):
"""Check inplace works with non-quantity input and quantity output"""
s = value * u.m
check = s
np.absolute(value, out=s)
assert check is s
assert np.all(check.value == np.absolute(value))
assert check.unit is u.dimensionless_unscaled
np.sqrt(value, out=s)
assert check is s
assert np.all(check.value == np.sqrt(value))
assert check.unit is u.dimensionless_unscaled
np.exp(value, out=s)
assert check is s
assert np.all(check.value == np.exp(value))
assert check.unit is u.dimensionless_unscaled
np.arcsin(value / 10.0, out=s)
assert check is s
assert np.all(check.value == np.arcsin(value / 10.0))
assert check.unit is u.radian
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_two_output_ufunc_inplace(self, value):
v = 100.0 * value * u.cm / u.m
v_copy = v.copy()
tmp = v.copy()
check = v
np.modf(v, tmp, v)
assert check is v
assert check.unit == u.dimensionless_unscaled
v2 = v_copy.to(u.dimensionless_unscaled)
check2 = v2
np.modf(v2, tmp, v2)
assert check2 is v2
assert check2.unit == u.dimensionless_unscaled
# can also replace in last position if no scaling is needed
v3 = v_copy.to(u.dimensionless_unscaled)
check3 = v3
np.modf(v3, v3, tmp)
assert check3 is v3
assert check3.unit == u.dimensionless_unscaled
# can also replace input with first output when scaling
v4 = v_copy.copy()
check4 = v4
np.modf(v4, v4, tmp)
assert check4 is v4
assert check4.unit == u.dimensionless_unscaled
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_ufunc_inplace_1(self, value):
s = value * u.cycle
check = s
s /= 2.0
assert check is s
assert np.all(check.value == value / 2.0)
s /= u.s
assert check is s
assert check.unit == u.cycle / u.s
s *= 2.0 * u.s
assert check is s
assert np.all(check == value * u.cycle)
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_ufunc_inplace_2(self, value):
s = value * u.cycle
check = s
np.arctan2(s, s, out=s)
assert check is s
assert check.unit == u.radian
with pytest.raises(u.UnitsError):
s += 1.0 * u.m
assert check is s
assert check.unit == u.radian
np.arctan2(1.0 * u.deg, s, out=s)
assert check is s
assert check.unit == u.radian
np.add(1.0 * u.deg, s, out=s)
assert check is s
assert check.unit == u.deg
np.multiply(2.0 / u.s, s, out=s)
assert check is s
assert check.unit == u.deg / u.s
def test_two_argument_ufunc_inplace_3(self):
s = np.array([1.0, 2.0, 3.0]) * u.dimensionless_unscaled
np.add(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0]) * 2.0, out=s)
assert np.all(s.value == np.array([3.0, 6.0, 9.0]))
assert s.unit is u.dimensionless_unscaled
np.arctan2(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0]) * 2.0, out=s)
assert_allclose(s.value, np.arctan2(1.0, 2.0))
assert s.unit is u.radian
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_two_output_ufunc_inplace(self, value):
v = value * u.m
divisor = 70.0 * u.cm
v1 = v.copy()
tmp = v.copy()
check = np.divmod(v1, divisor, out=(tmp, v1))
assert check[0] is tmp and check[1] is v1
assert tmp.unit == u.dimensionless_unscaled
assert v1.unit == v.unit
v2 = v.copy()
check2 = np.divmod(v2, divisor, out=(v2, tmp))
assert check2[0] is v2 and check2[1] is tmp
assert v2.unit == u.dimensionless_unscaled
assert tmp.unit == v.unit
v3a = v.copy()
v3b = v.copy()
check3 = np.divmod(v3a, divisor, out=(v3a, v3b))
assert check3[0] is v3a and check3[1] is v3b
assert v3a.unit == u.dimensionless_unscaled
assert v3b.unit == v.unit
def test_ufunc_inplace_non_contiguous_data(self):
# ensure inplace works also for non-contiguous data (closes #1834)
s = np.arange(10.0) * u.m
s_copy = s.copy()
s2 = s[::2]
s2 += 1.0 * u.cm
assert np.all(s[::2] > s_copy[::2])
assert np.all(s[1::2] == s_copy[1::2])
def test_ufunc_inplace_non_standard_dtype(self):
"""Check that inplace operations check properly for casting.
        The first two tests check that float32 is kept (closes #3976).
"""
a1 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a1 *= np.float32(10)
assert a1.unit is u.m
assert a1.dtype == np.float32
a2 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a2 += 20.0 * u.km
assert a2.unit is u.m
assert a2.dtype == np.float32
# For integer, in-place only works if no conversion is done.
a3 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
a3 += u.Quantity(10, u.m, dtype=np.int64)
assert a3.unit is u.m
assert a3.dtype == np.int32
a4 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
with pytest.raises(TypeError):
a4 += u.Quantity(10, u.mm, dtype=np.int64)
@pytest.mark.parametrize("ufunc", (np.equal, np.greater))
def test_comparison_ufuncs_inplace(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.Ms
check = np.empty(q_i1.shape, bool)
ufunc(q_i1.value, q_i2.to_value(q_i1.unit), out=check)
result = np.empty(q_i1.shape, bool)
q_o = ufunc(q_i1, q_i2, out=result)
assert q_o is result
assert type(q_o) is np.ndarray
assert q_o.dtype == bool
assert np.all(q_o == check)
@pytest.mark.parametrize("ufunc", (np.isfinite, np.signbit))
def test_onearg_test_ufuncs_inplace(self, ufunc):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
check = np.empty(q.shape, bool)
ufunc(q.value, out=check)
result = np.empty(q.shape, bool)
out = ufunc(q, out=result)
assert out is result
assert type(out) is np.ndarray
assert out.dtype == bool
assert np.all(out == ufunc(q.value))
# Ignore RuntimeWarning raised on Windows and s390.
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in sign")
def test_sign_inplace(self):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
check = np.empty(q.shape, q.dtype)
np.sign(q.value, out=check)
result = np.empty(q.shape, q.dtype)
out = np.sign(q, out=result)
assert out is result
assert type(out) is np.ndarray
assert out.dtype == q.dtype
assert np.all((out == np.sign(q.value)) | (np.isnan(out) & np.isnan(q.value)))
def test_ndarray_inplace_op_with_quantity(self):
"""Regression test for gh-13911."""
a = np.arange(3.0)
q = u.Quantity([12.5, 25.0], u.percent)
a[:2] += q # This used to fail
assert_array_equal(a, np.array([0.125, 1.25, 2.0]))
class TestWhere:
"""Test the where argument in ufuncs."""
def test_where(self):
q = np.arange(4.0) << u.m
out = np.zeros(4) << u.m
result = np.add(q, 1 * u.km, out=out, where=[True, True, True, False])
assert result is out
assert_array_equal(result, [1000.0, 1001.0, 1002.0, 0.0] << u.m)
@pytest.mark.xfail(
NUMPY_LT_1_25, reason="where array_ufunc support introduced in numpy 1.25"
)
def test_exception_with_where_quantity(self):
a = np.ones(2)
where = np.ones(2, bool) << u.m
with pytest.raises(TypeError, match="all returned NotImplemented"):
np.add(a, a, out=a, where=where)
@pytest.mark.skipif(
not hasattr(np.core.umath, "clip"), reason="no clip ufunc available"
)
class TestClip:
"""Test the clip ufunc.
    In numpy, this is hidden behind a function that does no backwards
compatibility checks. We explicitly test the ufunc here.
"""
def setup_method(self):
self.clip = np.core.umath.clip
def test_clip_simple(self):
q = np.arange(-1.0, 10.0) * u.m
q_min = 125 * u.cm
q_max = 0.0055 * u.km
result = self.clip(q, q_min, q_max)
assert result.unit == q.unit
expected = (
self.clip(q.value, q_min.to_value(q.unit), q_max.to_value(q.unit)) * q.unit
)
assert np.all(result == expected)
def test_clip_unitless_parts(self):
q = np.arange(-1.0, 10.0) * u.m
qlim = 0.0055 * u.km
# one-sided
result1 = self.clip(q, -np.inf, qlim)
expected1 = self.clip(q.value, -np.inf, qlim.to_value(q.unit)) * q.unit
assert np.all(result1 == expected1)
result2 = self.clip(q, qlim, np.inf)
expected2 = self.clip(q.value, qlim.to_value(q.unit), np.inf) * q.unit
assert np.all(result2 == expected2)
# Zero
result3 = self.clip(q, np.zeros(q.shape), qlim)
expected3 = self.clip(q.value, 0, qlim.to_value(q.unit)) * q.unit
assert np.all(result3 == expected3)
# Two unitless parts, array-shaped.
result4 = self.clip(q, np.zeros(q.shape), np.full(q.shape, np.inf))
expected4 = self.clip(q.value, 0, np.inf) * q.unit
assert np.all(result4 == expected4)
def test_clip_dimensionless(self):
q = np.arange(-1.0, 10.0) * u.dimensionless_unscaled
result = self.clip(q, 200 * u.percent, 5.0)
expected = self.clip(q, 2.0, 5.0)
assert result.unit == u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_ndarray(self):
a = np.arange(-1.0, 10.0)
result = self.clip(a, 200 * u.percent, 5.0 * u.dimensionless_unscaled)
assert isinstance(result, u.Quantity)
expected = self.clip(a, 2.0, 5.0) * u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_quantity_inplace(self):
q = np.arange(-1.0, 10.0) * u.m
q_min = 125 * u.cm
q_max = 0.0055 * u.km
expected = (
self.clip(q.value, q_min.to_value(q.unit), q_max.to_value(q.unit)) * q.unit
)
result = self.clip(q, q_min, q_max, out=q)
assert result is q
assert np.all(result == expected)
def test_clip_ndarray_dimensionless_output(self):
a = np.arange(-1.0, 10.0)
q = np.zeros_like(a) * u.m
expected = self.clip(a, 2.0, 5.0) * u.dimensionless_unscaled
result = self.clip(a, 200 * u.percent, 5.0 * u.dimensionless_unscaled, out=q)
assert result is q
assert result.unit == u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_errors(self):
q = np.arange(-1.0, 10.0) * u.m
with pytest.raises(u.UnitsError):
self.clip(q, 0, 1 * u.s)
with pytest.raises(u.UnitsError):
self.clip(q.value, 0, 1 * u.s)
with pytest.raises(u.UnitsError):
self.clip(q, -1, 0.0)
with pytest.raises(u.UnitsError):
self.clip(q, 0.0, 1.0)
class TestUfuncAt:
"""Test that 'at' method for ufuncs (calculates in-place at given indices)
For Quantities, since calculations are in-place, it makes sense only
if the result is still a quantity, and if the unit does not have to change
"""
def test_one_argument_ufunc_at(self):
q = np.arange(10.0) * u.m
i = np.array([1, 2])
qv = q.value.copy()
np.negative.at(q, i)
np.negative.at(qv, i)
assert np.all(q.value == qv)
assert q.unit is u.m
# cannot change from quantity to bool array
with pytest.raises(TypeError):
np.isfinite.at(q, i)
# for selective in-place, cannot change the unit
with pytest.raises(u.UnitsError):
np.square.at(q, i)
# except if the unit does not change (i.e., dimensionless)
d = np.arange(10.0) * u.dimensionless_unscaled
dv = d.value.copy()
np.square.at(d, i)
np.square.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
d = np.arange(10.0) * u.dimensionless_unscaled
dv = d.value.copy()
np.log.at(d, i)
np.log.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
# also for sine it doesn't work, even if given an angle
a = np.arange(10.0) * u.radian
with pytest.raises(u.UnitsError):
np.sin.at(a, i)
# except, for consistency, if we have made radian equivalent to
# dimensionless (though hopefully it will never be needed)
av = a.value.copy()
with u.add_enabled_equivalencies(u.dimensionless_angles()):
np.sin.at(a, i)
np.sin.at(av, i)
assert_allclose(a.value, av)
# but we won't do double conversion
ad = np.arange(10.0) * u.degree
with pytest.raises(u.UnitsError):
np.sin.at(ad, i)
def test_two_argument_ufunc_at(self):
s = np.arange(10.0) * u.m
i = np.array([1, 2])
check = s.value.copy()
np.add.at(s, i, 1.0 * u.km)
np.add.at(check, i, 1000.0)
assert np.all(s.value == check)
assert s.unit is u.m
with pytest.raises(u.UnitsError):
np.add.at(s, i, 1.0 * u.s)
# also raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.at(s, i, 1 * u.s)
# but be fine if it does not
s = np.arange(10.0) * u.m
check = s.value.copy()
np.multiply.at(s, i, 2.0 * u.dimensionless_unscaled)
np.multiply.at(check, i, 2)
assert np.all(s.value == check)
s = np.arange(10.0) * u.m
np.multiply.at(s, i, 2.0)
assert np.all(s.value == check)
# of course cannot change class of data either
with pytest.raises(TypeError):
np.greater.at(s, i, 1.0 * u.km)
class TestUfuncReduceReduceatAccumulate:
"""Test 'reduce', 'reduceat' and 'accumulate' methods for ufuncs
For Quantities, it makes sense only if the unit does not have to change
"""
def test_one_argument_ufunc_reduce_accumulate(self):
# one argument cannot be used
s = np.arange(10.0) * u.radian
i = np.array([0, 5, 1, 6])
with pytest.raises(ValueError):
np.sin.reduce(s)
with pytest.raises(ValueError):
np.sin.accumulate(s)
with pytest.raises(ValueError):
np.sin.reduceat(s, i)
def test_two_argument_ufunc_reduce_accumulate(self):
s = np.arange(10.0) * u.m
i = np.array([0, 5, 1, 6])
check = s.value.copy()
s_add_reduce = np.add.reduce(s)
check_add_reduce = np.add.reduce(check)
assert s_add_reduce.value == check_add_reduce
assert s_add_reduce.unit is u.m
s_add_accumulate = np.add.accumulate(s)
check_add_accumulate = np.add.accumulate(check)
assert np.all(s_add_accumulate.value == check_add_accumulate)
assert s_add_accumulate.unit is u.m
s_add_reduceat = np.add.reduceat(s, i)
check_add_reduceat = np.add.reduceat(check, i)
assert np.all(s_add_reduceat.value == check_add_reduceat)
assert s_add_reduceat.unit is u.m
# reduce(at) or accumulate on comparisons makes no sense,
# as intermediate result is not even a Quantity
with pytest.raises(TypeError):
np.greater.reduce(s)
with pytest.raises(TypeError):
np.greater.accumulate(s)
with pytest.raises(TypeError):
np.greater.reduceat(s, i)
# raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.reduce(s)
with pytest.raises(u.UnitsError):
np.multiply.accumulate(s)
with pytest.raises(u.UnitsError):
np.multiply.reduceat(s, i)
# but be fine if it does not
s = np.arange(10.0) * u.dimensionless_unscaled
check = s.value.copy()
s_multiply_reduce = np.multiply.reduce(s)
check_multiply_reduce = np.multiply.reduce(check)
assert s_multiply_reduce.value == check_multiply_reduce
assert s_multiply_reduce.unit is u.dimensionless_unscaled
s_multiply_accumulate = np.multiply.accumulate(s)
check_multiply_accumulate = np.multiply.accumulate(check)
assert np.all(s_multiply_accumulate.value == check_multiply_accumulate)
assert s_multiply_accumulate.unit is u.dimensionless_unscaled
s_multiply_reduceat = np.multiply.reduceat(s, i)
check_multiply_reduceat = np.multiply.reduceat(check, i)
assert np.all(s_multiply_reduceat.value == check_multiply_reduceat)
assert s_multiply_reduceat.unit is u.dimensionless_unscaled
class TestUfuncOuter:
"""Test 'outer' methods for ufuncs
Just a few spot checks, since it uses the same code as the regular
ufunc call
"""
def test_one_argument_ufunc_outer(self):
# one argument cannot be used
s = np.arange(10.0) * u.radian
with pytest.raises(ValueError):
np.sin.outer(s)
def test_two_argument_ufunc_outer(self):
s1 = np.arange(10.0) * u.m
s2 = np.arange(2.0) * u.s
check1 = s1.value
check2 = s2.value
s12_multiply_outer = np.multiply.outer(s1, s2)
check12_multiply_outer = np.multiply.outer(check1, check2)
assert np.all(s12_multiply_outer.value == check12_multiply_outer)
assert s12_multiply_outer.unit == s1.unit * s2.unit
# raise UnitsError if appropriate
with pytest.raises(u.UnitsError):
np.add.outer(s1, s2)
# but be fine if it does not
s3 = np.arange(2.0) * s1.unit
check3 = s3.value
s13_add_outer = np.add.outer(s1, s3)
check13_add_outer = np.add.outer(check1, check3)
assert np.all(s13_add_outer.value == check13_add_outer)
assert s13_add_outer.unit is s1.unit
s13_greater_outer = np.greater.outer(s1, s3)
check13_greater_outer = np.greater.outer(check1, check3)
assert type(s13_greater_outer) is np.ndarray
assert np.all(s13_greater_outer == check13_greater_outer)
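# The DuckQuantity classes below wrap a Quantity with increasing ufunc support:
# DuckQuantity1 and DuckQuantity2 implement neither __array_ufunc__ nor the
# array protocol, so ufuncs involving them should fail; DuckQuantity3 delegates
# __array_ufunc__ to the wrapped .data and re-wraps the result; DuckQuantity4
# additionally exposes a .unit that is itself a duck object rather than a
# UnitBase instance.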
@dataclasses.dataclass
class DuckQuantity1:
data: u.Quantity
@dataclasses.dataclass
class DuckQuantity2(DuckQuantity1):
@property
def unit(self) -> u.UnitBase:
return self.data.unit
@dataclasses.dataclass(eq=False)
class DuckQuantity3(DuckQuantity2):
def __array_ufunc__(self, function, method, *inputs, **kwargs):
inputs = [inp.data if isinstance(inp, type(self)) else inp for inp in inputs]
out = kwargs.get("out", None)
kwargs_copy = {}
for k in kwargs:
kwarg = kwargs[k]
if isinstance(kwarg, type(self)):
kwargs_copy[k] = kwarg.data
elif isinstance(kwarg, (list, tuple)):
kwargs_copy[k] = type(kwarg)(
item.data if isinstance(item, type(self)) else item
for item in kwarg
)
else:
kwargs_copy[k] = kwarg
kwargs = kwargs_copy
for inp in inputs:
if isinstance(inp, np.ndarray):
result = inp.__array_ufunc__(function, method, *inputs, **kwargs)
if result is not NotImplemented:
if out is None:
return type(self)(result)
else:
if function.nout == 1:
return out[0]
else:
return out
return NotImplemented
class DuckQuantity4(DuckQuantity3):
@property
def unit(self):
return DuckQuantity1(1 * self.data.unit)
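# Editor's note (illustrative sketch, not part of the original test module):
# DuckQuantity3 implements the __array_ufunc__ protocol by unwrapping any
# DuckQuantity inputs/kwargs, deferring to the wrapped Quantity, and wrapping
# the result again, so, assuming the classes defined above,
#
#     >>> np.negative(DuckQuantity3(1 * u.mm)).data
#     <Quantity -1. mm>
#
# whereas DuckQuantity1/DuckQuantity2 define no __array_ufunc__, so ufunc
# calls on them fail; both behaviours are exercised by the tests below.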
class TestUfuncReturnsNotImplemented:
@pytest.mark.parametrize("ufunc", (np.negative, np.abs))
class TestUnaryUfuncs:
@pytest.mark.parametrize(
"duck_quantity",
[DuckQuantity1(1 * u.mm), DuckQuantity2(1 * u.mm)],
)
def test_basic(self, ufunc, duck_quantity):
with pytest.raises(TypeError, match="bad operand type for .*"):
ufunc(duck_quantity)
@pytest.mark.parametrize(
"duck_quantity",
[
DuckQuantity3(1 * u.mm),
DuckQuantity3([1, 2] * u.mm),
DuckQuantity4(1 * u.mm),
],
)
@pytest.mark.parametrize("out", [None, "empty"])
def test_full(self, ufunc, duck_quantity, out):
out_expected = out
if out == "empty":
out = type(duck_quantity)(np.empty_like(ufunc(duck_quantity.data)))
out_expected = np.empty_like(ufunc(duck_quantity.data))
result = ufunc(duck_quantity, out=out)
if out is not None:
assert result is out
result_expected = ufunc(duck_quantity.data, out=out_expected)
assert np.all(result.data == result_expected)
@pytest.mark.parametrize("ufunc", (np.add, np.multiply, np.less))
@pytest.mark.parametrize("quantity", (1 * u.m, [1, 2] * u.m))
class TestBinaryUfuncs:
@pytest.mark.parametrize(
"duck_quantity",
[DuckQuantity1(1 * u.mm), DuckQuantity2(1 * u.mm)],
)
def test_basic(self, ufunc, quantity, duck_quantity):
with pytest.raises(
(TypeError, ValueError),
match=(
r"(Unsupported operand type\(s\) for ufunc .*)|"
r"(unsupported operand type\(s\) for .*)|"
r"(Value not scalar compatible or convertible to an int, float, or complex array)"
),
):
ufunc(quantity, duck_quantity)
@pytest.mark.parametrize(
"duck_quantity",
[
DuckQuantity3(1 * u.mm),
DuckQuantity3([1, 2] * u.mm),
DuckQuantity4(1 * u.mm),
],
)
@pytest.mark.parametrize("out", [None, "empty"])
def test_full(self, ufunc, quantity, duck_quantity, out):
out_expected = out
if out == "empty":
out = type(duck_quantity)(
np.empty_like(ufunc(quantity, duck_quantity.data))
)
out_expected = np.empty_like(ufunc(quantity, duck_quantity.data))
result = ufunc(quantity, duck_quantity, out=out)
if out is not None:
assert result is out
result_expected = ufunc(quantity, duck_quantity.data, out=out_expected)
assert np.all(result.data == result_expected)
if HAS_SCIPY:
from scipy import special as sps
erf_like_ufuncs = (
sps.erf, sps.erfc, sps.erfcx, sps.erfi,
sps.gamma, sps.gammaln, sps.loggamma, sps.gammasgn, sps.psi,
sps.rgamma, sps.digamma, sps.wofz, sps.dawsn,
sps.entr, sps.exprel, sps.expm1, sps.log1p, sps.exp2, sps.exp10,
) # fmt: skip
if isinstance(sps.erfinv, np.ufunc):
erf_like_ufuncs += (sps.erfinv, sps.erfcinv)
def test_scipy_registration():
"""Check that scipy gets loaded upon first use."""
assert sps.erf not in qh.UFUNC_HELPERS
sps.erf(1.0 * u.percent)
assert sps.erf in qh.UFUNC_HELPERS
if isinstance(sps.erfinv, np.ufunc):
assert sps.erfinv in qh.UFUNC_HELPERS
else:
assert sps.erfinv not in qh.UFUNC_HELPERS
class TestScipySpecialUfuncs:
@pytest.mark.parametrize("function", erf_like_ufuncs)
def test_erf_scalar(self, function):
TestQuantityMathFuncs.test_exp_scalar(None, function)
@pytest.mark.parametrize("function", erf_like_ufuncs)
def test_erf_array(self, function):
TestQuantityMathFuncs.test_exp_array(None, function)
@pytest.mark.parametrize("function", erf_like_ufuncs)
def test_erf_invalid_units(self, function):
TestQuantityMathFuncs.test_exp_invalid_units(None, function)
@pytest.mark.parametrize("function", (sps.cbrt,))
def test_cbrt_scalar(self, function):
TestQuantityMathFuncs.test_cbrt_scalar(None, function)
@pytest.mark.parametrize("function", (sps.cbrt,))
def test_cbrt_array(self, function):
TestQuantityMathFuncs.test_cbrt_array(None, function)
@pytest.mark.parametrize("function", (sps.radian,))
def test_radian(self, function):
q1 = function(180.0 * u.degree, 0.0 * u.arcmin, 0.0 * u.arcsec)
assert_allclose(q1.value, np.pi)
assert q1.unit == u.radian
q2 = function(0.0 * u.degree, 30.0 * u.arcmin, 0.0 * u.arcsec)
assert_allclose(q2.value, (30.0 * u.arcmin).to(u.radian).value)
assert q2.unit == u.radian
q3 = function(0.0 * u.degree, 0.0 * u.arcmin, 30.0 * u.arcsec)
assert_allclose(q3.value, (30.0 * u.arcsec).to(u.radian).value)
# the following doesn't make much sense in terms of the name of the
# routine, but we check it gives the correct result.
q4 = function(3.0 * u.radian, 0.0 * u.arcmin, 0.0 * u.arcsec)
assert_allclose(q4.value, 3.0)
assert q4.unit == u.radian
with pytest.raises(TypeError):
function(3.0 * u.m, 2.0 * u.s, 1.0 * u.kg)
jv_like_ufuncs = (
sps.jv, sps.jn, sps.jve, sps.yn, sps.yv, sps.yve, sps.kn, sps.kv,
sps.kve, sps.iv, sps.ive, sps.hankel1, sps.hankel1e, sps.hankel2,
sps.hankel2e,
) # fmt: skip
@pytest.mark.parametrize("function", jv_like_ufuncs)
def test_jv_scalar(self, function):
q = function(2.0 * u.m / (2.0 * u.m), 3.0 * u.m / (6.0 * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(1.0, 0.5)
@pytest.mark.parametrize("function", jv_like_ufuncs)
def test_jv_array(self, function):
q = function(
np.ones(3) * u.m / (1.0 * u.m),
np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m),
)
assert q.unit == u.dimensionless_unscaled
assert np.all(
q.value == function(np.ones(3), np.array([1.0 / 3.0, 1.0 / 2.0, 1.0]))
)
# should also work on quantities that can be made dimensionless
q2 = function(
np.ones(3) * u.m / (1.0 * u.m),
np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm),
)
assert q2.unit == u.dimensionless_unscaled
assert_allclose(
q2.value,
function(np.ones(3), np.array([100.0 / 3.0, 100.0 / 2.0, 100.0])),
)
@pytest.mark.parametrize("function", jv_like_ufuncs)
def test_jv_invalid_units(self, function):
# Can't use jv() with non-dimensionless quantities
with pytest.raises(
TypeError,
match=(
f"Can only apply '{function.__name__}' function to dimensionless"
" quantities"
),
):
function(1.0 * u.kg, 3.0 * u.m / u.s)
|
de44d6c18144d601d6c930b34a07647a4ebe2d55f41bc67da5c02f74e330fa71 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numbers
import numpy as np
from astropy.units import (
CompositeUnit,
Unit,
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
photometric,
)
from .core import FunctionQuantity, FunctionUnitBase
from .units import dB, dex, mag
__all__ = [
"LogUnit",
"MagUnit",
"DexUnit",
"DecibelUnit",
"LogQuantity",
"Magnitude",
"Decibel",
"Dex",
"STmag",
"ABmag",
"M_bol",
"m_bol",
]
class LogUnit(FunctionUnitBase):
"""Logarithmic unit containing a physical one.
Usually, logarithmic units are instantiated via specific subclasses
    such as `~astropy.units.MagUnit`, `~astropy.units.DecibelUnit`, and
`~astropy.units.DexUnit`.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the logarithmic function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, the same as the logarithmic unit set by the subclass.
"""
# the four essential overrides of FunctionUnitBase
@property
def _default_function_unit(self):
return dex
@property
def _quantity_class(self):
return LogQuantity
def from_physical(self, x):
"""Transformation from value in physical to value in logarithmic units.
Used in equivalency.
"""
return dex.to(self._function_unit, np.log10(x))
def to_physical(self, x):
"""Transformation from value in logarithmic to value in physical units.
Used in equivalency.
"""
return 10 ** self._function_unit.to(dex, x)
# ^^^^ the four essential overrides of FunctionUnitBase
# add addition and subtraction, which imply multiplication/division of
# the underlying physical units
def _add_and_adjust_physical_unit(self, other, sign_self, sign_other):
"""Add/subtract LogUnit to/from another unit, and adjust physical unit.
self and other are multiplied by sign_self and sign_other, resp.
We wish to do: ±lu_1 + ±lu_2 -> lu_f (lu=logarithmic unit)
and pu_1^(±1) * pu_2^(±1) -> pu_f (pu=physical unit)
Raises
------
UnitsError
If function units are not equivalent.
"""
# First, insist on compatible logarithmic type. Here, plain u.mag,
# u.dex, and u.dB are OK, i.e., other does not have to be LogUnit
# (this will indirectly test whether other is a unit at all).
try:
getattr(other, "function_unit", other)._to(self._function_unit)
except AttributeError:
# if other is not a unit (i.e., does not have _to).
return NotImplemented
except UnitsError:
raise UnitsError(
"Can only add/subtract logarithmic units of compatible type."
)
other_physical_unit = getattr(other, "physical_unit", dimensionless_unscaled)
physical_unit = CompositeUnit(
1, [self._physical_unit, other_physical_unit], [sign_self, sign_other]
)
return self._copy(physical_unit)
def __neg__(self):
return self._copy(self.physical_unit ** (-1))
def __add__(self, other):
# Only know how to add to a logarithmic unit with compatible type,
# be it a plain one (u.mag, etc.,) or another LogUnit
return self._add_and_adjust_physical_unit(other, +1, +1)
def __radd__(self, other):
return self._add_and_adjust_physical_unit(other, +1, +1)
def __sub__(self, other):
return self._add_and_adjust_physical_unit(other, +1, -1)
def __rsub__(self, other):
# here, in normal usage other cannot be LogUnit; only equivalent one
# would be u.mag,u.dB,u.dex. But might as well use common routine.
return self._add_and_adjust_physical_unit(other, -1, +1)
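# Editor's usage sketch (hedged; standard astropy behaviour is assumed, this
# block is not part of the original module): adding logarithmic units
# multiplies the underlying physical units, and subtracting divides them:
#
#     >>> import astropy.units as u
#     >>> (u.mag(u.ct / u.s) + u.mag(u.s)).physical_unit == u.ct
#     True
#     >>> (u.mag(u.Jy) - u.mag(u.Jy)).physical_unit == u.dimensionless_unscaled
#     True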
class MagUnit(LogUnit):
"""Logarithmic physical units expressed in magnitudes.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the magnitude function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``mag``, but this allows one to use an equivalent
unit such as ``2 mag``.
"""
@property
def _default_function_unit(self):
return mag
@property
def _quantity_class(self):
return Magnitude
class DexUnit(LogUnit):
"""Logarithmic physical units expressed in magnitudes.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
        Unit that is encapsulated within the dex function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``dex``, but this allows one to use an equivalent
unit such as ``0.5 dex``.
"""
@property
def _default_function_unit(self):
return dex
@property
def _quantity_class(self):
return Dex
def to_string(self, format="generic"):
if format == "cds":
if self.physical_unit == dimensionless_unscaled:
return "[-]" # by default, would get "[---]".
else:
return f"[{self.physical_unit.to_string(format=format)}]"
else:
return super().to_string()
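# Editor's sketch (hedged; the exact output strings are an assumption): the
# "cds" branch above brackets the physical unit, as used in CDS/MRT headers:
#
#     >>> import astropy.units as u
#     >>> u.DexUnit(u.cm / u.s**2).to_string("cds")
#     '[cm/s2]'
#     >>> u.DexUnit().to_string("cds")
#     '[-]'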
class DecibelUnit(LogUnit):
"""Logarithmic physical units expressed in dB.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the decibel function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``dB``, but this allows one to use an equivalent
unit such as ``2 dB``.
"""
@property
def _default_function_unit(self):
return dB
@property
def _quantity_class(self):
return Decibel
class LogQuantity(FunctionQuantity):
"""A representation of a (scaled) logarithm of a number with a unit.
Parameters
----------
value : number, `~astropy.units.Quantity`, `~astropy.units.LogQuantity`, or sequence of quantity-like.
The numerical value of the logarithmic quantity. If a number or
a `~astropy.units.Quantity` with a logarithmic unit, it will be
converted to ``unit`` and the physical unit will be inferred from
``unit``. If a `~astropy.units.Quantity` with just a physical unit,
        it will be converted to the logarithmic unit, after, if necessary,
converting it to the physical unit inferred from ``unit``.
unit : str, `~astropy.units.UnitBase`, or `~astropy.units.FunctionUnitBase`, optional
For an `~astropy.units.FunctionUnitBase` instance, the
physical unit will be taken from it; for other input, it will be
inferred from ``value``. By default, ``unit`` is set by the subclass.
dtype : `~numpy.dtype`, optional
The ``dtype`` of the resulting Numpy array or scalar that will
        hold the value. If not provided, it is determined automatically
from the input value.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
Examples
--------
Typically, use is made of an `~astropy.units.FunctionQuantity`
subclass, as in::
>>> import astropy.units as u
>>> u.Magnitude(-2.5)
<Magnitude -2.5 mag>
>>> u.Magnitude(10.*u.count/u.second)
<Magnitude -2.5 mag(ct / s)>
>>> u.Decibel(1.*u.W, u.DecibelUnit(u.mW)) # doctest: +FLOAT_CMP
<Decibel 30. dB(mW)>
"""
# only override of FunctionQuantity
_unit_class = LogUnit
# additions that work just for logarithmic units
def __add__(self, other):
# Add function units, thus multiplying physical units. If no unit is
# given, assume dimensionless_unscaled; this will give the appropriate
# exception in LogUnit.__add__.
new_unit = self.unit + getattr(other, "unit", dimensionless_unscaled)
# Add actual logarithmic values, rescaling, e.g., dB -> dex.
result = self._function_view + getattr(other, "_function_view", other)
return self._new_view(result, new_unit)
def __radd__(self, other):
return self.__add__(other)
def __iadd__(self, other):
new_unit = self.unit + getattr(other, "unit", dimensionless_unscaled)
# Do calculation in-place using _function_view of array.
function_view = self._function_view
function_view += getattr(other, "_function_view", other)
self._set_unit(new_unit)
return self
def __sub__(self, other):
# Subtract function units, thus dividing physical units.
new_unit = self.unit - getattr(other, "unit", dimensionless_unscaled)
# Subtract actual logarithmic values, rescaling, e.g., dB -> dex.
result = self._function_view - getattr(other, "_function_view", other)
return self._new_view(result, new_unit)
def __rsub__(self, other):
new_unit = self.unit.__rsub__(getattr(other, "unit", dimensionless_unscaled))
result = self._function_view.__rsub__(getattr(other, "_function_view", other))
# Ensure the result is in right function unit scale
# (with rsub, this does not have to be one's own).
result = result.to(new_unit.function_unit)
return self._new_view(result, new_unit)
def __isub__(self, other):
new_unit = self.unit - getattr(other, "unit", dimensionless_unscaled)
# Do calculation in-place using _function_view of array.
function_view = self._function_view
function_view -= getattr(other, "_function_view", other)
self._set_unit(new_unit)
return self
def __mul__(self, other):
# Multiply by a float or a dimensionless quantity
if isinstance(other, numbers.Number):
# Multiplying a log means putting the factor into the exponent
# of the unit
new_physical_unit = self.unit.physical_unit**other
result = self.view(np.ndarray) * other
return self._new_view(result, self.unit._copy(new_physical_unit))
else:
return super().__mul__(other)
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
if isinstance(other, numbers.Number):
new_physical_unit = self.unit.physical_unit**other
function_view = self._function_view
function_view *= other
self._set_unit(self.unit._copy(new_physical_unit))
return self
else:
return super().__imul__(other)
def __truediv__(self, other):
# Divide by a float or a dimensionless quantity
if isinstance(other, numbers.Number):
# Dividing a log means putting the denominator into the exponent
# of the unit
new_physical_unit = self.unit.physical_unit ** (1 / other)
result = self.view(np.ndarray) / other
return self._new_view(result, self.unit._copy(new_physical_unit))
else:
return super().__truediv__(other)
def __itruediv__(self, other):
if isinstance(other, numbers.Number):
new_physical_unit = self.unit.physical_unit ** (1 / other)
function_view = self._function_view
function_view /= other
self._set_unit(self.unit._copy(new_physical_unit))
return self
else:
return super().__itruediv__(other)
def __pow__(self, other):
# We check if this power is OK by applying it first to the unit.
try:
other = float(other)
except TypeError:
return NotImplemented
new_unit = self.unit**other
new_value = self.view(np.ndarray) ** other
return self._new_view(new_value, new_unit)
def __ilshift__(self, other):
try:
other = Unit(other)
except UnitTypeError:
return NotImplemented
if not isinstance(other, self._unit_class):
return NotImplemented
try:
factor = self.unit.physical_unit._to(other.physical_unit)
except UnitConversionError:
# Maybe via equivalencies? Now we do make a temporary copy.
try:
value = self._to_value(other)
except UnitConversionError:
return NotImplemented
self.view(np.ndarray)[...] = value
else:
self.view(np.ndarray)[...] += self.unit.from_physical(factor)
self._set_unit(other)
return self
# Methods that do not work for function units generally but are OK for
# logarithmic units as they imply differences and independence of
# physical unit.
def var(self, axis=None, dtype=None, out=None, ddof=0):
unit = self.unit.function_unit**2
return self._wrap_function(np.var, axis, dtype, out=out, ddof=ddof, unit=unit)
def std(self, axis=None, dtype=None, out=None, ddof=0):
unit = self.unit._copy(dimensionless_unscaled)
return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof, unit=unit)
def ptp(self, axis=None, out=None):
unit = self.unit._copy(dimensionless_unscaled)
return self._wrap_function(np.ptp, axis, out=out, unit=unit)
def diff(self, n=1, axis=-1):
unit = self.unit._copy(dimensionless_unscaled)
return self._wrap_function(np.diff, n, axis, unit=unit)
def ediff1d(self, to_end=None, to_begin=None):
unit = self.unit._copy(dimensionless_unscaled)
return self._wrap_function(np.ediff1d, to_end, to_begin, unit=unit)
_supported_functions = FunctionQuantity._supported_functions | {
getattr(np, function) for function in ("var", "std", "ptp", "diff", "ediff1d")
}
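# Editor's usage sketch (hedged; not part of the original module): scaling a
# logarithmic quantity by a plain number moves the factor into the exponent
# of the physical unit, while var/std/ptp/diff drop to plain function units:
#
#     >>> import astropy.units as u
#     >>> m = u.Magnitude(5.0, u.mag(u.Jy))
#     >>> (2 * m).unit.physical_unit == u.Jy ** 2
#     True
#     >>> m3 = u.Magnitude([1.0, 2.0, 3.0], u.mag(u.Jy))
#     >>> m3.std().unit.physical_unit == u.dimensionless_unscaled
#     True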
class Dex(LogQuantity):
_unit_class = DexUnit
class Decibel(LogQuantity):
_unit_class = DecibelUnit
class Magnitude(LogQuantity):
_unit_class = MagUnit
dex._function_unit_class = DexUnit
dB._function_unit_class = DecibelUnit
mag._function_unit_class = MagUnit
STmag = MagUnit(photometric.STflux)
STmag.__doc__ = "ST magnitude: STmag=-21.1 corresponds to 1 erg/s/cm2/A"
ABmag = MagUnit(photometric.ABflux)
ABmag.__doc__ = "AB magnitude: ABmag=-48.6 corresponds to 1 erg/s/cm2/Hz"
M_bol = MagUnit(photometric.Bol)
M_bol.__doc__ = (
f"Absolute bolometric magnitude: M_bol=0 corresponds to L_bol0={photometric.Bol.si}"
)
m_bol = MagUnit(photometric.bol)
m_bol.__doc__ = (
f"Apparent bolometric magnitude: m_bol=0 corresponds to f_bol0={photometric.bol.si}"
)
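# Editor's usage sketch (hedged; not part of the original module): these
# photometric magnitudes convert to flux densities through their zero points,
# e.g. the AB zero point of roughly 3631 Jy:
#
#     >>> import astropy.units as u
#     >>> flux = (0 * u.ABmag).to(u.Jy)   # zero point, approximately 3631 Jy
#     >>> abs(flux.value - 3631) < 1
#     True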
|
8e5897645d09f8335c59367e3c4fc9608fdfbf8b655f3fc14584d1ec29130907 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Function Units and Quantities."""
from abc import ABCMeta, abstractmethod
import numpy as np
from astropy.units import (
Quantity,
Unit,
UnitBase,
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
)
__all__ = ["FunctionUnitBase", "FunctionQuantity"]
SUPPORTED_UFUNCS = {
getattr(np.core.umath, ufunc)
for ufunc in (
"isfinite",
"isinf",
"isnan",
"sign",
"signbit",
"rint",
"floor",
"ceil",
"trunc",
"_ones_like",
"ones_like",
"positive",
)
if hasattr(np.core.umath, ufunc)
}
# TODO: the following could work if helper changed relative to Quantity:
# - spacing should return dimensionless, not same unit
# - negative should negate unit too,
# - add, subtract, comparisons can work if units added/subtracted
SUPPORTED_FUNCTIONS = {
getattr(np, function)
for function in ("clip", "trace", "mean", "min", "max", "round")
}
# subclassing UnitBase or CompositeUnit was found to be problematic, requiring
# a large number of overrides. Hence, define new class.
class FunctionUnitBase(metaclass=ABCMeta):
"""Abstract base class for function units.
Function units are functions containing a physical unit, such as dB(mW).
Most of the arithmetic operations on function units are defined in this
base class.
While instantiation is defined, this class should not be used directly.
Rather, subclasses should be used that override the abstract properties
`_default_function_unit` and `_quantity_class`, and the abstract methods
`from_physical`, and `to_physical`.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, the same as the function unit set by the subclass.
"""
# ↓↓↓ the following four need to be set by subclasses
# Make this a property so we can ensure subclasses define it.
@property
@abstractmethod
def _default_function_unit(self):
"""Default function unit corresponding to the function.
This property should be overridden by subclasses, with, e.g.,
        `~astropy.units.MagUnit` returning `~astropy.units.mag`.
"""
# This has to be a property because the function quantity will not be
# known at unit definition time, as it gets defined after.
@property
@abstractmethod
def _quantity_class(self):
"""Function quantity class corresponding to this function unit.
This property should be overridden by subclasses, with, e.g.,
        `~astropy.units.MagUnit` returning `~astropy.units.Magnitude`.
"""
@abstractmethod
def from_physical(self, x):
"""Transformation from value in physical to value in function units.
This method should be overridden by subclasses. It is used to
provide automatic transformations using an equivalency.
"""
@abstractmethod
def to_physical(self, x):
"""Transformation from value in function to value in physical units.
This method should be overridden by subclasses. It is used to
provide automatic transformations using an equivalency.
"""
# ↑↑↑ the above four need to be set by subclasses
# have priority over arrays, regular units, and regular quantities
__array_priority__ = 30000
def __init__(self, physical_unit=None, function_unit=None):
if physical_unit is None:
physical_unit = dimensionless_unscaled
else:
physical_unit = Unit(physical_unit)
if not isinstance(physical_unit, UnitBase) or physical_unit.is_equivalent(
self._default_function_unit
):
raise UnitConversionError(f"{physical_unit} is not a physical unit.")
if function_unit is None:
function_unit = self._default_function_unit
else:
# any function unit should be equivalent to subclass default
function_unit = Unit(getattr(function_unit, "function_unit", function_unit))
if not function_unit.is_equivalent(self._default_function_unit):
raise UnitConversionError(
f"Cannot initialize '{self.__class__.__name__}' instance with "
f"function unit '{function_unit}', as it is not equivalent to "
f"default function unit '{self._default_function_unit}'."
)
self._physical_unit = physical_unit
self._function_unit = function_unit
def _copy(self, physical_unit=None):
"""Copy oneself, possibly with a different physical unit."""
if physical_unit is None:
physical_unit = self.physical_unit
return self.__class__(physical_unit, self.function_unit)
@property
def physical_unit(self):
return self._physical_unit
@property
def function_unit(self):
return self._function_unit
@property
def equivalencies(self):
"""List of equivalencies between function and physical units.
Uses the `from_physical` and `to_physical` methods.
"""
return [(self, self.physical_unit, self.to_physical, self.from_physical)]
# ↓↓↓ properties/methods required to behave like a unit
def decompose(self, bases=set()):
"""Copy the current unit with the physical unit decomposed.
For details, see `~astropy.units.UnitBase.decompose`.
"""
return self._copy(self.physical_unit.decompose(bases))
@property
def si(self):
"""Copy the current function unit with the physical unit in SI."""
return self._copy(self.physical_unit.si)
@property
def cgs(self):
"""Copy the current function unit with the physical unit in CGS."""
return self._copy(self.physical_unit.cgs)
def _get_physical_type_id(self):
"""Get physical type corresponding to physical unit."""
return self.physical_unit._get_physical_type_id()
@property
def physical_type(self):
"""Return the physical type of the physical unit (e.g., 'length')."""
return self.physical_unit.physical_type
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : `~astropy.units.Unit`, string, or tuple
The unit to convert to. If a tuple of units is specified, this
method returns true if the unit matches any of those in the tuple.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to the built-in equivalencies between the
function unit and the physical one, as well as possible global
defaults set by, e.g., `~astropy.units.set_enabled_equivalencies`.
Use `None` to turn off any global equivalencies.
Returns
-------
bool
"""
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies) for u in other)
other_physical_unit = getattr(
other,
"physical_unit",
(
dimensionless_unscaled
if self.function_unit.is_equivalent(other)
else other
),
)
return self.physical_unit.is_equivalent(other_physical_unit, equivalencies)
def to(self, other, value=1.0, equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : `~astropy.units.Unit`, `~astropy.units.FunctionUnitBase`, or str
The unit to convert to.
value : int, float, or scalar array-like, optional
Value(s) in the current unit to be converted to the specified unit.
If not provided, defaults to 1.0.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
            This list is meant to treat only equivalencies between different
physical units; the built-in equivalency between the function
unit and the physical one is automatically taken into account.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
`~astropy.units.UnitsError`
If units are inconsistent.
"""
# conversion to one's own physical unit should be fastest
if other is self.physical_unit:
return self.to_physical(value)
other_function_unit = getattr(other, "function_unit", other)
if self.function_unit.is_equivalent(other_function_unit):
# when other is an equivalent function unit:
# first convert physical units to other's physical units
other_physical_unit = getattr(
other, "physical_unit", dimensionless_unscaled
)
if self.physical_unit != other_physical_unit:
value_other_physical = self.physical_unit.to(
other_physical_unit, self.to_physical(value), equivalencies
)
# make function unit again, in own system
value = self.from_physical(value_other_physical)
# convert possible difference in function unit (e.g., dex->dB)
return self.function_unit.to(other_function_unit, value)
else:
try:
# when other is not a function unit
return self.physical_unit.to(
other, self.to_physical(value), equivalencies
)
except UnitConversionError as e:
if self.function_unit == Unit("mag"):
# One can get to raw magnitudes via math that strips the dimensions off.
# Include extra information in the exception to remind users of this.
msg = "Did you perhaps subtract magnitudes so the unit got lost?"
e.args += (msg,)
raise e
else:
raise
def is_unity(self):
return False
def __eq__(self, other):
return self.physical_unit == getattr(
other, "physical_unit", dimensionless_unscaled
) and self.function_unit == getattr(other, "function_unit", other)
def __ne__(self, other):
return not self.__eq__(other)
def __rlshift__(self, other):
"""Unit conversion operator ``<<``."""
try:
return self._quantity_class(other, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __mul__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return self.function_unit * other
else:
raise UnitsError(
"Cannot multiply a function unit with a physical dimension "
"with any unit."
)
else:
# Anything not like a unit, try initialising as a function quantity.
try:
return self._quantity_class(other, unit=self)
except Exception:
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return self.function_unit / other
else:
raise UnitsError(
"Cannot divide a function unit with a physical dimension "
"by any unit."
)
else:
# Anything not like a unit, try initialising as a function quantity.
try:
return self._quantity_class(1.0 / other, unit=self)
except Exception:
return NotImplemented
def __rtruediv__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return other / self.function_unit
else:
raise UnitsError(
"Cannot divide a function unit with a physical dimension "
"into any unit"
)
else:
# Don't know what to do with anything not like a unit.
return NotImplemented
def __pow__(self, power):
if power == 0:
return dimensionless_unscaled
elif power == 1:
return self._copy()
if self.physical_unit == dimensionless_unscaled:
return self.function_unit**power
raise UnitsError(
"Cannot raise a function unit with a physical dimension "
"to any power but 0 or 1."
)
def __pos__(self):
return self._copy()
def to_string(self, format="generic", **kwargs):
"""
Output the unit in the given format as a string.
The physical unit is appended, within parentheses, to the function
unit, as in "dB(mW)", with both units set using the given format
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
"""
supported_formats = (
"generic",
"unscaled",
"latex",
"latex_inline",
"unicode",
"console",
)
if format not in supported_formats:
raise ValueError(
f"Function units cannot be written in {format} "
f"format. Only {', '.join(supported_formats)} are supported."
)
self_str = self.function_unit.to_string(format, **kwargs)
pu_str = self.physical_unit.to_string(format, **kwargs)
if pu_str == "":
pu_str = "1"
if format.startswith("latex"):
# need to strip leading and trailing "$"
self_str += rf"$\mathrm{{\left( {pu_str[1:-1]} \right)}}$"
else:
pu_lines = pu_str.splitlines()
if len(pu_lines) == 1:
self_str += f"({pu_str})"
else:
# If the physical unit is formatted into a multiline
# string, the lines need to be adjusted so that the
# functional string is aligned with the fraction line
# (second one), and all other lines are indented
# accordingly.
f = f"{{0:^{len(self_str)+1}s}}{{1:s}}"
lines = [
f.format("", pu_lines[0]),
f.format(f"{self_str}(", f"{pu_lines[1]})"),
] + [f.format("", line) for line in pu_lines[2:]]
self_str = "\n".join(lines)
return self_str
def __format__(self, format_spec):
"""Try to format units using a formatter."""
try:
return self.to_string(format=format_spec)
except ValueError:
return format(str(self), format_spec)
def __str__(self):
"""Return string representation for unit."""
self_str = str(self.function_unit)
pu_str = str(self.physical_unit)
if pu_str:
self_str += f"({pu_str})"
return self_str
def __repr__(self):
# By default, try to give a representation using `Unit(<string>)`,
# with string such that parsing it would give the correct FunctionUnit.
if callable(self.function_unit):
return f'Unit("{self.to_string()}")'
else:
return '{}("{}"{})'.format(
self.__class__.__name__,
self.physical_unit,
""
if self.function_unit is self._default_function_unit
else f', unit="{self.function_unit}"',
)
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return self.to_string("latex")
def __hash__(self):
return hash((self.function_unit, self.physical_unit))
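# Editor's usage sketch (hedged; not part of the original module): conversion
# to a plain physical unit goes through the from_physical/to_physical
# equivalency defined above, e.g. for decibel-milliwatts:
#
#     >>> import astropy.units as u
#     >>> round(u.DecibelUnit(u.mW).to(u.W, 30), 12)   # 30 dB(mW) is 1 W
#     1.0
#     >>> u.dB(u.mW) == u.DecibelUnit(u.mW)
#     True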
class FunctionQuantity(Quantity):
"""A representation of a (scaled) function of a number with a unit.
Function quantities are quantities whose units are functions containing a
physical unit, such as dB(mW). Most of the arithmetic operations on
function quantities are defined in this base class.
While instantiation is also defined here, this class should not be
instantiated directly. Rather, subclasses should be made which have
``_unit_class`` pointing back to the corresponding function unit class.
Parameters
----------
value : number, quantity-like, or sequence thereof
The numerical value of the function quantity. If a number or
a `~astropy.units.Quantity` with a function unit, it will be converted
to ``unit`` and the physical unit will be inferred from ``unit``.
If a `~astropy.units.Quantity` with just a physical unit, it will
converted to the function unit, after, if necessary, converting it to
the physical unit inferred from ``unit``.
unit : str, `~astropy.units.UnitBase`, or `~astropy.units.FunctionUnitBase`, optional
For an `~astropy.units.FunctionUnitBase` instance, the
physical unit will be taken from it; for other input, it will be
inferred from ``value``. By default, ``unit`` is set by the subclass.
dtype : `~numpy.dtype`, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any input that cannot represent float (integer and bool)
is converted to float.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. Ignored
if the input does not need to be converted and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be of the
class used. Otherwise, subclasses will be passed through.
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be prepended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`~astropy.units.Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not a `~astropy.units.FunctionUnitBase`
or `~astropy.units.Unit` object, or a parseable string unit.
"""
_unit_class = None
"""Default `~astropy.units.FunctionUnitBase` subclass.
This should be overridden by subclasses.
"""
# Ensure priority over ndarray, regular Unit & Quantity, and FunctionUnit.
__array_priority__ = 40000
# Define functions that work on FunctionQuantity.
_supported_ufuncs = SUPPORTED_UFUNCS
_supported_functions = SUPPORTED_FUNCTIONS
def __new__(
cls,
value,
unit=None,
dtype=np.inexact,
copy=True,
order=None,
subok=False,
ndmin=0,
):
if unit is not None:
# Convert possible string input to a (function) unit.
unit = Unit(unit)
if not isinstance(unit, FunctionUnitBase):
# By default, use value's physical unit.
value_unit = getattr(value, "unit", None)
if value_unit is None:
# if iterable, see if first item has a unit
# (mixed lists fail in super call below).
try:
value_unit = value[0].unit
except Exception:
pass
physical_unit = getattr(value_unit, "physical_unit", value_unit)
unit = cls._unit_class(physical_unit, function_unit=unit)
# initialise!
return super().__new__(
cls,
value,
unit,
dtype=dtype,
copy=copy,
order=order,
subok=subok,
ndmin=ndmin,
)
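    # Editor's note (hedged sketch of the behaviour implemented above): when
    # no function unit, or only a plain function unit, is given, the physical
    # unit is inferred from the value, e.g. for the Magnitude subclass
    #
    #     >>> import astropy.units as u
    #     >>> m = u.Magnitude(10.0 * u.ct / u.s)   # physical unit ct / s
    #     >>> m.unit.physical_unit == u.ct / u.s
    #     True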
# ↓↓↓ properties not found in Quantity
@property
def physical(self):
"""The physical quantity corresponding the function one."""
return self.to(self.unit.physical_unit)
@property
def _function_view(self):
"""View as Quantity with function unit, dropping the physical unit.
Use `~astropy.units.quantity.Quantity.value` for just the value.
"""
return self._new_view(unit=self.unit.function_unit)
# ↓↓↓ methods overridden to change the behavior
@property
def si(self):
"""Return a copy with the physical unit in SI units."""
return self.__class__(self.physical.si)
@property
def cgs(self):
"""Return a copy with the physical unit in CGS units."""
return self.__class__(self.physical.cgs)
def decompose(self, bases=[]):
"""Generate a new instance with the physical unit decomposed.
For details, see `~astropy.units.Quantity.decompose`.
"""
return self.__class__(self.physical.decompose(bases))
# ↓↓↓ methods overridden to add additional behavior
def __quantity_subclass__(self, unit):
if isinstance(unit, FunctionUnitBase):
return self.__class__, True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if not isinstance(unit, self._unit_class):
# Have to take care of, e.g., (10*u.mag).view(u.Magnitude)
try:
# "or 'nonsense'" ensures `None` breaks, just in case.
unit = self._unit_class(function_unit=unit or "nonsense")
except Exception:
raise UnitTypeError(
f"{type(self).__name__} instances require"
f" {self._unit_class.__name__} function units, so cannot set it to"
f" '{unit}'."
)
self._unit = unit
def __array_ufunc__(self, function, method, *inputs, **kwargs):
# TODO: it would be more logical to have this in Quantity already,
# instead of in UFUNC_HELPERS, where it cannot be overridden.
# And really it should just return NotImplemented, since possibly
# another argument might know what to do.
if function not in self._supported_ufuncs:
raise UnitTypeError(
f"Cannot use ufunc '{function.__name__}' with function quantities"
)
return super().__array_ufunc__(function, method, *inputs, **kwargs)
def _maybe_new_view(self, result):
"""View as function quantity if the unit is unchanged.
Used for the case that self.unit.physical_unit is dimensionless,
        where multiplication and division are done using the Quantity
equivalent, to transform them back to a FunctionQuantity if possible.
"""
if isinstance(result, Quantity) and result.unit == self.unit:
return self._new_view(result)
else:
return result
# ↓↓↓ methods overridden to change behavior
def __mul__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._maybe_new_view(self._function_view * other)
raise UnitTypeError(
"Cannot multiply function quantities which are not dimensionless "
"with anything."
)
def __truediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._maybe_new_view(self._function_view / other)
raise UnitTypeError(
"Cannot divide function quantities which are not dimensionless by anything."
)
def __rtruediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._maybe_new_view(self._function_view.__rtruediv__(other))
raise UnitTypeError(
"Cannot divide function quantities which are not dimensionless "
"into anything."
)
def _comparison(self, other, comparison_func):
"""Do a comparison between self and other, raising UnitsError when
        other cannot be converted to self because it has a different physical
unit, and returning NotImplemented when there are other errors.
"""
try:
# will raise a UnitsError if physical units not equivalent
other_in_own_unit = self._to_own_unit(other, check_precision=False)
except UnitsError as exc:
if self.unit.physical_unit != dimensionless_unscaled:
raise exc
try:
other_in_own_unit = self._function_view._to_own_unit(
other, check_precision=False
)
except Exception:
raise exc
except Exception:
return NotImplemented
return comparison_func(other_in_own_unit)
def __eq__(self, other):
try:
return self._comparison(other, self.value.__eq__)
except UnitsError:
return False
def __ne__(self, other):
try:
return self._comparison(other, self.value.__ne__)
except UnitsError:
return True
def __gt__(self, other):
return self._comparison(other, self.value.__gt__)
def __ge__(self, other):
return self._comparison(other, self.value.__ge__)
def __lt__(self, other):
return self._comparison(other, self.value.__lt__)
def __le__(self, other):
return self._comparison(other, self.value.__le__)
def __lshift__(self, other):
"""Unit conversion operator `<<`."""
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
# Ensure Quantity methods are used only if they make sense.
def _wrap_function(self, function, *args, **kwargs):
if function in self._supported_functions:
return super()._wrap_function(function, *args, **kwargs)
# For dimensionless, we can convert to regular quantities.
if all(
arg.unit.physical_unit == dimensionless_unscaled
for arg in (self,) + args
if (hasattr(arg, "unit") and hasattr(arg.unit, "physical_unit"))
):
args = tuple(getattr(arg, "_function_view", arg) for arg in args)
return self._function_view._wrap_function(function, *args, **kwargs)
raise TypeError(
f"Cannot use method that uses function '{function.__name__}' with "
"function quantities that are not dimensionless."
)
# Override functions that are supported but do not use _wrap_function
# in Quantity.
def max(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.max, axis, out=out, keepdims=keepdims)
def min(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.min, axis, out=out, keepdims=keepdims)
def sum(self, axis=None, dtype=None, out=None, keepdims=False):
return self._wrap_function(np.sum, axis, dtype, out=out, keepdims=keepdims)
def cumsum(self, axis=None, dtype=None, out=None):
return self._wrap_function(np.cumsum, axis, dtype, out=out)
def clip(self, a_min, a_max, out=None):
return self._wrap_function(
np.clip, self._to_own_unit(a_min), self._to_own_unit(a_max), out=out
)
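# Editor's usage sketch (hedged; not part of the original module): only the
# ufuncs collected in SUPPORTED_UFUNCS at the top of this module may be
# applied to function quantities; any other ufunc raises UnitTypeError:
#
#     >>> import numpy as np
#     >>> import astropy.units as u
#     >>> m = u.Magnitude(10.0 * u.ct / u.s)
#     >>> bool(np.isfinite(m))     # isfinite is supported
#     True
#     >>> np.exp(m)                # exp is not -> UnitTypeError
#     Traceback (most recent call last):
#         ...
#     astropy.units.core.UnitTypeError: Cannot use ufunc 'exp' with function quantities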
|
3425be4498b58bedb29338d8ddc1db276fcff5a6028019f08a7c77fc85a1f511 | import os
from urllib.error import HTTPError
import numpy as np
import pytest
from astropy import units as u
from astropy.constants import c
from astropy.coordinates.builtin_frames import TETE
from astropy.coordinates.earth import EarthLocation
from astropy.coordinates.funcs import get_sun
from astropy.coordinates.representation import (
CartesianRepresentation,
UnitSphericalRepresentation,
)
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy.coordinates.solar_system import (
BODY_NAME_TO_KERNEL_SPEC,
_get_apparent_body_position,
get_body,
get_body_barycentric,
get_body_barycentric_posvel,
get_moon,
solar_system_ephemeris,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.utils.compat.optional_deps import HAS_JPLEPHEM, HAS_SKYFIELD
from astropy.utils.data import download_file, get_pkg_data_filename
from astropy.utils.exceptions import AstropyDeprecationWarning
if HAS_SKYFIELD:
from skyfield.api import Loader
KITT_PEAK = EarthLocation.from_geodetic(
lon=-111.6 * u.deg, lat=31.963333333333342 * u.deg, height=2120 * u.m
)
@pytest.fixture(scope="module")
def skyfield_ephemeris(tmp_path_factory):
load = Loader(tmp_path_factory.mktemp("skyfield_ephemeris"))
try:
planets = load("de421.bsp")
ts = load.timescale()
except OSError as e:
if os.environ.get("CI", False) and "timed out" in str(e):
pytest.xfail("Timed out in CI")
else:
raise
yield planets, ts
planets.close()
@pytest.fixture(scope="module")
def horizons_ephemeris():
"""
Test positions generated by JPL Horizons accessed on
2016-03-28, with refraction turned on.
"""
geocentric_apparent_frame = TETE(obstime=Time("1980-03-25 00:00"))
t = Time("2014-09-25T00:00", location=KITT_PEAK)
kitt_peak_apparent_frame = TETE(obstime=t, location=t.location)
return {
"geocentric": {
"mercury": SkyCoord(
ra="22h41m47.78s",
dec="-08d29m32.0s",
distance=c * 6.323037 * u.min,
frame=geocentric_apparent_frame,
),
"moon": SkyCoord(
ra="07h32m02.62s",
dec="+18d34m05.0s",
distance=c * 0.021921 * u.min,
frame=geocentric_apparent_frame,
),
"jupiter": SkyCoord(
ra="10h17m12.82s",
dec="+12d02m57.0s",
distance=c * 37.694557 * u.min,
frame=geocentric_apparent_frame,
),
"sun": SkyCoord(
ra="00h16m31.00s",
dec="+01d47m16.9s",
distance=c * 8.294858 * u.min,
frame=geocentric_apparent_frame,
),
},
"kitt_peak": {
"mercury": SkyCoord(
ra="13h38m58.50s",
dec="-13d34m42.6s",
distance=c * 7.699020 * u.min,
frame=kitt_peak_apparent_frame,
),
"moon": SkyCoord(
ra="12h33m12.85s",
dec="-05d17m54.4s",
distance=c * 0.022054 * u.min,
frame=kitt_peak_apparent_frame,
),
"jupiter": SkyCoord(
ra="09h09m55.55s",
dec="+16d51m57.8s",
distance=c * 49.244937 * u.min,
frame=kitt_peak_apparent_frame,
),
},
}
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_SKYFIELD, reason="requires skyfield")
@pytest.mark.parametrize("body", ["mercury", "jupiter barycenter", "moon"])
def test_positions_skyfield(body, skyfield_ephemeris):
"""
Test positions against those generated by skyfield.
"""
planets, ts = skyfield_ephemeris
t = Time("1980-03-25 00:00")
frame = TETE(obstime=t)
skyfield_t = ts.from_astropy(t)
skyfield_coords = planets["earth"].at(skyfield_t).observe(planets[body]).apparent()
ra, dec, dist = skyfield_coords.radec(epoch="date")
skyfield_coords = SkyCoord(
ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km), frame=frame
)
# planet positions w.r.t true equator and equinox
astropy_coords = get_body(
"jupiter" if body == "jupiter barycenter" else body, time=t, ephemeris="de430"
).transform_to(frame)
assert astropy_coords.separation(skyfield_coords) < 1 * u.arcsec
assert astropy_coords.separation_3d(skyfield_coords) < 10 * u.km
@pytest.mark.parametrize(
("body", "sep_tol", "dist_tol", "location"),
(
("mercury", 7.0 * u.arcsec, 1000 * u.km, "geocentric"),
("jupiter", 78.0 * u.arcsec, 76000 * u.km, "geocentric"),
("moon", 20.0 * u.arcsec, 80 * u.km, "geocentric"),
("sun", 5.0 * u.arcsec, 11.0 * u.km, "geocentric"),
("mercury", 7.0 * u.arcsec, 500 * u.km, "kitt_peak"),
("jupiter", 78.0 * u.arcsec, 82000 * u.km, "kitt_peak"),
),
)
def test_erfa_planet(body, sep_tol, dist_tol, location, horizons_ephemeris):
"""Test predictions using erfa/plan94.
Accuracies are maximum deviations listed in erfa/plan94.c, for Jupiter and
Mercury, and that quoted in Meeus "Astronomical Algorithms" (1998) for the Moon.
"""
if location == "kitt_peak":
# Add uncertainty in position of Earth
dist_tol += 1300 * u.km
horizons = horizons_ephemeris[location][body]
astropy = get_body(body, horizons.frame.obstime, ephemeris="builtin").transform_to(
horizons.frame
)
assert astropy.separation(horizons) < sep_tol
assert_quantity_allclose(astropy.distance, horizons.distance, atol=dist_tol)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize(
"body,location",
(
("mercury", "geocentric"),
("jupiter", "geocentric"),
("sun", "geocentric"),
("moon", "geocentric"),
("mercury", "kitt_peak"),
("jupiter", "kitt_peak"),
("moon", "kitt_peak"),
),
)
def test_de432s_planet(body, location, horizons_ephemeris):
horizons = horizons_ephemeris[location][body]
astropy = get_body(body, horizons.frame.obstime, ephemeris="de432s").transform_to(
horizons.frame
)
assert astropy.separation(horizons) < 5 * u.arcsec
assert_quantity_allclose(astropy.distance, horizons.distance, atol=20 * u.km)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize("bodyname", ("mercury", "jupiter"))
def test_custom_kernel_spec_body(bodyname):
"""
Checks that giving a kernel specifier instead of a body name works
"""
t = Time("2014-09-25T00:00", location=KITT_PEAK)
coord_by_name = get_body(bodyname, t, ephemeris="de432s")
coord_by_kspec = get_body(BODY_NAME_TO_KERNEL_SPEC[bodyname], t, ephemeris="de432s")
assert_quantity_allclose(coord_by_name.ra, coord_by_kspec.ra)
assert_quantity_allclose(coord_by_name.dec, coord_by_kspec.dec)
assert_quantity_allclose(coord_by_name.distance, coord_by_kspec.distance)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
def test_horizons_consistency_with_precision():
"""
A test to compare at high precision against output of JPL horizons.
Tests ephemerides, and conversions from ICRS to GCRS to TETE. We are aiming for
better than 2 milli-arcsecond precision.
We use the Moon since it is nearby, and moves fast in the sky so we are
testing for parallax, proper handling of light deflection and aberration.
"""
moon_data = np.loadtxt(get_pkg_data_filename("data/jpl_moon.dat"))
loc = EarthLocation.from_geodetic(
-67.787260 * u.deg, -22.959748 * u.deg, 5186 * u.m
)
times = Time("2020-04-06 00:00") + np.arange(0, 24, 1) * u.hour
apparent_frame = TETE(obstime=times, location=loc)
with solar_system_ephemeris.set("de430"):
astropy = get_body("moon", times, loc).transform_to(apparent_frame)
# JPL Horizons has a known offset (frame bias) of 51.02 mas in RA.
usrepr = UnitSphericalRepresentation(
moon_data[:, 0] * u.deg + 51.02376467 * u.mas, moon_data[:, 1] * u.deg
)
horizons = apparent_frame.realize_frame(usrepr)
assert_quantity_allclose(astropy.separation(horizons), 0 * u.mas, atol=1.5 * u.mas)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize(
"time",
(Time("1960-01-12 00:00"), Time("1980-03-25 00:00"), Time("2010-10-13 00:00")),
)
def test_get_sun_consistency(time):
"""
Test that the sun from JPL and the builtin get_sun match
"""
sun_jpl_gcrs = get_body("sun", time, ephemeris="de432s")
assert get_sun(time).separation(sun_jpl_gcrs) < 0.1 * u.arcsec
def test_get_body_nonscalar_regression():
"""
Test that the builtin ephemeris works with non-scalar times.
See Issue #5069.
"""
times = Time(["2015-08-28 03:30", "2015-09-05 10:30"])
# the following line will raise an Exception if the bug recurs.
get_body("moon", times, ephemeris="builtin")
def test_barycentric_pos_posvel_same():
# Check that the two routines give identical results.
ep1 = get_body_barycentric("earth", Time("2016-03-20T12:30:00"))
ep2, _ = get_body_barycentric_posvel("earth", Time("2016-03-20T12:30:00"))
np.testing.assert_array_equal(ep1.xyz, ep2.xyz)
def test_earth_barycentric_velocity_rough():
# Check that a time near the equinox gives roughly the right result.
ep, ev = get_body_barycentric_posvel("earth", Time("2016-03-20T12:30:00"))
assert_quantity_allclose(ep.xyz, [-1.0, 0.0, 0.0] * u.AU, atol=0.01 * u.AU)
expected = (
u.Quantity([0.0 * u.one, np.cos(23.5 * u.deg), np.sin(23.5 * u.deg)])
* -30.0
* u.km
/ u.s
)
assert_quantity_allclose(ev.xyz, expected, atol=1.0 * u.km / u.s)
def test_earth_barycentric_velocity_multi_d():
# Might as well test it with a multidimensional array too.
t = Time("2016-03-20T12:30:00") + np.arange(8.0).reshape(2, 2, 2) * u.yr / 2.0
ep, ev = get_body_barycentric_posvel("earth", t)
# note: assert_quantity_allclose doesn't like the shape mismatch.
# this is a problem with np.testing.assert_allclose.
assert quantity_allclose(
ep.get_xyz(xyz_axis=-1),
[[-1.0, 0.0, 0.0], [+1.0, 0.0, 0.0]] * u.AU,
atol=0.06 * u.AU,
)
expected = u.Quantity([0.0 * u.one, np.cos(23.5 * u.deg), np.sin(23.5 * u.deg)]) * (
[[-30.0], [30.0]] * u.km / u.s
)
assert quantity_allclose(ev.get_xyz(xyz_axis=-1), expected, atol=2.0 * u.km / u.s)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize(
("body", "pos_tol", "vel_tol"),
(
pytest.param("mercury", 1000.0 * u.km, 1.0 * u.km / u.s, id="mercury"),
pytest.param("jupiter", 100000.0 * u.km, 2.0 * u.km / u.s, id="jupiter"),
pytest.param("earth", 10 * u.km, 10 * u.mm / u.s, id="earth"),
pytest.param("moon", 18 * u.km, 50 * u.mm / u.s, id="moon"),
),
)
def test_barycentric_velocity_consistency(body, pos_tol, vel_tol):
# Tolerances are about 1.5 times the rms listed for plan94 and epv00,
# except for Mercury (which nominally is 334 km rms), and the Moon
# (which nominally is 6 km rms).
t = Time("2016-03-20T12:30:00")
ep, ev = get_body_barycentric_posvel(body, t, ephemeris="builtin")
dp, dv = get_body_barycentric_posvel(body, t, ephemeris="de432s")
assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)
assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
# Might as well test it with a multidimensional array too.
t = Time("2016-03-20T12:30:00") + np.arange(8.0).reshape(2, 2, 2) * u.yr / 2.0
ep, ev = get_body_barycentric_posvel(body, t, ephemeris="builtin")
dp, dv = get_body_barycentric_posvel(body, t, ephemeris="de432s")
assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)
assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize(
"time",
(Time("1960-01-12 00:00"), Time("1980-03-25 00:00"), Time("2010-10-13 00:00")),
)
def test_url_or_file_ephemeris(time):
# URL for ephemeris de432s used for testing:
url = "http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/de432s.bsp"
# Pass the ephemeris directly as a URL.
coord_by_url = get_body("earth", time, ephemeris=url)
# Translate the URL to the cached location on the filesystem.
# Since we just used the url above, it should already have been downloaded.
filepath = download_file(url, cache=True)
# Get the coordinates using the file path directly:
coord_by_filepath = get_body("earth", time, ephemeris=filepath)
# Using the URL or filepath should give exactly the same results:
np.testing.assert_array_equal(coord_by_url.ra, coord_by_filepath.ra)
np.testing.assert_array_equal(coord_by_url.dec, coord_by_filepath.dec)
np.testing.assert_array_equal(coord_by_url.distance, coord_by_filepath.distance)
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
def test_ephemeris_non_existing_url(monkeypatch):
def request_invalid_url(*args, **kwargs):
raise HTTPError(code=404, msg="Not Found", fp=None, hdrs=None, url="")
monkeypatch.setattr("urllib.request.OpenerDirector.open", request_invalid_url)
with pytest.raises(HTTPError, match="^HTTP Error 404: Not Found$"):
get_body(
"earth",
time=Time("1960-01-12 00:00"),
ephemeris="https://www.astropy.org/path/to/nonexisting/file.bsp",
)
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize(
"ephemeris,expected_error",
[
pytest.param(
"de001",
pytest.raises(HTTPError, match="^HTTP Error 404: Not Found$"),
marks=pytest.mark.remote_data,
id="non_existing_JPL_ephemeris_version",
),
pytest.param(
"not_an_ephemeris",
pytest.raises(ValueError, match="^Malformed URL: 'not_an_ephemeris'$"),
marks=pytest.mark.remote_data,
id="invalid_string",
),
pytest.param(
"/path/to/nonexisting/file.bsp",
pytest.raises(
ValueError, match="^Malformed URL: '/path/to/nonexisting/file.bsp'$"
),
id="missing_local_file",
),
],
)
def test_ephemeris_wrong_input(ephemeris, expected_error):
with expected_error:
get_body("earth", Time("1960-01-12 00:00"), ephemeris=ephemeris)
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
def test_ephemeris_local_file_not_ephemeris():
# NOTE: This test currently leaves the file open (ResourceWarning).
# To fix this issue, an upstream fix is required in jplephem
# package.
with pytest.warns(ResourceWarning), pytest.raises(ValueError, match="^file starts"):
get_body("earth", Time("1960-01-12 00:00"), ephemeris=__file__)
def test_get_body_accounts_for_location_on_Earth():
"""Regression test for #10271"""
t = Time(58973.534052125986, format="mjd")
# GCRS position of ALMA at this time
obs_p = CartesianRepresentation(
5724535.74068625, -1311071.58985697, -2492738.93017009, u.m
)
icrs_sun_from_alma = _get_apparent_body_position("sun", t, "builtin", obs_p)
icrs_sun_from_geocentre = _get_apparent_body_position(
"sun", t, "builtin", CartesianRepresentation(0, 0, 0, u.m)
)
difference = (icrs_sun_from_alma - icrs_sun_from_geocentre).norm()
assert_quantity_allclose(difference, 0.13046941 * u.m, atol=1 * u.mm)
def test_get_moon_deprecation():
time_now = Time.now()
with pytest.warns(
AstropyDeprecationWarning, match=r'Use get_body\("moon"\) instead\.$'
):
moon = get_moon(time_now)
assert moon == get_body("moon", time_now)
|
5455177ce2562ae86e68af39ec838c553c9dc9893164c8b4203a634fd8912888 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import representation as r
from astropy.coordinates import transformations as t
from astropy.coordinates.attributes import Attribute
from astropy.coordinates.baseframe import BaseCoordinateFrame, frame_transform_graph
from astropy.coordinates.builtin_frames import (
FK4,
FK5,
HCRS,
ICRS,
AltAz,
FK4NoETerms,
Galactic,
)
from astropy.coordinates.matrix_utilities import rotation_matrix
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.utils.exceptions import AstropyWarning
CARTESIAN_POS = r.CartesianRepresentation([1, 2, 3] * u.kpc)
CARTESIAN_VEL = r.CartesianDifferential([8, 9, 10] * u.km / u.s)
CARTESIAN_POS_AND_VEL = CARTESIAN_POS.with_differentials(CARTESIAN_VEL)
RADIAL_VEL = r.RadialDifferential(1 * u.km / u.s)
SPHERICAL_COS_LAT_VEL = r.SphericalCosLatDifferential(
1 * u.mas / u.yr, 2 * u.mas / u.yr, 3 * u.km / u.s
)
SPHERICAL_POS = r.SphericalRepresentation(
lon=1 * u.deg, lat=2.0 * u.deg, distance=10 * u.pc
)
UNIT_SPHERICAL_POS = r.UnitSphericalRepresentation(lon=1 * u.deg, lat=2.0 * u.deg)
ROT_30 = rotation_matrix(30 * u.deg)
ROT_45 = rotation_matrix(45 * u.deg)
ROT_75 = rotation_matrix(75 * u.deg)
OFFSET_X = r.CartesianRepresentation([1, 0, 0])
OFFSET_Z = r.CartesianRepresentation([0, 0, 1])
OFFSET_123 = r.CartesianRepresentation([1, 2, 3])
OFFSET_456 = r.CartesianRepresentation([4, 5, 6])
OFFSET_579 = r.CartesianRepresentation([5, 7, 9])
SQRT_2 = np.sqrt(2)
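# A note on the constants above (editorial sketch, inferred from how the
# affine-combination tests below use them): ROT_45 @ ROT_30 equals ROT_75,
# because rotations about the same (default) axis simply add their angles, and
# OFFSET_123 + OFFSET_456 equals OFFSET_579 component-wise.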
# Coordinates just for these tests.
class TCoo1(ICRS):
pass
class TCoo2(ICRS):
pass
class TCoo3(ICRS):
pass
def test_transform_classes():
"""
Tests the class-based/OO syntax for creating transforms
"""
def tfun(c, f):
return f.__class__(ra=c.ra, dec=c.dec)
_ = t.FunctionTransform(tfun, TCoo1, TCoo2, register_graph=frame_transform_graph)
c1 = TCoo1(ra=1 * u.radian, dec=0.5 * u.radian)
c2 = c1.transform_to(TCoo2())
assert_allclose(c2.ra.radian, 1)
assert_allclose(c2.dec.radian, 0.5)
def matfunc(coo, fr):
return [[1, 0, 0], [0, coo.ra.degree, 0], [0, 0, 1]]
trans2 = t.DynamicMatrixTransform(matfunc, TCoo1, TCoo2)
trans2.register(frame_transform_graph)
c3 = TCoo1(ra=1 * u.deg, dec=2 * u.deg)
c4 = c3.transform_to(TCoo2())
assert_allclose(c4.ra.degree, 1)
    assert_allclose(c4.dec.degree, 2)
# be sure to unregister the second one - no need for trans1 because it
# already got unregistered when trans2 was created.
trans2.unregister(frame_transform_graph)
def test_transform_decos():
"""
Tests the decorator syntax for creating transforms
"""
c1 = TCoo1(ra=1 * u.deg, dec=2 * u.deg)
@frame_transform_graph.transform(t.FunctionTransform, TCoo1, TCoo2)
def trans(coo1, f):
return TCoo2(ra=coo1.ra, dec=coo1.dec * 2)
c2 = c1.transform_to(TCoo2())
assert_allclose(c2.ra.degree, 1)
assert_allclose(c2.dec.degree, 4)
c3 = TCoo1(r.CartesianRepresentation(x=1 * u.pc, y=1 * u.pc, z=2 * u.pc))
@frame_transform_graph.transform(t.StaticMatrixTransform, TCoo1, TCoo2)
def matrix():
return [[2, 0, 0], [0, 1, 0], [0, 0, 1]]
c4 = c3.transform_to(TCoo2())
assert_allclose(c4.cartesian.x, 2 * u.pc)
assert_allclose(c4.cartesian.y, 1 * u.pc)
assert_allclose(c4.cartesian.z, 2 * u.pc)
def test_shortest_path():
class FakeTransform:
def __init__(self, pri):
self.priority = pri
g = t.TransformGraph()
# cheating by adding graph elements directly that are not classes - the
# graphing algorithm still works fine with integers - it just isn't a valid
# TransformGraph
    # the graph is a down-going diamond graph with the lower-right slightly
# heavier and a cycle from the bottom to the top
# also, a pair of nodes isolated from 1
g._graph[1][2] = FakeTransform(1)
g._graph[1][3] = FakeTransform(1)
g._graph[2][4] = FakeTransform(1)
g._graph[3][4] = FakeTransform(2)
g._graph[4][1] = FakeTransform(5)
g._graph[5][6] = FakeTransform(1)
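    # Editorial note on the expected results below: edges are directed and the
    # reported distance is the sum of the priorities along the path, so 1 -> 4
    # should go through node 2 (cost 1 + 1 = 2) rather than node 3 (cost
    # 1 + 2 = 3), and node 5 is unreachable from node 1 because it has no
    # incoming edges.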
path, d = g.find_shortest_path(1, 2)
assert path == [1, 2]
assert d == 1
path, d = g.find_shortest_path(1, 3)
assert path == [1, 3]
assert d == 1
path, d = g.find_shortest_path(1, 4)
print("Cached paths:", g._shortestpaths)
assert path == [1, 2, 4]
assert d == 2
# unreachable
path, d = g.find_shortest_path(1, 5)
assert path is None
assert d == float("inf")
path, d = g.find_shortest_path(5, 6)
assert path == [5, 6]
assert d == 1
def test_sphere_cart():
"""
Tests the spherical <-> cartesian transform functions
"""
from astropy.coordinates import cartesian_to_spherical, spherical_to_cartesian
from astropy.utils import NumpyRNGContext
x, y, z = spherical_to_cartesian(1, 0, 0)
assert_allclose(x, 1)
assert_allclose(y, 0)
assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(0, 1, 1)
assert_allclose(x, 0)
assert_allclose(y, 0)
assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(5, 0, np.arcsin(4.0 / 5.0))
assert_allclose(x, 3)
assert_allclose(y, 4)
assert_allclose(z, 0)
r, lat, lon = cartesian_to_spherical(0, 1, 0)
assert_allclose(r, 1)
assert_allclose(lat, 0 * u.deg)
assert_allclose(lon, np.pi / 2 * u.rad)
# test round-tripping
with NumpyRNGContext(13579):
x, y, z = np.random.randn(3, 5)
x2, y2, z2 = spherical_to_cartesian(*cartesian_to_spherical(x, y, z))
assert_allclose(x, x2)
assert_allclose(y, y2)
assert_allclose(z, z2)
def test_transform_path_pri():
"""
This checks that the transformation path prioritization works by
making sure the ICRS -> Gal transformation always goes through FK5
and not FK4.
"""
frame_transform_graph.invalidate_cache()
tpath, td = frame_transform_graph.find_shortest_path(ICRS, Galactic)
assert tpath == [ICRS, FK5, Galactic]
assert td == 2
# but direct from FK4 to Galactic should still be possible
tpath, td = frame_transform_graph.find_shortest_path(FK4, Galactic)
assert tpath == [FK4, FK4NoETerms, Galactic]
assert td == 2
def test_obstime():
"""
Checks to make sure observation time is
accounted for at least in FK4 <-> ICRS transformations
"""
b1950 = Time("B1950")
j1975 = Time("J1975")
fk4_50 = FK4(ra=1 * u.deg, dec=2 * u.deg, obstime=b1950)
fk4_75 = FK4(ra=1 * u.deg, dec=2 * u.deg, obstime=j1975)
icrs_50 = fk4_50.transform_to(ICRS())
icrs_75 = fk4_75.transform_to(ICRS())
# now check that the resulting coordinates are *different* - they should be,
# because the obstime is different
assert icrs_50.ra.degree != icrs_75.ra.degree
assert icrs_50.dec.degree != icrs_75.dec.degree
# ------------------------------------------------------------------------------
# Affine transform tests and helpers:
# just acting as a namespace
class transfunc:
rep = r.CartesianRepresentation(np.arange(3) * u.pc)
dif = r.CartesianDifferential(*np.arange(3, 6) * u.pc / u.Myr)
rep0 = r.CartesianRepresentation(np.zeros(3) * u.pc)
@classmethod
def both(cls, coo, fr):
# exchange x <-> z and offset
M = np.array([[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
return M, cls.rep.with_differentials(cls.dif)
@classmethod
def just_matrix(cls, coo, fr):
        # exchange x <-> z; no offset
M = np.array([[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
return M, None
@classmethod
def no_matrix(cls, coo, fr):
return None, cls.rep.with_differentials(cls.dif)
@classmethod
def no_pos(cls, coo, fr):
return None, cls.rep0.with_differentials(cls.dif)
@classmethod
def no_vel(cls, coo, fr):
return None, cls.rep
@pytest.mark.parametrize(
"transfunc",
[
transfunc.both,
transfunc.no_matrix,
transfunc.no_pos,
transfunc.no_vel,
transfunc.just_matrix,
],
)
@pytest.mark.parametrize(
"rep",
(
CARTESIAN_POS,
CARTESIAN_POS_AND_VEL,
CARTESIAN_POS_AND_VEL.represent_as(
r.CylindricalRepresentation, r.CylindricalDifferential
),
),
)
def test_affine_transform_succeed(transfunc, rep):
c = TCoo1(rep)
# compute expected output
M, offset = transfunc(c, TCoo2)
expected_rep = rep.to_cartesian().with_differentials(
{
k: diff.represent_as(r.CartesianDifferential, rep)
for k, diff in rep.differentials.items()
}
)
if M is not None:
expected_rep = expected_rep.transform(M)
expected_pos = expected_rep.without_differentials()
if offset is not None:
expected_pos += offset.without_differentials()
expected_vel = None
if c.data.differentials:
expected_vel = expected_rep.differentials["s"]
if offset and offset.differentials:
expected_vel += offset.differentials["s"]
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc, TCoo1, TCoo2)
trans.register(frame_transform_graph)
c2 = c.transform_to(TCoo2())
assert quantity_allclose(
c2.data.to_cartesian().xyz, expected_pos.to_cartesian().xyz
)
if expected_vel is not None:
diff = c2.data.differentials["s"].to_cartesian(base=c2.data)
assert quantity_allclose(diff.xyz, expected_vel.d_xyz)
trans.unregister(frame_transform_graph)
# these should fail
def transfunc_invalid_matrix(coo, fr):
return np.eye(4), None
# Leaving this open in case we want to add more functions to check for failures
@pytest.mark.parametrize("transfunc", [transfunc_invalid_matrix])
def test_affine_transform_fail(transfunc):
c = TCoo1(CARTESIAN_POS_AND_VEL)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc, TCoo1, TCoo2)
trans.register(frame_transform_graph)
with pytest.raises(ValueError):
c.transform_to(TCoo2())
trans.unregister(frame_transform_graph)
def test_too_many_differentials():
dif2 = r.CartesianDifferential(*np.arange(3, 6) * u.pc / u.Myr**2)
rep = CARTESIAN_POS_AND_VEL.with_differentials(dif2)
with pytest.raises(ValueError):
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc.both, TCoo1, TCoo2)
trans.register(frame_transform_graph)
# Check that if frame somehow gets through to transformation, multiple
# differentials are caught
c = TCoo1(rep.without_differentials())
c._data = c._data.with_differentials({"s": CARTESIAN_VEL, "s2": dif2})
with pytest.raises(ValueError):
c.transform_to(TCoo2())
trans.unregister(frame_transform_graph)
# A matrix transform of a unit spherical with differentials should work
@pytest.mark.parametrize(
"rep",
(
UNIT_SPHERICAL_POS.with_differentials(SPHERICAL_COS_LAT_VEL),
r.UnitSphericalRepresentation(
UNIT_SPHERICAL_POS, differentials={"s": RADIAL_VEL}
),
SPHERICAL_POS.with_differentials(RADIAL_VEL),
),
)
def test_unit_spherical_with_differentials(rep):
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc.just_matrix, TCoo1, TCoo2)
trans.register(frame_transform_graph)
c2 = c.transform_to(TCoo2())
assert "s" in rep.differentials
assert isinstance(c2.data.differentials["s"], type(rep.differentials["s"]))
if isinstance(rep.differentials["s"], r.RadialDifferential):
assert c2.data.differentials["s"] is rep.differentials["s"]
trans.unregister(frame_transform_graph)
# should fail if we have to do offsets
trans = t.AffineTransform(transfunc.both, TCoo1, TCoo2)
trans.register(frame_transform_graph)
with pytest.raises(TypeError):
c.transform_to(TCoo2())
trans.unregister(frame_transform_graph)
def test_vel_transformation_obstime_err():
# TODO: replace after a final decision on PR #6280
from astropy.coordinates.sites import get_builtin_sites
diff = r.CartesianDifferential([0.1, 0.2, 0.3] * u.km / u.s)
rep = r.CartesianRepresentation([1, 2, 3] * u.au, differentials=diff)
loc = get_builtin_sites()["example_site"]
aaf = AltAz(obstime="J2010", location=loc)
aaf2 = AltAz(obstime=aaf.obstime + 3 * u.day, location=loc)
aaf3 = AltAz(obstime=aaf.obstime + np.arange(3) * u.day, location=loc)
aaf4 = AltAz(obstime=aaf.obstime, location=loc)
aa = aaf.realize_frame(rep)
with pytest.raises(NotImplementedError) as exc:
aa.transform_to(aaf2)
assert "cannot transform" in exc.value.args[0]
with pytest.raises(NotImplementedError) as exc:
aa.transform_to(aaf3)
assert "cannot transform" in exc.value.args[0]
aa.transform_to(aaf4)
aa.transform_to(ICRS())
def test_function_transform_with_differentials():
def tfun(c, f):
return f.__class__(ra=c.ra, dec=c.dec)
_ = t.FunctionTransform(tfun, TCoo3, TCoo2, register_graph=frame_transform_graph)
t3 = TCoo3(
ra=1 * u.deg,
dec=2 * u.deg,
pm_ra_cosdec=1 * u.marcsec / u.yr,
pm_dec=1 * u.marcsec / u.yr,
)
with pytest.warns(AstropyWarning, match=r".*they have been dropped.*") as w:
t3.transform_to(TCoo2())
assert len(w) == 1
def test_frame_override_component_with_attribute():
"""
It was previously possible to define a frame with an attribute with the
same name as a component. We don't want to allow this!
"""
class BorkedFrame(BaseCoordinateFrame):
ra = Attribute(default=150)
dec = Attribute(default=150)
def trans_func(coo1, f):
pass
trans = t.FunctionTransform(trans_func, BorkedFrame, ICRS)
with pytest.raises(ValueError) as exc:
trans.register(frame_transform_graph)
assert (
"BorkedFrame" in exc.value.args[0]
and "'ra'" in exc.value.args[0]
and "'dec'" in exc.value.args[0]
)
def test_static_matrix_combine_paths():
"""
Check that combined staticmatrixtransform matrices provide the same
transformation as using an intermediate transformation.
This is somewhat of a regression test for #7706
"""
class AFrame(BaseCoordinateFrame):
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
t1 = t.StaticMatrixTransform(rotation_matrix(30.0 * u.deg, "z"), ICRS, AFrame)
t1.register(frame_transform_graph)
t2 = t.StaticMatrixTransform(rotation_matrix(30.0 * u.deg, "z").T, AFrame, ICRS)
t2.register(frame_transform_graph)
class BFrame(BaseCoordinateFrame):
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
t3 = t.StaticMatrixTransform(rotation_matrix(30.0 * u.deg, "x"), ICRS, BFrame)
t3.register(frame_transform_graph)
t4 = t.StaticMatrixTransform(rotation_matrix(30.0 * u.deg, "x").T, BFrame, ICRS)
t4.register(frame_transform_graph)
c = Galactic(123 * u.deg, 45 * u.deg)
c_direct = c.transform_to(BFrame())
c_through_A = c.transform_to(AFrame()).transform_to(BFrame())
c_through_ICRS = c.transform_to(ICRS()).transform_to(BFrame())
assert quantity_allclose(c_direct.lon, c_through_A.lon)
assert quantity_allclose(c_direct.lat, c_through_A.lat)
assert quantity_allclose(c_direct.lon, c_through_ICRS.lon)
assert quantity_allclose(c_direct.lat, c_through_ICRS.lat)
for t_ in [t1, t2, t3, t4]:
t_.unregister(frame_transform_graph)
def test_multiple_aliases():
# Define a frame with multiple aliases
class MultipleAliasesFrame(BaseCoordinateFrame):
name = ["alias_1", "alias_2"]
default_representation = r.SphericalRepresentation
def tfun(c, f):
return f.__class__(lon=c.lon, lat=c.lat)
# Register a transform
graph = t.TransformGraph()
_ = t.FunctionTransform(
tfun, MultipleAliasesFrame, MultipleAliasesFrame, register_graph=graph
)
# Test that both aliases have been added to the transform graph
assert graph.lookup_name("alias_1") == MultipleAliasesFrame
assert graph.lookup_name("alias_2") == MultipleAliasesFrame
# Test that both aliases appear in the graphviz DOT format output
dotstr = graph.to_dot_graph()
assert "`alias_1`\\n`alias_2`" in dotstr
def test_remove_transform_and_unregister():
def tfun(c, f):
        return f.__class__(ra=c.ra, dec=c.dec)
# Register transforms
graph = t.TransformGraph()
ftrans1 = t.FunctionTransform(tfun, TCoo1, TCoo1, register_graph=graph)
ftrans2 = t.FunctionTransform(tfun, TCoo2, TCoo2, register_graph=graph)
_ = t.FunctionTransform(tfun, TCoo1, TCoo2, register_graph=graph)
# Confirm that the frames are part of the graph
assert TCoo1 in graph.frame_set
assert TCoo2 in graph.frame_set
# Use all three ways to remove a transform
# Remove the only transform with TCoo2 as the "from" frame
ftrans2.unregister(graph)
# TCoo2 should still be part of the graph because it is the "to" frame of a transform
assert TCoo2 in graph.frame_set
# Remove the remaining transform that involves TCoo2
graph.remove_transform(TCoo1, TCoo2, None)
# Now TCoo2 should not be part of the graph
assert TCoo2 not in graph.frame_set
# Remove the remaining transform that involves TCoo1
graph.remove_transform(None, None, ftrans1)
# Now TCoo1 should not be part of the graph
assert TCoo1 not in graph.frame_set
def test_remove_transform_errors():
def tfun(c, f):
return f.__class__(ra=c.ra, dec=c.dec)
graph = t.TransformGraph()
_ = t.FunctionTransform(tfun, TCoo1, TCoo1, register_graph=graph)
# Test bad calls to remove_transform
with pytest.raises(ValueError):
graph.remove_transform(None, TCoo1, None)
with pytest.raises(ValueError):
graph.remove_transform(TCoo1, None, None)
with pytest.raises(ValueError):
graph.remove_transform(None, None, None)
with pytest.raises(ValueError):
graph.remove_transform(None, None, 1)
with pytest.raises(ValueError):
graph.remove_transform(TCoo1, TCoo1, 1)
def test_impose_finite_difference_dt():
class H1(HCRS):
pass
class H2(HCRS):
pass
class H3(HCRS):
pass
graph = t.TransformGraph()
tfun = lambda c, f: type(f)(ra=c.ra, dec=c.dec)
# Set up a number of transforms with different time steps
old_dt = 1 * u.min
transform1 = t.FunctionTransformWithFiniteDifference(
tfun, H1, H1, register_graph=graph, finite_difference_dt=old_dt
)
transform2 = t.FunctionTransformWithFiniteDifference(
tfun, H2, H2, register_graph=graph, finite_difference_dt=old_dt * 2
)
transform3 = t.FunctionTransformWithFiniteDifference(
tfun, H2, H3, register_graph=graph, finite_difference_dt=old_dt * 3
)
# Check that all of the transforms have the same new time step
new_dt = 1 * u.yr
with graph.impose_finite_difference_dt(new_dt):
assert transform1.finite_difference_dt == new_dt
assert transform2.finite_difference_dt == new_dt
assert transform3.finite_difference_dt == new_dt
# Check that all of the original time steps have been restored
assert transform1.finite_difference_dt == old_dt
assert transform2.finite_difference_dt == old_dt * 2
assert transform3.finite_difference_dt == old_dt * 3
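# Editorial note: the expected ("check") parameters below follow the usual rule
# for composing two affine transforms.  Applying (M1, o1) and then (M2, o2) to a
# vector v gives M2 @ (M1 @ v + o1) + o2, i.e. a combined matrix M2 @ M1 and a
# combined offset M2 @ o1 + o2, with None standing in for an identity matrix or
# a zero offset.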
@pytest.mark.parametrize(
"first,second,check",
(
([ROT_30, None], [ROT_45, None], [ROT_75, None]),
([ROT_30, None], [ROT_45, OFFSET_Z], [ROT_75, OFFSET_Z]),
([ROT_30, OFFSET_123], [None, OFFSET_456], [ROT_30, OFFSET_579]),
([None, OFFSET_123], [None, OFFSET_456], [None, OFFSET_579]),
([ROT_30, OFFSET_X], [None, None], [ROT_30, OFFSET_X]),
([None, None], [ROT_45, OFFSET_Z], [ROT_45, OFFSET_Z]),
([None, None], [None, None], [None, None]),
(
[ROT_30, OFFSET_X],
[ROT_45, None],
[ROT_75, r.CartesianRepresentation([1 / SQRT_2, -1 / SQRT_2, 0])],
),
(
[ROT_30, OFFSET_X],
[ROT_45, OFFSET_Z],
[ROT_75, r.CartesianRepresentation([1 / SQRT_2, -1 / SQRT_2, 1])],
),
(
[None, OFFSET_123],
[ROT_45, OFFSET_456],
[ROT_45, r.CartesianRepresentation([3 / SQRT_2 + 4, 1 / SQRT_2 + 5, 9])],
),
),
)
def test_combine_affine_params(first, second, check):
result = t._combine_affine_params(first, second)
if check[0] is None:
assert result[0] is None
else:
assert_allclose(result[0], check[0])
if check[1] is None:
assert result[1] is None
else:
assert_allclose(result[1].xyz, check[1].xyz)
|
5eea8327889344d150fa54eb01749e2411988ed50ec38abbbaec9561c005857d | import numpy as np
import pytest
from astropy import units as u
from astropy.constants import c as speed_of_light
from astropy.coordinates import Distance, EarthLocation, SkyCoord
from astropy.coordinates.sites import get_builtin_sites
from astropy.table import Table
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time
from astropy.utils.data import get_pkg_data_filename
@pytest.fixture(scope="module")
def input_radecs():
ras = []
decs = []
for dec in np.linspace(-85, 85, 15):
nra = int(np.round(10 * np.cos(dec * u.deg)).value)
ras1 = np.linspace(-180, 180 - 1e-6, nra)
ras.extend(ras1)
decs.extend([dec] * len(ras1))
return SkyCoord(ra=ras, dec=decs, unit=u.deg)
@pytest.mark.parametrize("kind", ["heliocentric", "barycentric"])
def test_basic(kind):
t0 = Time("2015-1-1")
loc = get_builtin_sites()["example_site"]
sc = SkyCoord(0, 0, unit=u.deg, obstime=t0, location=loc)
rvc0 = sc.radial_velocity_correction(kind)
assert rvc0.shape == ()
assert rvc0.unit.is_equivalent(u.km / u.s)
scs = SkyCoord(0, 0, unit=u.deg, obstime=t0 + np.arange(10) * u.day, location=loc)
rvcs = scs.radial_velocity_correction(kind)
assert rvcs.shape == (10,)
assert rvcs.unit.is_equivalent(u.km / u.s)
test_input_time = Time(2457244.5, format="jd")
# test_input_loc = EarthLocation.of_site('Cerro Paranal')
# to avoid the network hit we just copy here what that yields
test_input_loc = EarthLocation.from_geodetic(
lon=-70.403 * u.deg, lat=-24.6252 * u.deg, height=2635 * u.m
)
def test_helio_iraf(input_radecs):
"""
Compare the heliocentric correction to the IRAF rvcorrect.
    The `generate_IRAF_input` function is provided to show how the comparison data
was produced
def generate_IRAF_input(writefn=None):
dt = test_input_time.utc.datetime
        coos = input_radecs  # `input_radecs` is implemented as a pytest fixture
lines = []
for ra, dec in zip(coos.ra, coos.dec):
rastr = Angle(ra).to_string(u.hour, sep=":")
decstr = Angle(dec).to_string(u.deg, sep=":")
lines.append(
f"{dt.year} {dt.month} {dt.day} {dt.hour}:{dt.minute} {rastr} {decstr}"
)
if writefn:
with open(writefn, "w") as f:
for l in lines:
f.write(l)
else:
for l in lines:
print(l)
print("Run IRAF as:\nastutil\nrvcorrect f=<filename> observatory=Paranal")
"""
rvcorr_result = """
# RVCORRECT: Observatory parameters for European Southern Observatory: Paranal
# latitude = -24:37.5
# longitude = 70:24.2
# altitude = 2635
## HJD VOBS VHELIO VLSR VDIURNAL VLUNAR VANNUAL VSOLAR
2457244.50120 0.00 -10.36 -20.35 -0.034 -0.001 -10.325 -9.993
2457244.50025 0.00 -14.20 -23.86 -0.115 -0.004 -14.085 -9.656
2457244.50278 0.00 -2.29 -11.75 0.115 0.004 -2.413 -9.459
2457244.50025 0.00 -14.20 -23.86 -0.115 -0.004 -14.085 -9.656
2457244.49929 0.00 -17.41 -26.30 -0.192 -0.006 -17.214 -8.888
2457244.50317 0.00 -17.19 -17.44 0.078 0.001 -17.269 -0.253
2457244.50348 0.00 2.35 -6.21 0.192 0.006 2.156 -8.560
2457244.49959 0.00 2.13 -15.06 -0.078 -0.000 2.211 -17.194
2457244.49929 0.00 -17.41 -26.30 -0.192 -0.006 -17.214 -8.888
2457244.49835 0.00 -19.84 -27.56 -0.259 -0.008 -19.573 -7.721
2457244.50186 0.00 -24.47 -22.16 -0.038 -0.004 -24.433 2.313
2457244.50470 0.00 -11.11 -8.57 0.221 0.005 -11.332 2.534
2457244.50402 0.00 6.90 -0.38 0.259 0.008 6.629 -7.277
2457244.50051 0.00 11.53 -5.78 0.038 0.004 11.489 -17.311
2457244.49768 0.00 -1.84 -19.37 -0.221 -0.004 -1.612 -17.533
2457244.49835 0.00 -19.84 -27.56 -0.259 -0.008 -19.573 -7.721
2457244.49749 0.00 -21.38 -27.59 -0.315 -0.010 -21.056 -6.209
2457244.50109 0.00 -27.69 -22.90 -0.096 -0.006 -27.584 4.785
2457244.50457 0.00 -17.00 -9.30 0.196 0.003 -17.201 7.704
2457244.50532 0.00 2.62 2.97 0.340 0.009 2.276 0.349
2457244.50277 0.00 16.42 4.67 0.228 0.009 16.178 -11.741
2457244.49884 0.00 13.98 -5.48 -0.056 0.002 14.039 -19.463
2457244.49649 0.00 -2.84 -19.84 -0.297 -0.007 -2.533 -17.000
2457244.49749 0.00 -21.38 -27.59 -0.315 -0.010 -21.056 -6.209
2457244.49675 0.00 -21.97 -26.39 -0.357 -0.011 -21.598 -4.419
2457244.50025 0.00 -29.30 -22.47 -0.149 -0.008 -29.146 6.831
2457244.50398 0.00 -21.55 -9.88 0.146 0.001 -21.700 11.670
2457244.50577 0.00 -3.26 4.00 0.356 0.009 -3.623 7.263
2457244.50456 0.00 14.87 11.06 0.357 0.011 14.497 -3.808
2457244.50106 0.00 22.20 7.14 0.149 0.008 22.045 -15.058
2457244.49732 0.00 14.45 -5.44 -0.146 -0.001 14.600 -19.897
2457244.49554 0.00 -3.84 -19.33 -0.356 -0.008 -3.478 -15.491
2457244.49675 0.00 -21.97 -26.39 -0.357 -0.011 -21.598 -4.419
2457244.49615 0.00 -21.57 -24.00 -0.383 -0.012 -21.172 -2.432
2457244.49942 0.00 -29.36 -20.83 -0.193 -0.009 -29.157 8.527
2457244.50312 0.00 -24.26 -9.75 0.088 -0.001 -24.348 14.511
2457244.50552 0.00 -8.66 4.06 0.327 0.007 -8.996 12.721
2457244.50549 0.00 10.14 14.13 0.413 0.012 9.715 3.994
2457244.50305 0.00 23.35 15.76 0.306 0.011 23.031 -7.586
2457244.49933 0.00 24.78 8.18 0.056 0.006 24.721 -16.601
2457244.49609 0.00 13.77 -5.06 -0.221 -0.003 13.994 -18.832
2457244.49483 0.00 -4.53 -17.77 -0.394 -0.010 -4.131 -13.237
2457244.49615 0.00 -21.57 -24.00 -0.383 -0.012 -21.172 -2.432
2457244.49572 0.00 -20.20 -20.54 -0.392 -0.013 -19.799 -0.335
2457244.49907 0.00 -28.17 -17.30 -0.197 -0.009 -27.966 10.874
2457244.50285 0.00 -22.96 -5.96 0.090 -0.001 -23.048 16.995
2457244.50531 0.00 -7.00 8.16 0.335 0.007 -7.345 15.164
2457244.50528 0.00 12.23 18.47 0.423 0.012 11.795 6.238
2457244.50278 0.00 25.74 20.13 0.313 0.012 25.416 -5.607
2457244.49898 0.00 27.21 12.38 0.057 0.006 27.144 -14.829
2457244.49566 0.00 15.94 -1.17 -0.226 -0.003 16.172 -17.111
2457244.49437 0.00 -2.78 -14.17 -0.403 -0.010 -2.368 -11.387
2457244.49572 0.00 -20.20 -20.54 -0.392 -0.013 -19.799 -0.335
2457244.49548 0.00 -17.94 -16.16 -0.383 -0.012 -17.541 1.776
2457244.49875 0.00 -25.73 -12.99 -0.193 -0.009 -25.525 12.734
2457244.50246 0.00 -20.63 -1.91 0.088 -0.001 -20.716 18.719
2457244.50485 0.00 -5.03 11.90 0.327 0.007 -5.365 16.928
2457244.50482 0.00 13.77 21.97 0.413 0.012 13.347 8.202
2457244.50238 0.00 26.98 23.60 0.306 0.011 26.663 -3.378
2457244.49867 0.00 28.41 16.02 0.056 0.005 28.353 -12.393
2457244.49542 0.00 17.40 2.78 -0.221 -0.003 17.625 -14.625
2457244.49416 0.00 -0.90 -9.93 -0.394 -0.010 -0.499 -9.029
2457244.49548 0.00 -17.94 -16.16 -0.383 -0.012 -17.541 1.776
2457244.49544 0.00 -14.87 -11.06 -0.357 -0.011 -14.497 3.808
2457244.49894 0.00 -22.20 -7.14 -0.149 -0.008 -22.045 15.058
2457244.50268 0.00 -14.45 5.44 0.146 0.001 -14.600 19.897
2457244.50446 0.00 3.84 19.33 0.356 0.008 3.478 15.491
2457244.50325 0.00 21.97 26.39 0.357 0.011 21.598 4.419
2457244.49975 0.00 29.30 22.47 0.149 0.008 29.146 -6.831
2457244.49602 0.00 21.55 9.88 -0.146 -0.001 21.700 -11.670
2457244.49423 0.00 3.26 -4.00 -0.356 -0.009 3.623 -7.263
2457244.49544 0.00 -14.87 -11.06 -0.357 -0.011 -14.497 3.808
2457244.49561 0.00 -11.13 -5.46 -0.315 -0.010 -10.805 5.670
2457244.49921 0.00 -17.43 -0.77 -0.096 -0.006 -17.333 16.664
2457244.50269 0.00 -6.75 12.83 0.196 0.003 -6.949 19.583
2457244.50344 0.00 12.88 25.10 0.340 0.009 12.527 12.227
2457244.50089 0.00 26.67 26.80 0.228 0.009 26.430 0.137
2457244.49696 0.00 24.24 16.65 -0.056 0.002 24.290 -7.584
2457244.49461 0.00 7.42 2.29 -0.297 -0.007 7.719 -5.122
2457244.49561 0.00 -11.13 -5.46 -0.315 -0.010 -10.805 5.670
2457244.49598 0.00 -6.90 0.38 -0.259 -0.008 -6.629 7.277
2457244.49949 0.00 -11.53 5.78 -0.038 -0.004 -11.489 17.311
2457244.50232 0.00 1.84 19.37 0.221 0.004 1.612 17.533
2457244.50165 0.00 19.84 27.56 0.259 0.008 19.573 7.721
2457244.49814 0.00 24.47 22.16 0.038 0.004 24.433 -2.313
2457244.49530 0.00 11.11 8.57 -0.221 -0.005 11.332 -2.534
2457244.49598 0.00 -6.90 0.38 -0.259 -0.008 -6.629 7.277
2457244.49652 0.00 -2.35 6.21 -0.192 -0.006 -2.156 8.560
2457244.50041 0.00 -2.13 15.06 0.078 0.000 -2.211 17.194
2457244.50071 0.00 17.41 26.30 0.192 0.006 17.214 8.888
2457244.49683 0.00 17.19 17.44 -0.078 -0.001 17.269 0.253
2457244.49652 0.00 -2.35 6.21 -0.192 -0.006 -2.156 8.560
2457244.49722 0.00 2.29 11.75 -0.115 -0.004 2.413 9.459
2457244.49975 0.00 14.20 23.86 0.115 0.004 14.085 9.656
2457244.49722 0.00 2.29 11.75 -0.115 -0.004 2.413 9.459
2457244.49805 0.00 6.84 16.77 -0.034 -0.001 6.874 9.935
"""
vhs_iraf = []
for line in rvcorr_result.strip().split("\n")[5:]:
vhs_iraf.append(float(line.split()[2]))
vhs_iraf = vhs_iraf * u.km / u.s
targets = SkyCoord(input_radecs, obstime=test_input_time, location=test_input_loc)
vhs_astropy = targets.radial_velocity_correction("heliocentric")
assert_quantity_allclose(vhs_astropy, vhs_iraf, atol=150 * u.m / u.s)
def test_barycorr(input_radecs):
barycorr_bvcs = (
np.loadtxt(get_pkg_data_filename("data/barycorr_bvcs.dat")) * u.m / u.s
)
# this tries the *other* way of calling radial_velocity_correction relative
# to the IRAF tests
bvcs_astropy = input_radecs.radial_velocity_correction(
obstime=test_input_time, location=test_input_loc, kind="barycentric"
)
assert_quantity_allclose(bvcs_astropy, barycorr_bvcs, atol=10 * u.mm / u.s)
def test_rvcorr_multiple_obstimes_onskycoord():
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
arrtime = Time("2005-03-21 00:00:00") + np.linspace(-1, 1, 10) * u.day
sc = SkyCoord(1 * u.deg, 2 * u.deg, 100 * u.kpc, obstime=arrtime, location=loc)
rvcbary_sc2 = sc.radial_velocity_correction(kind="barycentric")
assert len(rvcbary_sc2) == 10
    # check the combination of multiple obstimes with multiple coordinates
sc = SkyCoord(
([1] * 10) * u.deg, 2 * u.deg, 100 * u.kpc, obstime=arrtime, location=loc
)
rvcbary_sc3 = sc.radial_velocity_correction(kind="barycentric")
assert len(rvcbary_sc3) == 10
def test_invalid_argument_combos():
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
time = Time("2005-03-21 00:00:00")
timel = Time("2005-03-21 00:00:00", location=loc)
scwattrs = SkyCoord(1 * u.deg, 2 * u.deg, obstime=time, location=loc)
scwoattrs = SkyCoord(1 * u.deg, 2 * u.deg)
scwattrs.radial_velocity_correction()
with pytest.raises(ValueError):
scwattrs.radial_velocity_correction(obstime=time, location=loc)
with pytest.raises(TypeError):
scwoattrs.radial_velocity_correction(obstime=time)
scwoattrs.radial_velocity_correction(obstime=time, location=loc)
with pytest.raises(TypeError):
scwoattrs.radial_velocity_correction()
with pytest.raises(ValueError):
scwattrs.radial_velocity_correction(timel)
def test_regression_9645():
sc = SkyCoord(
10 * u.deg,
20 * u.deg,
distance=5 * u.pc,
obstime=test_input_time,
pm_ra_cosdec=0 * u.mas / u.yr,
pm_dec=0 * u.mas / u.yr,
radial_velocity=0 * u.km / u.s,
)
sc_novel = SkyCoord(
10 * u.deg, 20 * u.deg, distance=5 * u.pc, obstime=test_input_time
)
corr = sc.radial_velocity_correction(
obstime=test_input_time, location=test_input_loc
)
corr_novel = sc_novel.radial_velocity_correction(
obstime=test_input_time, location=test_input_loc
)
assert_quantity_allclose(corr, corr_novel)
def test_barycorr_withvels(input_radecs):
barycorr_bvcs = (
np.loadtxt(get_pkg_data_filename("data/barycorr_bvcs_withvels.dat")) * u.m / u.s
)
bvcs_astropy = SkyCoord(
input_radecs.ra,
input_radecs.dec,
pm_ra_cosdec=np.linspace(-1000, 1000, input_radecs.size) * u.mas / u.yr,
pm_dec=np.linspace(0, 1000, input_radecs.size) * u.mas / u.yr,
radial_velocity=np.linspace(0, 100, input_radecs.size) * u.km / u.s,
distance=np.linspace(10, 100, input_radecs.size) * u.pc,
obstime=test_input_time,
).radial_velocity_correction(obstime=test_input_time, location=test_input_loc)
assert_quantity_allclose(bvcs_astropy, barycorr_bvcs, atol=10 * u.mm / u.s)
def test_warning_no_obstime_on_skycoord():
c = SkyCoord(
l=10 * u.degree,
b=45 * u.degree,
pm_l_cosb=34 * u.mas / u.yr,
pm_b=-117 * u.mas / u.yr,
distance=50 * u.pc,
frame="galactic",
)
with pytest.warns(Warning):
c.radial_velocity_correction("barycentric", test_input_time, test_input_loc)
@pytest.mark.remote_data
def test_regression_10094():
"""
Make sure that when we include the proper motion and radial velocity of
a SkyCoord, our velocity corrections remain close to TEMPO2.
We check that tau Ceti is within 5mm/s
"""
# Wright & Eastman (2014) Table2
# Corrections for tau Ceti
wright_table = Table.read(
get_pkg_data_filename("coordinates/wright_eastmann_2014_tau_ceti.fits")
)
reduced_jds = wright_table["JD-2400000"]
tempo2 = wright_table["TEMPO2"]
barycorr = wright_table["BARYCORR"]
    # tau Ceti Hipparcos data
tauCet = SkyCoord(
"01 44 05.1275 -15 56 22.4006",
unit=(u.hour, u.deg),
pm_ra_cosdec=-1721.05 * u.mas / u.yr,
pm_dec=854.16 * u.mas / u.yr,
distance=Distance(parallax=273.96 * u.mas),
radial_velocity=-16.597 * u.km / u.s,
obstime=Time(48348.5625, format="mjd"),
)
    # CTIO location as used in Wright & Eastman
xyz = u.Quantity([1814985.3, -5213916.8, -3187738.1], u.m)
obs = EarthLocation(*xyz)
times = Time(2400000, reduced_jds, format="jd")
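    # The table columns appear to store the corrections as dimensionless values
    # (an assumption inferred from the scaling below), so multiply by the speed
    # of light to obtain velocities.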
tempo2 = tempo2 * speed_of_light
barycorr = barycorr * speed_of_light
astropy = tauCet.radial_velocity_correction(location=obs, obstime=times)
assert_quantity_allclose(astropy, tempo2, atol=5 * u.mm / u.s)
assert_quantity_allclose(astropy, barycorr, atol=5 * u.mm / u.s)
|
7773f634a58f4609f7c57d5beccfc6ac615c87be1daca94f47f2223cda3656f0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import builtin_frames as bf
from astropy.coordinates import galactocentric_frame_defaults
from astropy.coordinates import representation as r
from astropy.coordinates.builtin_frames import CIRS, ICRS, Galactic, Galactocentric
from astropy.coordinates.errors import ConvertError
from astropy.units import allclose as quantity_allclose
POSITION_ON_SKY = {"ra": 37.4 * u.deg, "dec": -55.8 * u.deg}
DISTANCE = {"distance": 150 * u.pc}
PROPER_MOTION = {"pm_ra_cosdec": -21.2 * u.mas / u.yr, "pm_dec": 17.1 * u.mas / u.yr}
RADIAL_VELOCITY = {"radial_velocity": 105.7 * u.km / u.s}
CARTESIAN_POSITION = {
"x": 1 * u.pc,
"y": 2 * u.pc,
"z": 3 * u.pc,
"representation_type": r.CartesianRepresentation,
}
CARTESIAN_REPRESENTATION_KEYWORD_STR = {"representation_type": "cartesian"}
CARTESIAN_VELOCITY = {
"v_x": 1 * u.km / u.s,
"v_y": 2 * u.km / u.s,
"v_z": 3 * u.km / u.s,
"differential_type": r.CartesianDifferential,
}
CARTESIAN_DIFFERENTIAL_KEYWORD_STR = {"differential_type": "cartesian"}
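# The keyword dictionaries above are merged in the parametrized tests below to
# build frames with different combinations of on-sky position, distance, proper
# motion and radial velocity, in both spherical and Cartesian representations.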
def test_api():
# transform observed Barycentric velocities to full-space Galactocentric
with galactocentric_frame_defaults.set("latest"):
icrs = ICRS(**POSITION_ON_SKY, **DISTANCE, **PROPER_MOTION, **RADIAL_VELOCITY)
icrs.transform_to(Galactocentric())
# transform a set of ICRS proper motions to Galactic
ICRS(**POSITION_ON_SKY, **PROPER_MOTION).transform_to(Galactic())
@pytest.mark.parametrize(
"kwargs",
[
POSITION_ON_SKY,
# In Python 3.9 we could write `POSITION_ON_SKY | DISTANCE`
{**POSITION_ON_SKY, **DISTANCE},
{**POSITION_ON_SKY, **PROPER_MOTION},
{**POSITION_ON_SKY, **DISTANCE, **PROPER_MOTION},
{**POSITION_ON_SKY, **RADIAL_VELOCITY},
{**POSITION_ON_SKY, **DISTANCE, **RADIAL_VELOCITY},
{**POSITION_ON_SKY, **PROPER_MOTION, **RADIAL_VELOCITY},
{**POSITION_ON_SKY, **DISTANCE, **PROPER_MOTION, **RADIAL_VELOCITY},
# Now test other representation/differential types:
CARTESIAN_POSITION,
{**CARTESIAN_POSITION, **CARTESIAN_REPRESENTATION_KEYWORD_STR},
{**CARTESIAN_POSITION, **CARTESIAN_VELOCITY},
{
**CARTESIAN_POSITION,
**CARTESIAN_VELOCITY,
**CARTESIAN_DIFFERENTIAL_KEYWORD_STR,
},
],
)
def test_all_arg_options(kwargs):
# Here we do a simple thing and just verify that passing kwargs in, we have
# access to the relevant attributes from the resulting object
icrs = ICRS(**kwargs)
gal = icrs.transform_to(Galactic())
repr_gal = repr(gal)
for k in kwargs:
if k == "differential_type":
continue
getattr(icrs, k)
if "pm_ra_cosdec" in kwargs: # should have both
assert "pm_l_cosb" in repr_gal
assert "pm_b" in repr_gal
assert "mas / yr" in repr_gal
if "radial_velocity" not in kwargs:
assert "radial_velocity" not in repr_gal
if "radial_velocity" in kwargs:
assert "radial_velocity" in repr_gal
assert "km / s" in repr_gal
if "pm_ra_cosdec" not in kwargs:
assert "pm_l_cosb" not in repr_gal
assert "pm_b" not in repr_gal
@pytest.mark.parametrize(
"cls,lon,lat",
[
[bf.ICRS, "ra", "dec"],
[bf.FK4, "ra", "dec"],
[bf.FK4NoETerms, "ra", "dec"],
[bf.FK5, "ra", "dec"],
[bf.GCRS, "ra", "dec"],
[bf.HCRS, "ra", "dec"],
[bf.LSR, "ra", "dec"],
[bf.CIRS, "ra", "dec"],
[bf.Galactic, "l", "b"],
[bf.AltAz, "az", "alt"],
[bf.Supergalactic, "sgl", "sgb"],
[bf.GalacticLSR, "l", "b"],
[bf.HeliocentricMeanEcliptic, "lon", "lat"],
[bf.GeocentricMeanEcliptic, "lon", "lat"],
[bf.BarycentricMeanEcliptic, "lon", "lat"],
[bf.PrecessedGeocentric, "ra", "dec"],
],
)
def test_expected_arg_names(cls, lon, lat):
kwargs = {
lon: 37.4 * u.deg,
lat: -55.8 * u.deg,
f"pm_{lon}_cos{lat}": -21.2 * u.mas / u.yr,
f"pm_{lat}": 17.1 * u.mas / u.yr,
}
frame = cls(**kwargs, **DISTANCE, **RADIAL_VELOCITY)
# these data are extracted from the vizier copy of XHIP:
# http://vizier.u-strasbg.fr/viz-bin/VizieR-3?-source=+V/137A/XHIP
_xhip_head = """
------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
R D pmRA pmDE Di pmGLon pmGLat RV U V W
HIP AJ2000 (deg) EJ2000 (deg) (mas/yr) (mas/yr) GLon (deg) GLat (deg) st (pc) (mas/yr) (mas/yr) (km/s) (km/s) (km/s) (km/s)
------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
""".strip()
_xhip_data = """
19 000.05331690 +38.30408633 -3.17 -15.37 112.00026470 -23.47789171 247.12 -6.40 -14.33 6.30 7.3 2.0 -17.9
20 000.06295067 +23.52928427 36.11 -22.48 108.02779304 -37.85659811 95.90 29.35 -30.78 37.80 -19.3 16.1 -34.2
21 000.06623581 +08.00723430 61.48 -0.23 101.69697120 -52.74179515 183.68 58.06 -20.23 -11.72 -45.2 -30.9 -1.3
24917 080.09698238 -33.39874984 -4.30 13.40 236.92324669 -32.58047131 107.38 -14.03 -1.15 36.10 -22.4 -21.3 -19.9
59207 182.13915108 +65.34963517 18.17 5.49 130.04157185 51.18258601 56.00 -18.98 -0.49 5.70 1.5 6.1 4.4
87992 269.60730667 +36.87462906 -89.58 72.46 62.98053142 25.90148234 129.60 45.64 105.79 -4.00 -39.5 -15.8 56.7
115110 349.72322473 -28.74087144 48.86 -9.25 23.00447250 -69.52799804 116.87 -8.37 -49.02 15.00 -16.8 -12.2 -23.6
""".strip()
# in principle we could parse the above as a table, but doing it "manually"
# makes this test less tied to Table working correctly
@pytest.mark.parametrize(
"hip,ra,dec,pmra,pmdec,glon,glat,dist,pmglon,pmglat,rv,U,V,W",
[[float(val) for val in row.split()] for row in _xhip_data.split("\n")],
)
def test_xhip_galactic(
hip, ra, dec, pmra, pmdec, glon, glat, dist, pmglon, pmglat, rv, U, V, W
):
i = ICRS(
ra * u.deg,
dec * u.deg,
dist * u.pc,
pm_ra_cosdec=pmra * u.marcsec / u.yr,
pm_dec=pmdec * u.marcsec / u.yr,
radial_velocity=rv * u.km / u.s,
)
g = i.transform_to(Galactic())
    # precision is limited by the 2-decimal-digit string representation of the pms
assert quantity_allclose(
g.pm_l_cosb, pmglon * u.marcsec / u.yr, atol=0.01 * u.marcsec / u.yr
)
assert quantity_allclose(
g.pm_b, pmglat * u.marcsec / u.yr, atol=0.01 * u.marcsec / u.yr
)
# make sure UVW also makes sense
uvwg = g.cartesian.differentials["s"]
    # precision is limited by the 1-decimal-digit string representation of the vels
assert quantity_allclose(uvwg.d_x, U * u.km / u.s, atol=0.1 * u.km / u.s)
assert quantity_allclose(uvwg.d_y, V * u.km / u.s, atol=0.1 * u.km / u.s)
assert quantity_allclose(uvwg.d_z, W * u.km / u.s, atol=0.1 * u.km / u.s)
@pytest.mark.parametrize(
"kwargs,expect_success",
(
(POSITION_ON_SKY, False),
({**POSITION_ON_SKY, **DISTANCE}, True),
({**POSITION_ON_SKY, **PROPER_MOTION}, False),
({**POSITION_ON_SKY, **RADIAL_VELOCITY}, False),
({**POSITION_ON_SKY, **DISTANCE, **RADIAL_VELOCITY}, False),
({**POSITION_ON_SKY, **PROPER_MOTION, **RADIAL_VELOCITY}, False),
({**POSITION_ON_SKY, **DISTANCE, **PROPER_MOTION, **RADIAL_VELOCITY}, True),
),
)
def test_frame_affinetransform(kwargs, expect_success):
"""There are already tests in test_transformations.py that check that
an AffineTransform fails without full-space data, but this just checks that
things work as expected at the frame level as well.
"""
with galactocentric_frame_defaults.set("latest"):
icrs = ICRS(**kwargs)
if expect_success:
_ = icrs.transform_to(Galactocentric())
else:
with pytest.raises(ConvertError):
icrs.transform_to(Galactocentric())
def test_differential_type_arg():
"""
Test passing in an explicit differential class to the initializer or
changing the differential class via set_representation_cls
"""
icrs = ICRS(
**POSITION_ON_SKY,
pm_ra=10 * u.mas / u.yr,
pm_dec=-11 * u.mas / u.yr,
differential_type=r.UnitSphericalDifferential,
)
assert icrs.pm_ra == 10 * u.mas / u.yr
icrs = ICRS(
**POSITION_ON_SKY,
pm_ra=10 * u.mas / u.yr,
pm_dec=-11 * u.mas / u.yr,
differential_type={"s": r.UnitSphericalDifferential},
)
assert icrs.pm_ra == 10 * u.mas / u.yr
icrs = ICRS(
ra=1 * u.deg,
dec=60 * u.deg,
pm_ra_cosdec=10 * u.mas / u.yr,
pm_dec=-11 * u.mas / u.yr,
)
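    # With dec = 60 deg, cos(dec) = 0.5, so once the differential class is
    # switched to the plain UnitSphericalDifferential (which carries pm_ra
    # rather than pm_ra_cosdec) we expect pm_ra = pm_ra_cosdec / cos(dec)
    # = 10 / 0.5 = 20 mas/yr.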
icrs.set_representation_cls(s=r.UnitSphericalDifferential)
assert quantity_allclose(icrs.pm_ra, 20 * u.mas / u.yr)
# incompatible representation and differential
with pytest.raises(TypeError):
ICRS(**POSITION_ON_SKY, **CARTESIAN_VELOCITY)
# specify both
icrs = ICRS(**CARTESIAN_POSITION, **CARTESIAN_VELOCITY)
assert icrs.x == 1 * u.pc
assert icrs.y == 2 * u.pc
assert icrs.z == 3 * u.pc
assert icrs.v_x == 1 * u.km / u.s
assert icrs.v_y == 2 * u.km / u.s
assert icrs.v_z == 3 * u.km / u.s
def test_slicing_preserves_differential():
icrs = ICRS(**POSITION_ON_SKY, **DISTANCE, **PROPER_MOTION, **RADIAL_VELOCITY)
icrs2 = icrs.reshape(1, 1)[:1, 0]
for name in icrs.representation_component_names.keys():
assert getattr(icrs, name) == getattr(icrs2, name)[0]
for name in icrs.get_representation_component_names("s").keys():
assert getattr(icrs, name) == getattr(icrs2, name)[0]
def test_shorthand_attributes():
# Check that attribute access works
# for array data:
n = 4
icrs1 = ICRS(
ra=np.random.uniform(0, 360, n) * u.deg,
dec=np.random.uniform(-90, 90, n) * u.deg,
distance=100 * u.pc,
pm_ra_cosdec=np.random.normal(0, 100, n) * u.mas / u.yr,
pm_dec=np.random.normal(0, 100, n) * u.mas / u.yr,
radial_velocity=np.random.normal(0, 100, n) * u.km / u.s,
)
v = icrs1.velocity
pm = icrs1.proper_motion
assert quantity_allclose(pm[0], icrs1.pm_ra_cosdec)
assert quantity_allclose(pm[1], icrs1.pm_dec)
# for scalar data:
icrs2 = ICRS(**POSITION_ON_SKY, **DISTANCE, **PROPER_MOTION, **RADIAL_VELOCITY)
v = icrs2.velocity
pm = icrs2.proper_motion
assert quantity_allclose(pm[0], icrs2.pm_ra_cosdec)
assert quantity_allclose(pm[1], icrs2.pm_dec)
# check that it fails where we expect:
# no distance
icrs3 = ICRS(**POSITION_ON_SKY, **PROPER_MOTION, **RADIAL_VELOCITY)
with pytest.raises(ValueError):
icrs3.velocity
icrs3.set_representation_cls("cartesian")
assert hasattr(icrs3, "radial_velocity")
assert quantity_allclose(icrs3.radial_velocity, 105.7 * u.km / u.s)
icrs4 = ICRS(**CARTESIAN_POSITION, **CARTESIAN_VELOCITY)
icrs4.radial_velocity
@pytest.mark.parametrize(
"icrs_coords", [POSITION_ON_SKY, {**POSITION_ON_SKY, **PROPER_MOTION}]
)
def test_negative_distance(icrs_coords):
"""Regression test: #7408
Make sure that negative parallaxes turned into distances are handled right
"""
c = ICRS(distance=(-10 * u.mas).to(u.pc, u.parallax()), **icrs_coords)
assert quantity_allclose(c.ra, 37.4 * u.deg)
assert quantity_allclose(c.dec, -55.8 * u.deg)
def test_velocity_units():
"""Check that the differential data given has compatible units
with the time-derivative of representation data"""
with pytest.raises(
ValueError,
match=(
'^x has unit "pc" with physical type "length", but v_x has incompatible'
' unit "" with physical type "dimensionless" instead of the expected'
r' "speed/velocity".$'
),
):
ICRS(**CARTESIAN_POSITION, v_x=1, v_y=2, v_z=3, differential_type="cartesian")
def test_frame_with_velocity_without_distance_can_be_transformed():
rep = CIRS(**POSITION_ON_SKY, **PROPER_MOTION).transform_to(ICRS())
assert "<ICRS Coordinate: (ra, dec, distance) in" in repr(rep)
|
0babc602ce72ae5b5576067b344f87d5550cf9a650e8585004b70cd11987e164 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from itertools import combinations
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import Angle, EarthLocation, SkyCoord
from astropy.coordinates.builtin_frames import (
FK5,
ICRS,
AltAz,
Galactic,
SkyOffsetFrame,
)
from astropy.coordinates.distances import Distance
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
CONVERT_PRECISION = 1 * u.arcsec
ICRS_45_45 = SkyCoord(ra=45 * u.deg, dec=45 * u.deg, frame=ICRS())
M31_DISTANCE = Distance(770 * u.kpc)
POSITION_ON_SKY = {"ra": 36.4 * u.deg, "dec": -55.8 * u.deg}
DISTANCE = {"distance": 150 * u.pc}
PROPER_MOTION = {"pm_ra_cosdec": -21.2 * u.mas / u.yr, "pm_dec": 17.1 * u.mas / u.yr}
@pytest.fixture(scope="module")
def icrs_coords_with_trig_values():
# we do the 12)[1:-1] business because sometimes machine precision issues
# lead to results that are either ~0 or ~360, which mucks up the final
# comparison and leads to spurious failures. So this just avoids that by
# staying away from the edges. Explicit conversion to radians in the trig
# functions is needed so that output would be a bare `ndarray`, not a `Quantity`.
icrs_coord = ICRS(
ra=np.linspace(0, 360, 12)[1:-1] * u.deg,
dec=np.linspace(-90, 90, 12)[1:-1] * u.deg,
distance=1.0 * u.kpc,
)
return (
icrs_coord,
np.sin(icrs_coord.dec.rad),
np.cos(icrs_coord.dec.rad),
np.sin(icrs_coord.ra.rad),
np.cos(icrs_coord.ra.rad),
)
def test_altaz_attribute_transforms():
"""Test transforms between AltAz frames with different attributes."""
el1 = EarthLocation(0 * u.deg, 0 * u.deg, 0 * u.m)
origin1 = AltAz(
0 * u.deg, 0 * u.deg, obstime=Time("2000-01-01T12:00:00"), location=el1
)
coo1 = SkyCoord(1 * u.deg, 1 * u.deg, frame=SkyOffsetFrame(origin=origin1))
origin2 = AltAz(
0 * u.deg, 0 * u.deg, obstime=Time("2000-01-01T11:00:00"), location=el1
)
coo2 = coo1.transform_to(SkyOffsetFrame(origin=origin2))
assert_allclose(
[coo2.lon.wrap_at(180 * u.deg), coo2.lat],
[1.22522446, 0.70624298] * u.deg,
atol=CONVERT_PRECISION,
)
el3 = EarthLocation(0 * u.deg, 90 * u.deg, 0 * u.m)
origin3 = AltAz(
0 * u.deg, 90 * u.deg, obstime=Time("2000-01-01T12:00:00"), location=el3
)
coo3 = coo2.transform_to(SkyOffsetFrame(origin=origin3))
assert_allclose(
[coo3.lon.wrap_at(180 * u.deg), coo3.lat],
[1 * u.deg, 1 * u.deg],
atol=CONVERT_PRECISION,
)
@pytest.mark.parametrize(
"inradec,expectedlatlon, tolsep",
[
((45, 45) * u.deg, (0, 0) * u.deg, 0.001 * u.arcsec),
((45, 0) * u.deg, (0, -45) * u.deg, 0.001 * u.arcsec),
((45, 90) * u.deg, (0, 45) * u.deg, 0.001 * u.arcsec),
((46, 45) * u.deg, (1 * np.cos(45 * u.deg), 0) * u.deg, 16 * u.arcsec),
],
)
def test_skyoffset(inradec, expectedlatlon, tolsep):
skyoffset_frame = SkyOffsetFrame(origin=ICRS_45_45)
skycoord = SkyCoord(*inradec, frame=ICRS)
skycoord_inaf = skycoord.transform_to(skyoffset_frame)
assert hasattr(skycoord_inaf, "lon")
assert hasattr(skycoord_inaf, "lat")
expected = SkyCoord(*expectedlatlon, frame=skyoffset_frame)
assert skycoord_inaf.separation(expected) < tolsep
# Check we can also transform back (regression test for gh-11254).
roundtrip = skycoord_inaf.transform_to(ICRS())
assert roundtrip.separation(skycoord) < 1 * u.uas
def test_skyoffset_functional_ra():
# we do the 12)[1:-1] business because sometimes machine precision issues
# lead to results that are either ~0 or ~360, which mucks up the final
# comparison and leads to spurious failures. So this just avoids that by
# staying away from the edges
input_ra = np.linspace(0, 360, 12)[1:-1]
input_dec = np.linspace(-90, 90, 12)[1:-1]
icrs_coord = ICRS(ra=input_ra * u.deg, dec=input_dec * u.deg, distance=1.0 * u.kpc)
for ra in np.linspace(0, 360, 24):
# expected rotation
expected = ICRS(
ra=np.linspace(0 - ra, 360 - ra, 12)[1:-1] * u.deg,
dec=np.linspace(-90, 90, 12)[1:-1] * u.deg,
distance=1.0 * u.kpc,
)
expected_xyz = expected.cartesian.xyz
# actual transformation to the frame
skyoffset_frame = SkyOffsetFrame(origin=ICRS(ra * u.deg, 0 * u.deg))
actual = icrs_coord.transform_to(skyoffset_frame)
actual_xyz = actual.cartesian.xyz
# back to ICRS
roundtrip = actual.transform_to(ICRS())
roundtrip_xyz = roundtrip.cartesian.xyz
# Verify
assert_allclose(actual_xyz, expected_xyz, atol=1e-5 * u.kpc)
assert_allclose(icrs_coord.ra, roundtrip.ra, atol=1e-5 * u.deg)
assert_allclose(icrs_coord.dec, roundtrip.dec, atol=1e-5 * u.deg)
assert_allclose(icrs_coord.distance, roundtrip.distance, atol=1e-5 * u.kpc)
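# Editorial note for the two tests below: with an origin at (ra=0, dec) the
# offset frame amounts to a rotation about the y axis, so the expected Cartesian
# coordinates are built by hand as
#   x' = cos(dec) * x + sin(dec) * z
#   y' = y
#   z' = -sin(dec) * x + cos(dec) * z
# The ra+dec test additionally applies the analogous z-axis rotation by ra
# before the y-axis rotation.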
@pytest.mark.parametrize("dec", Angle(np.linspace(-90, 90, 13), u.deg))
def test_skyoffset_functional_dec(dec, icrs_coords_with_trig_values):
icrs_coord, sin_dec_i, cos_dec_i, sin_ra_i, cos_ra_i = icrs_coords_with_trig_values
# Dec rotations
# Done in xyz space because dec must be [-90,90]
# expected rotation
sin_dec = np.sin(-dec.rad)
cos_dec = np.cos(dec.rad)
expected = SkyCoord(
x=-sin_dec_i * sin_dec + cos_ra_i * cos_dec_i * cos_dec,
y=sin_ra_i * cos_dec_i,
z=sin_dec_i * cos_dec + sin_dec * cos_ra_i * cos_dec_i,
unit="kpc",
representation_type="cartesian",
)
# actual transformation to the frame
actual = icrs_coord.transform_to(SkyOffsetFrame(origin=ICRS(0 * u.deg, dec)))
# back to ICRS
roundtrip = actual.transform_to(ICRS())
# Verify
assert_allclose(actual.cartesian.xyz, expected.cartesian.xyz, atol=1e-5 * u.kpc)
assert_allclose(icrs_coord.ra, roundtrip.ra, atol=1e-5 * u.deg)
assert_allclose(icrs_coord.dec, roundtrip.dec, atol=1e-5 * u.deg)
assert_allclose(icrs_coord.distance, roundtrip.distance, atol=1e-5 * u.kpc)
@pytest.mark.parametrize("ra", Angle(np.linspace(0, 360, 10), u.deg))
@pytest.mark.parametrize("dec", Angle(np.linspace(-90, 90, 5), u.deg))
def test_skyoffset_functional_ra_dec(ra, dec, icrs_coords_with_trig_values):
icrs_coord, sin_dec_i, cos_dec_i, sin_ra_i, cos_ra_i = icrs_coords_with_trig_values
cos_dec = np.cos(dec.rad)
sin_dec = np.sin(-dec.rad)
cos_ra = np.cos(ra.rad)
sin_ra = np.sin(ra.rad)
# expected rotation
expected = SkyCoord(
x=(
-sin_dec_i * sin_dec
+ cos_ra_i * cos_dec_i * cos_dec * cos_ra
+ sin_ra_i * cos_dec_i * cos_dec * sin_ra
),
y=sin_ra_i * cos_dec_i * cos_ra - cos_ra_i * cos_dec_i * sin_ra,
z=(
sin_dec_i * cos_dec
+ sin_dec * cos_ra * cos_ra_i * cos_dec_i
+ sin_dec * sin_ra * sin_ra_i * cos_dec_i
),
unit="kpc",
representation_type="cartesian",
)
# actual transformation to the frame
actual = icrs_coord.transform_to(SkyOffsetFrame(origin=ICRS(ra, dec)))
# back to ICRS
roundtrip = actual.transform_to(ICRS())
# Verify
assert_allclose(actual.cartesian.xyz, expected.cartesian.xyz, atol=1e-5 * u.kpc)
assert_allclose(icrs_coord.ra, roundtrip.ra, atol=1e-4 * u.deg)
assert_allclose(icrs_coord.dec, roundtrip.dec, atol=1e-5 * u.deg)
assert_allclose(icrs_coord.distance, roundtrip.distance, atol=1e-5 * u.kpc)
def test_skycoord_skyoffset_frame():
m31 = SkyCoord(10.6847083, 41.26875, frame="icrs", unit=u.deg)
m33 = SkyCoord(23.4621, 30.6599417, frame="icrs", unit=u.deg)
m31_astro = m31.skyoffset_frame()
m31_in_m31 = m31.transform_to(m31_astro)
m33_in_m31 = m33.transform_to(m31_astro)
assert_allclose(
[m31_in_m31.lon, m31_in_m31.lat], [0, 0] * u.deg, atol=1e-10 * u.deg
)
assert_allclose(
[m33_in_m31.lon, m33_in_m31.lat], [11.13135175, -9.79084759] * u.deg
)
assert_allclose(
m33.separation(m31), np.hypot(m33_in_m31.lon, m33_in_m31.lat), atol=0.1 * u.deg
)
@pytest.mark.parametrize(
"from_origin,to_origin",
combinations(
(
ICRS(10.6847929 * u.deg, 41.2690650 * u.deg, M31_DISTANCE),
FK5(10.6847929 * u.deg, 41.2690650 * u.deg, M31_DISTANCE),
Galactic(121.1744050 * u.deg, -21.5729360 * u.deg, M31_DISTANCE),
),
r=2,
),
)
def test_m31_coord_transforms(from_origin, to_origin):
"""
This tests a variety of coordinate conversions for the Chandra point-source
catalog location of M31 from NED, via SkyOffsetFrames
"""
from_pos = SkyOffsetFrame(1 * u.deg, 1 * u.deg, origin=from_origin)
to_astroframe = SkyOffsetFrame(origin=to_origin)
target_pos = from_pos.transform_to(to_astroframe)
assert_allclose(
to_origin.separation(target_pos),
np.hypot(from_pos.lon, from_pos.lat),
atol=CONVERT_PRECISION,
)
roundtrip_pos = target_pos.transform_to(from_pos)
assert_allclose(
[roundtrip_pos.lon.wrap_at(180 * u.deg), roundtrip_pos.lat],
[1.0 * u.deg, 1.0 * u.deg],
atol=CONVERT_PRECISION,
)
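# Editorial note for the rotation tests below: the target sits 1 deg above the
# origin, and the expected (lon, lat) pairs correspond to rotating that offset
# by the frame rotation angle `rot`:
#   lon' = lon * cos(rot) - lat * sin(rot)
#   lat' = lon * sin(rot) + lat * cos(rot)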
@pytest.mark.parametrize(
"rotation, expectedlatlon",
[
(0 * u.deg, [0, 1] * u.deg),
(180 * u.deg, [0, -1] * u.deg),
(90 * u.deg, [-1, 0] * u.deg),
(-90 * u.deg, [1, 0] * u.deg),
],
)
def test_rotation(rotation, expectedlatlon):
target = ICRS(45 * u.deg, 46 * u.deg)
trans = target.transform_to(SkyOffsetFrame(origin=ICRS_45_45, rotation=rotation))
assert_allclose(
[trans.lon.wrap_at(180 * u.deg), trans.lat], expectedlatlon, atol=1e-10 * u.deg
)
@pytest.mark.parametrize(
"rotation, expectedlatlon",
[
(0 * u.deg, [0, 1] * u.deg),
(180 * u.deg, [0, -1] * u.deg),
(90 * u.deg, [-1, 0] * u.deg),
(-90 * u.deg, [1, 0] * u.deg),
],
)
def test_skycoord_skyoffset_frame_rotation(rotation, expectedlatlon):
"""Test if passing a rotation argument via SkyCoord works"""
target = SkyCoord(45 * u.deg, 46 * u.deg)
trans = target.transform_to(ICRS_45_45.skyoffset_frame(rotation=rotation))
assert_allclose(
[trans.lon.wrap_at(180 * u.deg), trans.lat], expectedlatlon, atol=1e-10 * u.deg
)
def test_skyoffset_names():
aframe1 = SkyOffsetFrame(origin=ICRS_45_45)
assert type(aframe1).__name__ == "SkyOffsetICRS"
aframe2 = SkyOffsetFrame(origin=Galactic(45 * u.deg, 45 * u.deg))
assert type(aframe2).__name__ == "SkyOffsetGalactic"
def test_skyoffset_origindata():
origin = ICRS()
with pytest.raises(ValueError):
SkyOffsetFrame(origin=origin)
@pytest.mark.parametrize("lon", (190, -10) * u.deg)
def test_skyoffset_lonwrap(lon):
sc = SkyCoord(lon=lon, lat=-45 * u.deg, frame=SkyOffsetFrame(origin=ICRS_45_45))
assert sc.lon < 180 * u.deg
assert sc.realize_frame(sc.represent_as("cartesian")).lon < 180 * u.deg
def test_skyoffset_velocity():
c = ICRS(**POSITION_ON_SKY, **PROPER_MOTION)
c_skyoffset = c.transform_to(SkyOffsetFrame(origin=c))
assert_allclose(c_skyoffset.pm_lon_coslat, c.pm_ra_cosdec)
assert_allclose(c_skyoffset.pm_lat, c.pm_dec)
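# The same rotation rule applies to the proper-motion components in the test
# below: (pm_lon_coslat, pm_lat) = (1, 2) mas/yr rotated by `rotation` gives the
# expected pairs, e.g. (-1/sqrt(2), 3/sqrt(2)) for a 45 deg rotation.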
@pytest.mark.parametrize(
"rotation, expectedpmlonlat",
[
(0 * u.deg, [1, 2] * u.mas / u.yr),
(45 * u.deg, [-(2**-0.5), 3 * 2**-0.5] * u.mas / u.yr),
(90 * u.deg, [-2, 1] * u.mas / u.yr),
(180 * u.deg, [-1, -2] * u.mas / u.yr),
(-90 * u.deg, [2, -1] * u.mas / u.yr),
],
)
def test_skyoffset_velocity_rotation(rotation, expectedpmlonlat):
sc = SkyCoord(
**POSITION_ON_SKY, pm_ra_cosdec=1 * u.mas / u.yr, pm_dec=2 * u.mas / u.yr
)
c_skyoffset0 = sc.transform_to(sc.skyoffset_frame(rotation=rotation))
assert_allclose(c_skyoffset0.pm_lon_coslat, expectedpmlonlat[0])
assert_allclose(c_skyoffset0.pm_lat, expectedpmlonlat[1])
def test_skyoffset_two_frames_interfering():
"""Regression test for gh-11277, where it turned out that the
origin argument validation from one SkyOffsetFrame could interfere
with that of another.
Note that this example brought out a different bug than that at the
top of gh-11277, viz., that an attempt was made to set origin on a SkyCoord
    when it should just stay part of the SkyOffsetFrame.
"""
# Example adapted from @bmerry's minimal example at
# https://github.com/astropy/astropy/issues/11277#issuecomment-825492335
altaz_frame = AltAz(
obstime=Time("2020-04-22T13:00:00Z"), location=EarthLocation(18, -30)
)
target = SkyCoord(alt=70 * u.deg, az=150 * u.deg, frame=altaz_frame)
dirs_altaz_offset = SkyCoord(
lon=[-0.02, 0.01, 0.0, 0.0, 0.0] * u.rad,
lat=[0.0, 0.2, 0.0, -0.3, 0.1] * u.rad,
frame=target.skyoffset_frame(),
)
dirs_altaz = dirs_altaz_offset.transform_to(altaz_frame)
dirs_icrs = dirs_altaz.transform_to(ICRS())
target_icrs = target.transform_to(ICRS())
# The line below was almost guaranteed to fail.
dirs_icrs.transform_to(target_icrs.skyoffset_frame())
|