hash: e315de4ce9be4a1a98ffc1717946b6d49e32405e91494f93ae2cbb3bde276db5
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "FITS" unit format.
"""
import copy
import keyword
import operator
import numpy as np
from . import core, generic, utils
class Fits(generic.Generic):
"""
The FITS standard unit format.
This supports the format defined in the Units section of the `FITS
Standard <https://fits.gsfc.nasa.gov/fits_standard.html>`_.
"""
name = 'fits'
@staticmethod
def _generate_unit_names():
from astropy import units as u
names = {}
deprecated_names = set()
# Note about deprecated units: before v2.0, several units were treated
# as deprecated (G, barn, erg, Angstrom, angstrom). However, in the
# FITS 3.0 standard, these units are explicitly listed in the allowed
# units, but deprecated in the IAU Style Manual (McNally 1988). So
# after discussion (https://github.com/astropy/astropy/issues/2933),
# these units have been removed from the lists of deprecated units and
# bases.
bases = [
'm', 'g', 's', 'rad', 'sr', 'K', 'A', 'mol', 'cd',
'Hz', 'J', 'W', 'V', 'N', 'Pa', 'C', 'Ohm', 'S',
'F', 'Wb', 'T', 'H', 'lm', 'lx', 'a', 'yr', 'eV',
'pc', 'Jy', 'mag', 'R', 'bit', 'byte', 'G', 'barn'
]
deprecated_bases = []
prefixes = [
'y', 'z', 'a', 'f', 'p', 'n', 'u', 'm', 'c', 'd',
'', 'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
special_cases = {'dbyte': u.Unit('dbyte', 0.1*u.byte)}
for base in bases + deprecated_bases:
for prefix in prefixes:
key = prefix + base
if keyword.iskeyword(key):
continue
elif key in special_cases:
names[key] = special_cases[key]
else:
names[key] = getattr(u, key)
for base in deprecated_bases:
for prefix in prefixes:
deprecated_names.add(prefix + base)
simple_units = [
'deg', 'arcmin', 'arcsec', 'mas', 'min', 'h', 'd', 'Ry',
'solMass', 'u', 'solLum', 'solRad', 'AU', 'lyr', 'count',
'ct', 'photon', 'ph', 'pixel', 'pix', 'D', 'Sun', 'chan',
'bin', 'voxel', 'adu', 'beam', 'erg', 'Angstrom', 'angstrom'
]
deprecated_units = []
for unit in simple_units + deprecated_units:
names[unit] = getattr(u, unit)
for unit in deprecated_units:
deprecated_names.add(unit)
return names, deprecated_names, []
@classmethod
def _validate_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
"Unit '{}' not supported by the FITS standard. {}".format(
unit, utils.did_you_mean_units(
unit, cls._units, cls._deprecated_units,
cls._to_decomposed_alternative)))
else:
raise ValueError()
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], 'FITS',
cls._to_decomposed_alternative)
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
cls._validate_unit(unit, detailed_exception=detailed_exception)
return cls._units[unit]
@classmethod
def _get_unit_name(cls, unit):
name = unit.get_format_name('fits')
cls._validate_unit(name)
return name
@classmethod
def to_string(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
parts = []
if isinstance(unit, core.CompositeUnit):
base = np.log10(unit.scale)
if base % 1.0 != 0.0:
raise core.UnitScaleError(
"The FITS unit format is not able to represent scales "
"that are not powers of 10. Multiply your data by "
"{:e}.".format(unit.scale))
elif unit.scale != 1.0:
parts.append(f'10**{int(base)}')
pairs = list(zip(unit.bases, unit.powers))
if len(pairs):
pairs.sort(key=operator.itemgetter(1), reverse=True)
parts.append(cls._format_unit_list(pairs))
s = ' '.join(parts)
elif isinstance(unit, core.NamedUnit):
s = cls._get_unit_name(unit)
return s
@classmethod
def _to_decomposed_alternative(cls, unit):
try:
s = cls.to_string(unit)
except core.UnitScaleError:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return f'{cls.to_string(unit)} (with data multiplied by {scale})'
return s
@classmethod
def parse(cls, s, debug=False):
result = super().parse(s, debug)
if hasattr(result, 'function_unit'):
raise ValueError("Function units are not yet supported for "
"FITS units.")
return result
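# Usage sketch (illustrative; relies only on the public Unit.to_string API):
#
#     from astropy import units as u
#     u.erg.to_string(format='fits')           # -> 'erg'
#     (u.km / u.s).to_string(format='fits')    # -> 'km s-1'
#     # Scales must be powers of ten, so this raises UnitScaleError:
#     u.CompositeUnit(3.0, [u.m], [1]).to_string(format='fits')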
hash: e3e58728c8c3f4b01b6101e344a55350629be010798fa5237dbf8f602a555122
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# The idea for this module (but no code) was borrowed from the
# quantities (http://pythonhosted.org/quantities/) package.
"""Helper functions for Quantity.
In particular, this implements the logic that determines scaling and result
units for a given ufunc, given input units.
"""
from fractions import Fraction
import numpy as np
from astropy.units.core import (
UnitConversionError, UnitsError, UnitTypeError, dimensionless_unscaled,
get_current_unit_registry, unit_scale_converter)
from . import UFUNC_HELPERS, UNSUPPORTED_UFUNCS
def _d(unit):
if unit is None:
return dimensionless_unscaled
else:
return unit
def get_converter(from_unit, to_unit):
"""Like Unit._get_converter, except returns None if no scaling is needed,
i.e., if the inferred scale is unity."""
converter = from_unit._get_converter(to_unit)
return None if converter is unit_scale_converter else converter
def get_converters_and_unit(f, unit1, unit2):
converters = [None, None]
# By default, we try adjusting unit2 to unit1, so that the result will
# be unit1 as well. But if there is no second unit, we have to try
# adjusting unit1 (to dimensionless, see below).
if unit2 is None:
if unit1 is None:
# No units for any input -- e.g., np.add(a1, a2, out=q)
return converters, dimensionless_unscaled
changeable = 0
# swap units.
unit2 = unit1
unit1 = None
elif unit2 is unit1:
# ensure identical units is fast ("==" is slow, so avoid that).
return converters, unit1
else:
changeable = 1
# Try to get a converter from unit2 to unit1.
if unit1 is None:
try:
converters[changeable] = get_converter(unit2,
dimensionless_unscaled)
except UnitsError:
# special case: would be OK if unitless number is zero, inf, nan
converters[1-changeable] = False
return converters, unit2
else:
return converters, dimensionless_unscaled
else:
try:
converters[changeable] = get_converter(unit2, unit1)
except UnitsError:
raise UnitConversionError(
"Can only apply '{}' function to quantities "
"with compatible dimensions"
.format(f.__name__))
return converters, unit1
# SINGLE ARGUMENT UFUNC HELPERS
#
# The functions below take a single argument, which is the quantity upon which
# the ufunc is being used. The output of the helper function should be two
# values: a list with a single converter to be used to scale the input before
# it is being passed to the ufunc (or None if no conversion is needed), and
# the unit the output will be in.
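# As a concrete illustration of this contract (a hypothetical helper, not
# registered anywhere below): a ufunc that needs its input in seconds and
# produces a dimensionless result could be supported with
#
#     def helper_time_to_dimensionless(f, unit):
#         from astropy.units.si import s
#         try:
#             return [get_converter(unit, s)], dimensionless_unscaled
#         except UnitsError:
#             raise UnitTypeError(
#                 f"Can only apply '{f.__name__}' to quantities "
#                 "with time units")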
def helper_onearg_test(f, unit):
return ([None], None)
def helper_invariant(f, unit):
return ([None], _d(unit))
def helper_square(f, unit):
return ([None], unit ** 2 if unit is not None else dimensionless_unscaled)
def helper_reciprocal(f, unit):
return ([None], unit ** -1 if unit is not None else dimensionless_unscaled)
one_half = 0.5 # faster than Fraction(1, 2)
one_third = Fraction(1, 3)
def helper_sqrt(f, unit):
return ([None], unit ** one_half if unit is not None
else dimensionless_unscaled)
def helper_cbrt(f, unit):
return ([None], (unit ** one_third if unit is not None
else dimensionless_unscaled))
def helper_modf(f, unit):
if unit is None:
return [None], (dimensionless_unscaled, dimensionless_unscaled)
try:
return ([get_converter(unit, dimensionless_unscaled)],
(dimensionless_unscaled, dimensionless_unscaled))
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper__ones_like(f, unit):
return [None], dimensionless_unscaled
def helper_dimensionless_to_dimensionless(f, unit):
if unit is None:
return [None], dimensionless_unscaled
try:
return ([get_converter(unit, dimensionless_unscaled)],
dimensionless_unscaled)
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_dimensionless_to_radian(f, unit):
from astropy.units.si import radian
if unit is None:
return [None], radian
try:
return [get_converter(unit, dimensionless_unscaled)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_degree_to_radian(f, unit):
from astropy.units.si import degree, radian
try:
return [get_converter(unit, degree)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_degree(f, unit):
from astropy.units.si import degree, radian
try:
return [get_converter(unit, radian)], degree
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_dimensionless(f, unit):
from astropy.units.si import radian
try:
return [get_converter(unit, radian)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_frexp(f, unit):
if not unit.is_unity():
raise UnitTypeError("Can only apply '{}' function to "
"unscaled dimensionless quantities"
.format(f.__name__))
return [None], (None, None)
# TWO ARGUMENT UFUNC HELPERS
#
# The functions below take two arguments. The output of the helper function
# should be two values: a tuple of two converters to be used to scale the
# inputs before being passed to the ufunc (None if no conversion is needed),
# and the unit the output will be in.
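# For example, for np.multiply(3. * u.m, 2. * u.s), helper_multiplication
# below returns ([None, None], u.m * u.s): neither input needs scaling and
# the result unit is m s (illustrative, with astropy.units imported as u).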
def helper_multiplication(f, unit1, unit2):
return [None, None], _d(unit1) * _d(unit2)
def helper_division(f, unit1, unit2):
return [None, None], _d(unit1) / _d(unit2)
def helper_power(f, unit1, unit2):
# TODO: find a better way to do this, currently need to signal that one
# still needs to raise power of unit1 in main code
if unit2 is None:
return [None, None], False
try:
return [None, get_converter(unit2, dimensionless_unscaled)], False
except UnitsError:
raise UnitTypeError("Can only raise something to a "
"dimensionless quantity")
def helper_ldexp(f, unit1, unit2):
if unit2 is not None:
raise TypeError("Cannot use ldexp with a quantity "
"as second argument.")
else:
return [None, None], _d(unit1)
def helper_copysign(f, unit1, unit2):
# if first arg is not a quantity, just return plain array
if unit1 is None:
return [None, None], None
else:
return [None, None], unit1
def helper_heaviside(f, unit1, unit2):
try:
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply 'heaviside' function with a "
"dimensionless second argument.")
return ([None, converter2], dimensionless_unscaled)
def helper_two_arg_dimensionless(f, unit1, unit2):
try:
converter1 = (get_converter(unit1, dimensionless_unscaled)
if unit1 is not None else None)
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"dimensionless quantities"
.format(f.__name__))
return ([converter1, converter2], dimensionless_unscaled)
# This used to be a separate function that just called get_converters_and_unit.
# Using it directly saves a few us; keeping the clearer name.
helper_twoarg_invariant = get_converters_and_unit
def helper_twoarg_comparison(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, None
def helper_twoarg_invtrig(f, unit1, unit2):
from astropy.units.si import radian
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, radian
def helper_twoarg_floor_divide(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, dimensionless_unscaled
def helper_divmod(f, unit1, unit2):
converters, result_unit = get_converters_and_unit(f, unit1, unit2)
return converters, (dimensionless_unscaled, result_unit)
def helper_clip(f, unit1, unit2, unit3):
# Treat the array being clipped as primary.
converters = [None]
if unit1 is None:
result_unit = dimensionless_unscaled
try:
converters += [(None if unit is None else
get_converter(unit, dimensionless_unscaled))
for unit in (unit2, unit3)]
except UnitsError:
raise UnitConversionError(
"Can only apply '{}' function to quantities with "
"compatible dimensions".format(f.__name__))
else:
result_unit = unit1
for unit in unit2, unit3:
try:
converter = get_converter(_d(unit), result_unit)
except UnitsError:
if unit is None:
# special case: OK if unitless number is zero, inf, nan
converters.append(False)
else:
raise UnitConversionError(
"Can only apply '{}' function to quantities with "
"compatible dimensions".format(f.__name__))
else:
converters.append(converter)
return converters, result_unit
# list of ufuncs:
# https://numpy.org/doc/stable/reference/ufuncs.html#available-ufuncs
UNSUPPORTED_UFUNCS |= {
np.bitwise_and, np.bitwise_or, np.bitwise_xor, np.invert, np.left_shift,
np.right_shift, np.logical_and, np.logical_or, np.logical_xor,
np.logical_not, np.isnat, np.gcd, np.lcm}
# SINGLE ARGUMENT UFUNCS
# ufuncs that do not care about the unit and do not return a Quantity
# (but rather a boolean, or -1, 0, or +1 for np.sign).
onearg_test_ufuncs = (np.isfinite, np.isinf, np.isnan, np.sign, np.signbit)
for ufunc in onearg_test_ufuncs:
UFUNC_HELPERS[ufunc] = helper_onearg_test
# ufuncs that return a value with the same unit as the input
invariant_ufuncs = (np.absolute, np.fabs, np.conj, np.conjugate, np.negative,
np.spacing, np.rint, np.floor, np.ceil, np.trunc,
np.positive)
for ufunc in invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_invariant
# ufuncs that require dimensionless input and give dimensionless output
dimensionless_to_dimensionless_ufuncs = (np.exp, np.expm1, np.exp2, np.log,
np.log10, np.log2, np.log1p)
# Default numpy does not ship an "erf" ufunc, but some versions hacked by
# intel do. This is bad, since it means code written for that numpy will
# not run on non-hacked numpy. But still, we might as well support it.
if isinstance(getattr(np.core.umath, 'erf', None), np.ufunc):
dimensionless_to_dimensionless_ufuncs += (np.core.umath.erf,)
for ufunc in dimensionless_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
# ufuncs that require dimensionless input and give output in radians
dimensionless_to_radian_ufuncs = (np.arccos, np.arcsin, np.arctan, np.arccosh,
np.arcsinh, np.arctanh)
for ufunc in dimensionless_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_radian
# ufuncs that require input in degrees and give output in radians
degree_to_radian_ufuncs = (np.radians, np.deg2rad)
for ufunc in degree_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_degree_to_radian
# ufuncs that require input in radians and give output in degrees
radian_to_degree_ufuncs = (np.degrees, np.rad2deg)
for ufunc in radian_to_degree_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_degree
# ufuncs that require input in radians and give dimensionless output
radian_to_dimensionless_ufuncs = (np.cos, np.sin, np.tan, np.cosh, np.sinh,
np.tanh)
for ufunc in radian_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_dimensionless
# ufuncs handled as special cases
UFUNC_HELPERS[np.sqrt] = helper_sqrt
UFUNC_HELPERS[np.square] = helper_square
UFUNC_HELPERS[np.reciprocal] = helper_reciprocal
UFUNC_HELPERS[np.cbrt] = helper_cbrt
UFUNC_HELPERS[np.core.umath._ones_like] = helper__ones_like
UFUNC_HELPERS[np.modf] = helper_modf
UFUNC_HELPERS[np.frexp] = helper_frexp
# TWO ARGUMENT UFUNCS
# two argument ufuncs that require dimensionless input and give
# dimensionless output
two_arg_dimensionless_ufuncs = (np.logaddexp, np.logaddexp2)
for ufunc in two_arg_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless
# two argument ufuncs that return a value with the same unit as the input
twoarg_invariant_ufuncs = (np.add, np.subtract, np.hypot, np.maximum,
np.minimum, np.fmin, np.fmax, np.nextafter,
np.remainder, np.mod, np.fmod)
for ufunc in twoarg_invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invariant
# two argument ufuncs that need compatible inputs and return a boolean
twoarg_comparison_ufuncs = (np.greater, np.greater_equal, np.less,
np.less_equal, np.not_equal, np.equal)
for ufunc in twoarg_comparison_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_comparison
# two argument ufuncs that do inverse trigonometry
twoarg_invtrig_ufuncs = (np.arctan2,)
# another private function in numpy; use getattr in case it disappears
if isinstance(getattr(np.core.umath, '_arg', None), np.ufunc):
twoarg_invtrig_ufuncs += (np.core.umath._arg,)
for ufunc in twoarg_invtrig_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invtrig
# ufuncs handled as special cases
UFUNC_HELPERS[np.multiply] = helper_multiplication
if isinstance(getattr(np, 'matmul', None), np.ufunc):
UFUNC_HELPERS[np.matmul] = helper_multiplication
UFUNC_HELPERS[np.divide] = helper_division
UFUNC_HELPERS[np.true_divide] = helper_division
UFUNC_HELPERS[np.power] = helper_power
UFUNC_HELPERS[np.ldexp] = helper_ldexp
UFUNC_HELPERS[np.copysign] = helper_copysign
UFUNC_HELPERS[np.floor_divide] = helper_twoarg_floor_divide
UFUNC_HELPERS[np.heaviside] = helper_heaviside
UFUNC_HELPERS[np.float_power] = helper_power
UFUNC_HELPERS[np.divmod] = helper_divmod
# Check for clip ufunc; note that np.clip is a wrapper function, not the ufunc.
if isinstance(getattr(np.core.umath, 'clip', None), np.ufunc):
UFUNC_HELPERS[np.core.umath.clip] = helper_clip
del ufunc
hash: dbbee6c4f882ec9153739ca4957fd506ffed83e064d6387b4641666fadc3edc5
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Converters for Quantity."""
import threading
import numpy as np
from astropy.units.core import (
UnitConversionError, UnitsError, UnitTypeError, dimensionless_unscaled)
__all__ = ['can_have_arbitrary_unit', 'converters_and_unit',
'check_output', 'UFUNC_HELPERS', 'UNSUPPORTED_UFUNCS']
class UfuncHelpers(dict):
"""Registry of unit conversion functions to help ufunc evaluation.
Based on dict for quick access, but with a ``__missing__`` method to load
helpers for additional modules such as scipy.special and erfa.
Such modules should be registered using ``register_module``.
"""
def __init__(self, *args, **kwargs):
self.modules = {}
self.UNSUPPORTED = set() # Upper-case for backwards compatibility
self._lock = threading.RLock()
super().__init__(*args, **kwargs)
def register_module(self, module, names, importer):
"""Register (but do not import) a set of ufunc helpers.
Parameters
----------
module : str
Name of the module with the ufuncs (e.g., 'scipy.special').
names : iterable of str
Names of the module ufuncs for which helpers are available.
importer : callable
Function that imports the ufuncs and returns a dict of helpers
keyed by those ufuncs. If the value is `None`, the ufunc is
explicitly *not* supported.
"""
with self._lock:
self.modules[module] = {'names': names,
'importer': importer}
def import_module(self, module):
"""Import the helpers from the given module using its helper function.
Parameters
----------
module : str
Name of the module. Has to have been registered beforehand.
"""
with self._lock:
module_info = self.modules.pop(module)
self.update(module_info['importer']())
def __missing__(self, ufunc):
"""Called if a ufunc is not found.
Check if the ufunc is in any of the available modules, and, if so,
import the helpers for that module.
"""
with self._lock:
# Check if it was loaded while we waited for the lock
if ufunc in self:
return self[ufunc]
if ufunc in self.UNSUPPORTED:
raise TypeError(f"Cannot use ufunc '{ufunc.__name__}' with quantities")
for module, module_info in list(self.modules.items()):
if ufunc.__name__ in module_info['names']:
# A ufunc with the same name is supported by this module.
# Of course, this doesn't necessarily mean it is the
# right module. So, we try to let the importer do its work.
# If it fails (e.g., for `scipy.special`), then that's
# fine, just raise the TypeError. If it succeeds, but
# the ufunc is not found, that is also fine: we will
# enter __missing__ again and either find another
# module or get the TypeError there.
try:
self.import_module(module)
except ImportError: # pragma: no cover
pass
else:
return self[ufunc]
raise TypeError("unknown ufunc {}. If you believe this ufunc "
"should be supported, please raise an issue on "
"https://github.com/astropy/astropy"
.format(ufunc.__name__))
def __setitem__(self, key, value):
# Implementation note: in principle, we could just let `None`
# mean that something is not implemented, but this means an
# extra if clause for the output, slowing down the common
# path where a ufunc is supported.
with self._lock:
if value is None:
self.UNSUPPORTED |= {key}
self.pop(key, None)
else:
super().__setitem__(key, value)
self.UNSUPPORTED -= {key}
UFUNC_HELPERS = UfuncHelpers()
UNSUPPORTED_UFUNCS = UFUNC_HELPERS.UNSUPPORTED
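# Registration sketch (hypothetical module and ufunc name, for illustration
# only; see quantity_helper.erfa for the real 'erfa.ufunc' registration):
#
#     def _import_mypkg_helpers():
#         from mypkg import ufuncs as mu
#         return {mu.my_ufunc: lambda f, unit: ([None], unit)}
#
#     UFUNC_HELPERS.register_module('mypkg.ufuncs', ('my_ufunc',),
#                                   _import_mypkg_helpers)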
def can_have_arbitrary_unit(value):
"""Test whether the items in value can have arbitrary units
Numbers whose value does not change upon a unit change, i.e.,
zero, infinity, or not-a-number
Parameters
----------
value : number or array
Returns
-------
bool
`True` if each member is either zero or not finite, `False` otherwise
"""
return np.all(np.logical_or(np.equal(value, 0.), ~np.isfinite(value)))
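# For example (illustrative):
#     can_have_arbitrary_unit(np.array([0., np.inf, np.nan]))  # -> True
#     can_have_arbitrary_unit(np.array([0., 1.]))              # -> False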
def converters_and_unit(function, method, *args):
"""Determine the required converters and the unit of the ufunc result.
Converters are functions required to convert to a ufunc's expected unit,
e.g., radian for np.sin; or to ensure units of two inputs are consistent,
e.g., for np.add. In these examples, the unit of the result would be
dimensionless_unscaled for np.sin, and the same consistent unit for np.add.
Parameters
----------
function : `~numpy.ufunc`
Numpy universal function
method : str
Method with which the function is evaluated, e.g.,
'__call__', 'reduce', etc.
*args : `~astropy.units.Quantity` or ndarray subclass
Input arguments to the function
Raises
------
TypeError : when the specified function cannot be used with Quantities
(e.g., np.logical_or), or when the routine does not know how to handle
the specified function (in which case an issue should be raised on
https://github.com/astropy/astropy).
UnitTypeError : when the conversion to the required (or consistent) units
is not possible.
"""
# Check whether we support this ufunc, by getting the helper function
# (defined in helpers) which returns a list of function(s) that convert the
# input(s) to the unit required for the ufunc, as well as the unit the
# result will have (a tuple of units if there are multiple outputs).
ufunc_helper = UFUNC_HELPERS[function]
if method == '__call__' or (method == 'outer' and function.nin == 2):
# Find out the units of the arguments passed to the ufunc; usually,
# at least one is a quantity, but for two-argument ufuncs, the second
# could also be a Numpy array, etc. These are given unit=None.
units = [getattr(arg, 'unit', None) for arg in args]
# Determine possible conversion functions, and the result unit.
converters, result_unit = ufunc_helper(function, *units)
if any(converter is False for converter in converters):
# for multi-argument ufuncs with a quantity and a non-quantity,
# the quantity normally needs to be dimensionless, *except*
# if the non-quantity can have arbitrary unit, i.e., when it
# is all zero, infinity or NaN. In that case, the non-quantity
# can just have the unit of the quantity
# (this allows, e.g., `q > 0.` independent of unit)
try:
# Don't fold this loop into the test above: this rare case
# should not make the common case slower.
for i, converter in enumerate(converters):
if converter is not False:
continue
if can_have_arbitrary_unit(args[i]):
converters[i] = None
else:
raise UnitConversionError(
"Can only apply '{}' function to "
"dimensionless quantities when other "
"argument is not a quantity (unless the "
"latter is all zero/infinity/nan)"
.format(function.__name__))
except TypeError:
# can_have_arbitrary_unit failed: arg could not be compared
# with zero or checked to be finite. Then, ufunc will fail too.
raise TypeError("Unsupported operand type(s) for ufunc {}: "
"'{}'".format(function.__name__,
','.join([arg.__class__.__name__
for arg in args])))
# In the case of np.power and np.float_power, the unit itself needs to
# be modified by an amount that depends on one of the input values,
# so we need to treat this as a special case.
# TODO: find a better way to deal with this.
if result_unit is False:
if units[0] is None or units[0] == dimensionless_unscaled:
result_unit = dimensionless_unscaled
else:
if units[1] is None:
p = args[1]
else:
p = args[1].to(dimensionless_unscaled).value
try:
result_unit = units[0] ** p
except ValueError as exc:
# Changing the unit does not work for, e.g., array-shaped
# power, but this is OK if we're (scaled) dimensionless.
try:
converters[0] = units[0]._get_converter(
dimensionless_unscaled)
except UnitConversionError:
raise exc
else:
result_unit = dimensionless_unscaled
else: # methods for which the unit should stay the same
nin = function.nin
unit = getattr(args[0], 'unit', None)
if method == 'at' and nin <= 2:
if nin == 1:
units = [unit]
else:
units = [unit, getattr(args[2], 'unit', None)]
converters, result_unit = ufunc_helper(function, *units)
# ensure there is no 'converter' for indices (2nd argument)
converters.insert(1, None)
elif method in {'reduce', 'accumulate', 'reduceat'} and nin == 2:
converters, result_unit = ufunc_helper(function, unit, unit)
converters = converters[:1]
if method == 'reduceat':
# add 'scale' for indices (2nd argument)
converters += [None]
else:
if method in {'reduce', 'accumulate',
'reduceat', 'outer'} and nin != 2:
raise ValueError(f"{method} only supported for binary functions")
raise TypeError("Unexpected ufunc method {}. If this should "
"work, please raise an issue on"
"https://github.com/astropy/astropy"
.format(method))
# for all but __call__ method, scaling is not allowed
if unit is not None and result_unit is None:
raise TypeError("Cannot use '{1}' method on ufunc {0} with a "
"Quantity instance as the result is not a "
"Quantity.".format(function.__name__, method))
if (converters[0] is not None or
(unit is not None and unit is not result_unit and
(not result_unit.is_equivalent(unit) or
result_unit.to(unit) != 1.))):
# NOTE: this cannot be the more logical UnitTypeError, since
# then things like np.cumprod will no longer fail (they check
# for TypeError).
raise UnitsError("Cannot use '{1}' method on ufunc {0} with a "
"Quantity instance as it would change the unit."
.format(function.__name__, method))
return converters, result_unit
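# Example (illustrative, with astropy.units imported as u): for np.add on
# inputs in m and km, the second input gets a converter to m and the
# result unit is m:
#
#     converters, unit = converters_and_unit(np.add, '__call__',
#                                            1. * u.m, 1. * u.km)
#     # converters[0] is None, converters[1] scales km to m; unit is u.m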
def check_output(output, unit, inputs, function=None):
"""Check that function output can be stored in the output array given.
Parameters
----------
output : array or `~astropy.units.Quantity` or tuple
Array that should hold the function output (or tuple of such arrays).
unit : `~astropy.units.Unit` or None, or tuple
Unit that the output will have, or `None` for pure numbers (should be
tuple of same if output is a tuple of outputs).
inputs : tuple
Any input arguments. These should be castable to the output.
function : callable
The function that will be producing the output. If given, used to
give a more informative error message.
Returns
-------
arrays : ndarray view or tuple thereof
View(s) of ``output``.
Raises
------
UnitTypeError : If ``unit`` is inconsistent with the class of ``output``
TypeError : If the ``inputs`` cannot be cast safely to ``output``.
"""
if isinstance(output, tuple):
return tuple(check_output(output_, unit_, inputs, function)
for output_, unit_ in zip(output, unit))
# ``None`` indicates no actual array is needed. This can happen, e.g.,
# with np.modf(a, out=(None, b)).
if output is None:
return None
if hasattr(output, '__quantity_subclass__'):
# Check that we're not trying to store a plain Numpy array or a
# Quantity with an inconsistent unit (e.g., not angular for Angle).
if unit is None:
raise TypeError("Cannot store non-quantity output{} in {} "
"instance".format(
(f" from {function.__name__} function"
if function is not None else ""),
type(output)))
q_cls, subok = output.__quantity_subclass__(unit)
if not (subok or q_cls is type(output)):
raise UnitTypeError(
"Cannot store output with unit '{}'{} "
"in {} instance. Use {} instance instead."
.format(unit, (f" from {function.__name__} function"
if function is not None else ""),
type(output), q_cls))
# check we can handle the dtype (e.g., that we are not int
# when float is required). Note that we only do this for Quantity
# output; for array output, we defer to numpy's default handling.
# Also, structured dtypes are ignored (likely erfa ufuncs).
# TODO: make more logical; is this necessary at all?
if inputs and not output.dtype.names:
result_type = np.result_type(*inputs)
if not (result_type.names
or np.can_cast(result_type, output.dtype,
casting='same_kind')):
raise TypeError("Arguments cannot be cast safely to inplace "
"output with dtype={}".format(output.dtype))
# Turn into ndarray, so we do not loop into array_wrap/array_ufunc
# if the output is used to store results of a function.
return output.view(np.ndarray)
else:
# output is not a Quantity, so cannot obtain a unit.
if not (unit is None or unit is dimensionless_unscaled):
raise UnitTypeError("Cannot store quantity with dimension "
"{}in a non-Quantity instance."
.format("" if function is None else
"resulting from {} function "
.format(function.__name__)))
return output
hash: 5f2ce3ec27fe23b15507b46cb5c5c3c49e8932ff30fafdf963603c1474246059
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Quantity helpers for the ERFA ufuncs."""
# Tests for these are in coordinates, not in units.
from erfa import dt_eraASTROM, dt_eraLDBODY, dt_pv
from erfa import ufunc as erfa_ufunc
from astropy.units.core import UnitsError, UnitTypeError, dimensionless_unscaled
from astropy.units.structured import StructuredUnit
from . import UFUNC_HELPERS
from .helpers import (
_d, get_converter, helper_invariant, helper_multiplication, helper_twoarg_invariant)
erfa_ufuncs = ('s2c', 's2p', 'c2s', 'p2s', 'pm', 'pdp', 'pxp', 'rxp',
'cpv', 'p2pv', 'pv2p', 'pv2s', 'pvdpv', 'pvm', 'pvmpv', 'pvppv',
'pvstar', 'pvtob', 'pvu', 'pvup', 'pvxpv', 'rxpv', 's2pv', 's2xpv',
'starpv', 'sxpv', 'trxpv', 'gd2gc', 'gc2gd', 'ldn', 'aper',
'apio', 'atciq', 'atciqn', 'atciqz', 'aticq', 'atioq', 'atoiq')
def has_matching_structure(unit, dtype):
dtype_fields = dtype.fields
if dtype_fields:
return (isinstance(unit, StructuredUnit)
and len(unit) == len(dtype_fields)
and all(has_matching_structure(u, df_v[0])
for (u, df_v) in zip(unit.values(), dtype_fields.values())))
else:
return not isinstance(unit, StructuredUnit)
def check_structured_unit(unit, dtype):
if not has_matching_structure(unit, dtype):
msg = {dt_pv: 'pv',
dt_eraLDBODY: 'ldbody',
dt_eraASTROM: 'astrom'}.get(dtype, 'function')
raise UnitTypeError(f'{msg} input needs unit matching dtype={dtype}.')
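# For instance (illustrative), a pv argument with dtype dt_pv needs a
# two-field unit such as StructuredUnit((u.km, u.km / u.s)); a plain unit
# would raise the UnitTypeError above.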
def helper_s2c(f, unit1, unit2):
from astropy.units.si import radian
try:
return [get_converter(unit1, radian),
get_converter(unit2, radian)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_s2p(f, unit1, unit2, unit3):
from astropy.units.si import radian
try:
return [get_converter(unit1, radian),
get_converter(unit2, radian), None], unit3
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_c2s(f, unit1):
from astropy.units.si import radian
return [None], (radian, radian)
def helper_p2s(f, unit1):
from astropy.units.si import radian
return [None], (radian, radian, unit1)
def helper_gc2gd(f, nounit, unit1):
from astropy.units.si import m, radian
if nounit is not None:
raise UnitTypeError("ellipsoid cannot be a quantity.")
try:
return [None, get_converter(unit1, m)], (radian, radian, m, None)
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with length units"
.format(f.__name__))
def helper_gd2gc(f, nounit, unit1, unit2, unit3):
from astropy.units.si import m, radian
if nounit is not None:
raise UnitTypeError("ellipsoid cannot be a quantity.")
try:
return [None,
get_converter(unit1, radian),
get_converter(unit2, radian),
get_converter(unit3, m)], (m, None)
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to lon, lat "
"with angle and height with length units"
.format(f.__name__))
def helper_p2pv(f, unit1):
from astropy.units.si import s
if isinstance(unit1, StructuredUnit):
raise UnitTypeError("p vector unit cannot be a structured unit.")
return [None], StructuredUnit((unit1, unit1 / s))
def helper_pv2p(f, unit1):
check_structured_unit(unit1, dt_pv)
return [None], unit1[0]
def helper_pv2s(f, unit_pv):
from astropy.units.si import radian
check_structured_unit(unit_pv, dt_pv)
ang_unit = radian * unit_pv[1] / unit_pv[0]
return [None], (radian, radian, unit_pv[0], ang_unit, ang_unit, unit_pv[1])
def helper_s2pv(f, unit_theta, unit_phi, unit_r, unit_td, unit_pd, unit_rd):
from astropy.units.si import radian
time_unit = unit_r / unit_rd
return [get_converter(unit_theta, radian),
get_converter(unit_phi, radian),
None,
get_converter(unit_td, radian / time_unit),
get_converter(unit_pd, radian / time_unit),
None], StructuredUnit((unit_r, unit_rd))
def helper_pv_multiplication(f, unit1, unit2):
check_structured_unit(unit1, dt_pv)
check_structured_unit(unit2, dt_pv)
result_unit = StructuredUnit((unit1[0] * unit2[0], unit1[1] * unit2[0]))
converter = get_converter(unit2, StructuredUnit(
(unit2[0], unit1[1] * unit2[0] / unit1[0])))
return [None, converter], result_unit
def helper_pvm(f, unit1):
check_structured_unit(unit1, dt_pv)
return [None], (unit1[0], unit1[1])
def helper_pvstar(f, unit1):
from astropy.units.astrophys import AU
from astropy.units.si import arcsec, day, km, radian, s, year
return [get_converter(unit1, StructuredUnit((AU, AU/day)))], (
radian, radian, radian / year, radian / year, arcsec, km / s, None)
def helper_starpv(f, unit_ra, unit_dec, unit_pmr, unit_pmd,
unit_px, unit_rv):
from astropy.units.astrophys import AU
from astropy.units.si import arcsec, day, km, radian, s, year
return [get_converter(unit_ra, radian),
get_converter(unit_dec, radian),
get_converter(unit_pmr, radian/year),
get_converter(unit_pmd, radian/year),
get_converter(unit_px, arcsec),
get_converter(unit_rv, km/s)], (StructuredUnit((AU, AU/day)), None)
def helper_pvtob(f, unit_elong, unit_phi, unit_hm,
unit_xp, unit_yp, unit_sp, unit_theta):
from astropy.units.si import m, radian, s
return [get_converter(unit_elong, radian),
get_converter(unit_phi, radian),
get_converter(unit_hm, m),
get_converter(unit_xp, radian),
get_converter(unit_yp, radian),
get_converter(unit_sp, radian),
get_converter(unit_theta, radian)], StructuredUnit((m, m/s))
def helper_pvu(f, unit_t, unit_pv):
check_structured_unit(unit_pv, dt_pv)
return [get_converter(unit_t, unit_pv[0]/unit_pv[1]), None], unit_pv
def helper_pvup(f, unit_t, unit_pv):
check_structured_unit(unit_pv, dt_pv)
return [get_converter(unit_t, unit_pv[0]/unit_pv[1]), None], unit_pv[0]
def helper_s2xpv(f, unit1, unit2, unit_pv):
check_structured_unit(unit_pv, dt_pv)
return [None, None, None], StructuredUnit((_d(unit1) * unit_pv[0],
_d(unit2) * unit_pv[1]))
def ldbody_unit():
from astropy.units.astrophys import AU, Msun
from astropy.units.si import day, radian
return StructuredUnit((Msun, radian, (AU, AU/day)),
erfa_ufunc.dt_eraLDBODY)
def astrom_unit():
from astropy.units.astrophys import AU
from astropy.units.si import rad, year
one = rel2c = dimensionless_unscaled
return StructuredUnit((year, AU, one, AU, rel2c, one, one, rad, rad, rad, rad,
one, one, rel2c, rad, rad, rad),
erfa_ufunc.dt_eraASTROM)
def helper_ldn(f, unit_b, unit_ob, unit_sc):
from astropy.units.astrophys import AU
return [get_converter(unit_b, ldbody_unit()),
get_converter(unit_ob, AU),
get_converter(_d(unit_sc), dimensionless_unscaled)], dimensionless_unscaled
def helper_aper(f, unit_theta, unit_astrom):
check_structured_unit(unit_astrom, dt_eraASTROM)
unit_along = unit_astrom[7] # along
if unit_astrom[14] is unit_along: # eral
result_unit = unit_astrom
else:
result_units = tuple((unit_along if i == 14 else v)
for i, v in enumerate(unit_astrom.values()))
result_unit = unit_astrom.__class__(result_units, names=unit_astrom)
return [get_converter(unit_theta, unit_along), None], result_unit
def helper_apio(f, unit_sp, unit_theta, unit_elong, unit_phi, unit_hm,
unit_xp, unit_yp, unit_refa, unit_refb):
from astropy.units.si import m, radian
return [get_converter(unit_sp, radian),
get_converter(unit_theta, radian),
get_converter(unit_elong, radian),
get_converter(unit_phi, radian),
get_converter(unit_hm, m),
get_converter(unit_xp, radian),
get_converter(unit_yp, radian),
get_converter(unit_refa, radian),
get_converter(unit_refb, radian)], astrom_unit()
def helper_atciq(f, unit_rc, unit_dc, unit_pr, unit_pd, unit_px, unit_rv, unit_astrom):
from astropy.units.si import arcsec, km, radian, s, year
return [get_converter(unit_rc, radian),
get_converter(unit_dc, radian),
get_converter(unit_pr, radian / year),
get_converter(unit_pd, radian / year),
get_converter(unit_px, arcsec),
get_converter(unit_rv, km / s),
get_converter(unit_astrom, astrom_unit())], (radian, radian)
def helper_atciqn(f, unit_rc, unit_dc, unit_pr, unit_pd, unit_px, unit_rv, unit_astrom,
unit_b):
from astropy.units.si import arcsec, km, radian, s, year
return [get_converter(unit_rc, radian),
get_converter(unit_dc, radian),
get_converter(unit_pr, radian / year),
get_converter(unit_pd, radian / year),
get_converter(unit_px, arcsec),
get_converter(unit_rv, km / s),
get_converter(unit_astrom, astrom_unit()),
get_converter(unit_b, ldbody_unit())], (radian, radian)
def helper_atciqz_aticq(f, unit_rc, unit_dc, unit_astrom):
from astropy.units.si import radian
return [get_converter(unit_rc, radian),
get_converter(unit_dc, radian),
get_converter(unit_astrom, astrom_unit())], (radian, radian)
def helper_aticqn(f, unit_rc, unit_dc, unit_astrom, unit_b):
from astropy.units.si import radian
return [get_converter(unit_rc, radian),
get_converter(unit_dc, radian),
get_converter(unit_astrom, astrom_unit()),
get_converter(unit_b, ldbody_unit())], (radian, radian)
def helper_atioq(f, unit_rc, unit_dc, unit_astrom):
from astropy.units.si import radian
return [get_converter(unit_rc, radian),
get_converter(unit_dc, radian),
get_converter(unit_astrom, astrom_unit())], (radian,)*5
def helper_atoiq(f, unit_type, unit_ri, unit_di, unit_astrom):
from astropy.units.si import radian
if unit_type is not None:
raise UnitTypeError("argument 'type' should not have a unit")
return [None,
get_converter(unit_ri, radian),
get_converter(unit_di, radian),
get_converter(unit_astrom, astrom_unit())], (radian, radian)
def get_erfa_helpers():
ERFA_HELPERS = {}
ERFA_HELPERS[erfa_ufunc.s2c] = helper_s2c
ERFA_HELPERS[erfa_ufunc.s2p] = helper_s2p
ERFA_HELPERS[erfa_ufunc.c2s] = helper_c2s
ERFA_HELPERS[erfa_ufunc.p2s] = helper_p2s
ERFA_HELPERS[erfa_ufunc.pm] = helper_invariant
ERFA_HELPERS[erfa_ufunc.cpv] = helper_invariant
ERFA_HELPERS[erfa_ufunc.p2pv] = helper_p2pv
ERFA_HELPERS[erfa_ufunc.pv2p] = helper_pv2p
ERFA_HELPERS[erfa_ufunc.pv2s] = helper_pv2s
ERFA_HELPERS[erfa_ufunc.pvdpv] = helper_pv_multiplication
ERFA_HELPERS[erfa_ufunc.pvxpv] = helper_pv_multiplication
ERFA_HELPERS[erfa_ufunc.pvm] = helper_pvm
ERFA_HELPERS[erfa_ufunc.pvmpv] = helper_twoarg_invariant
ERFA_HELPERS[erfa_ufunc.pvppv] = helper_twoarg_invariant
ERFA_HELPERS[erfa_ufunc.pvstar] = helper_pvstar
ERFA_HELPERS[erfa_ufunc.pvtob] = helper_pvtob
ERFA_HELPERS[erfa_ufunc.pvu] = helper_pvu
ERFA_HELPERS[erfa_ufunc.pvup] = helper_pvup
ERFA_HELPERS[erfa_ufunc.pdp] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.pxp] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.rxp] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.rxpv] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.s2pv] = helper_s2pv
ERFA_HELPERS[erfa_ufunc.s2xpv] = helper_s2xpv
ERFA_HELPERS[erfa_ufunc.starpv] = helper_starpv
ERFA_HELPERS[erfa_ufunc.sxpv] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.trxpv] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.gc2gd] = helper_gc2gd
ERFA_HELPERS[erfa_ufunc.gd2gc] = helper_gd2gc
ERFA_HELPERS[erfa_ufunc.ldn] = helper_ldn
ERFA_HELPERS[erfa_ufunc.aper] = helper_aper
ERFA_HELPERS[erfa_ufunc.apio] = helper_apio
ERFA_HELPERS[erfa_ufunc.atciq] = helper_atciq
ERFA_HELPERS[erfa_ufunc.atciqn] = helper_atciqn
ERFA_HELPERS[erfa_ufunc.atciqz] = helper_atciqz_aticq
ERFA_HELPERS[erfa_ufunc.aticq] = helper_atciqz_aticq
ERFA_HELPERS[erfa_ufunc.aticqn] = helper_aticqn
ERFA_HELPERS[erfa_ufunc.atioq] = helper_atioq
ERFA_HELPERS[erfa_ufunc.atoiq] = helper_atoiq
return ERFA_HELPERS
UFUNC_HELPERS.register_module('erfa.ufunc', erfa_ufuncs,
get_erfa_helpers)
hash: 2b7989b438fca672dccbed5470e1938557d4d2337a830af464c9f6a8882df556
# Licensed under a 3-clause BSD style license. See LICENSE.rst except
# for parts explicitly labelled as being (largely) copies of numpy
# implementations; for those, see licenses/NUMPY_LICENSE.rst.
"""Helpers for overriding numpy functions.
We override numpy functions in `~astropy.units.Quantity.__array_function__`.
In this module, the numpy functions are split in four groups, each of
which has an associated `set` or `dict`:
1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
supports Quantity; we pass on to ndarray.__array_function__.
2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
after converting quantities to arrays with suitable units,
and possibly setting units on the result.
3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
requires a Quantity-specific implementation
4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
For the FUNCTION_HELPERS `dict`, the value is a function that does the
unit conversion. It should take the same arguments as the numpy
function would (though one can use ``*args`` and ``**kwargs``) and
return a tuple of ``args, kwargs, unit, out``, where ``args`` and
``kwargs`` will be passed on to the numpy implementation,
``unit`` is a possible unit of the result (`None` if it should not be
converted to Quantity), and ``out`` is a possible output Quantity passed
in, which will be filled in-place.
For the DISPATCHED_FUNCTIONS `dict`, the value is a function that
implements the numpy functionality for Quantity input. It should
return a tuple of ``result, unit, out``, where ``result`` is generally
a plain array with the result, and ``unit`` and ``out`` are as above.
If unit is `None`, result gets returned directly, so one can also
return a Quantity directly using ``quantity_result, None, None``.
"""
import functools
import operator
import numpy as np
from numpy.lib import recfunctions as rfn
from astropy.units.core import UnitsError, UnitTypeError, dimensionless_unscaled
from astropy.utils import isiterable
from astropy.utils.compat import NUMPY_LT_1_20, NUMPY_LT_1_23
# In 1.17, overrides are enabled by default, but it is still possible to
# turn them off using an environment variable. We use getattr since it
# is planned to remove that possibility in later numpy versions.
ARRAY_FUNCTION_ENABLED = getattr(np.core.overrides,
'ENABLE_ARRAY_FUNCTION', True)
SUBCLASS_SAFE_FUNCTIONS = set()
"""Functions with implementations supporting subclasses like Quantity."""
FUNCTION_HELPERS = {}
"""Functions with implementations usable with proper unit conversion."""
DISPATCHED_FUNCTIONS = {}
"""Functions for which we provide our own implementation."""
UNSUPPORTED_FUNCTIONS = set()
"""Functions that cannot sensibly be used with quantities."""
SUBCLASS_SAFE_FUNCTIONS |= {
np.shape, np.size, np.ndim,
np.reshape, np.ravel, np.moveaxis, np.rollaxis, np.swapaxes,
np.transpose, np.atleast_1d, np.atleast_2d, np.atleast_3d,
np.expand_dims, np.squeeze, np.broadcast_to, np.broadcast_arrays,
np.flip, np.fliplr, np.flipud, np.rot90,
np.argmin, np.argmax, np.argsort, np.lexsort, np.searchsorted,
np.nonzero, np.argwhere, np.flatnonzero,
np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
np.real, np.imag, np.diagonal, np.diagflat,
np.empty_like,
np.compress, np.extract, np.delete, np.trim_zeros, np.roll, np.take,
np.put, np.fill_diagonal, np.tile, np.repeat,
np.split, np.array_split, np.hsplit, np.vsplit, np.dsplit,
np.stack, np.column_stack, np.hstack, np.vstack, np.dstack,
np.amax, np.amin, np.ptp, np.sum, np.cumsum,
np.prod, np.product, np.cumprod, np.cumproduct,
np.round, np.around,
np.fix, np.angle, np.i0, np.clip,
np.isposinf, np.isneginf, np.isreal, np.iscomplex,
np.average, np.mean, np.std, np.var, np.median, np.trace,
np.nanmax, np.nanmin, np.nanargmin, np.nanargmax, np.nanmean,
np.nanmedian, np.nansum, np.nancumsum, np.nanstd, np.nanvar,
np.nanprod, np.nancumprod,
np.einsum_path, np.trapz, np.linspace,
np.sort, np.msort, np.partition, np.meshgrid,
np.common_type, np.result_type, np.can_cast, np.min_scalar_type,
np.iscomplexobj, np.isrealobj,
np.shares_memory, np.may_share_memory,
np.apply_along_axis, np.take_along_axis, np.put_along_axis,
np.linalg.cond, np.linalg.multi_dot}
# Implemented as methods on Quantity:
# np.ediff1d is from setops, but we support it anyway; the others
# currently return NotImplementedError.
# TODO: move latter to UNSUPPORTED? Would raise TypeError instead.
SUBCLASS_SAFE_FUNCTIONS |= {np.ediff1d}
# Nonsensical for quantities.
UNSUPPORTED_FUNCTIONS |= {
np.packbits, np.unpackbits, np.unravel_index,
np.ravel_multi_index, np.ix_, np.cov, np.corrcoef,
np.busday_count, np.busday_offset, np.datetime_as_string,
np.is_busday, np.all, np.any, np.sometrue, np.alltrue}
# Could be supported if we had a natural logarithm unit.
UNSUPPORTED_FUNCTIONS |= {np.linalg.slogdet}
# TODO! support whichever of these functions it makes sense to support
TBD_FUNCTIONS = {
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.apply_along_fields, rfn.assign_fields_by_name, rfn.merge_arrays,
rfn.find_duplicates, rfn.recursive_fill_fields, rfn.require_fields,
rfn.repack_fields, rfn.stack_arrays
}
UNSUPPORTED_FUNCTIONS |= TBD_FUNCTIONS
# The following are not just unsupported, but so unlikely to be thought
# to be supported that we ignore them in testing. (Kept in a separate
# variable so that we can check consistency in the test routine -
# test_quantity_non_ufuncs.py)
IGNORED_FUNCTIONS = {
# I/O - useless for Quantity, since no way to store the unit.
np.save, np.savez, np.savetxt, np.savez_compressed,
# Polynomials
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
# functions taking record arrays (which are deprecated)
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
}
if NUMPY_LT_1_20:
# financial
IGNORED_FUNCTIONS |= {np.fv, np.ipmt, np.irr, np.mirr, np.nper,
np.npv, np.pmt, np.ppmt, np.pv, np.rate}
if NUMPY_LT_1_23:
IGNORED_FUNCTIONS |= {
# Deprecated, removed in numpy 1.23
np.asscalar, np.alen,
}
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
class FunctionAssigner:
def __init__(self, assignments):
self.assignments = assignments
def __call__(self, f=None, helps=None, module=np):
"""Add a helper to a numpy function.
Normally used as a decorator.
If ``helps`` is given, it should be the numpy function helped (or an
iterable of numpy functions helped).
If ``helps`` is not given, it is assumed the function helped is the
numpy function with the same name as the decorated function.
"""
if f is not None:
if helps is None:
helps = getattr(module, f.__name__)
if not isiterable(helps):
helps = (helps,)
for h in helps:
self.assignments[h] = f
return f
elif helps is not None or module is not np:
return functools.partial(self.__call__, helps=helps, module=module)
else: # pragma: no cover
raise ValueError("function_helper requires at least one argument.")
function_helper = FunctionAssigner(FUNCTION_HELPERS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
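# E.g., ``@function_helper`` registers the decorated function in
# FUNCTION_HELPERS, keyed by the numpy function of the same name (or by
# the functions passed via ``helps=...``), as the decorators below show.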
@function_helper(helps={
np.copy, np.asfarray, np.real_if_close, np.sort_complex, np.resize,
np.fft.fft, np.fft.ifft, np.fft.rfft, np.fft.irfft,
np.fft.fft2, np.fft.ifft2, np.fft.rfft2, np.fft.irfft2,
np.fft.fftn, np.fft.ifftn, np.fft.rfftn, np.fft.irfftn,
np.fft.hfft, np.fft.ihfft,
np.linalg.eigvals, np.linalg.eigvalsh})
def invariant_a_helper(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, a.unit, None
@function_helper(helps={np.tril, np.triu})
def invariant_m_helper(m, *args, **kwargs):
return (m.view(np.ndarray),) + args, kwargs, m.unit, None
@function_helper(helps={np.fft.fftshift, np.fft.ifftshift})
def invariant_x_helper(x, *args, **kwargs):
return (x.view(np.ndarray),) + args, kwargs, x.unit, None
# Note that ones_like does *not* work by default since if one creates an empty
# array with a unit, one cannot just fill it with unity. Indeed, in this
# respect, it is a bit of an odd function for Quantity. On the other hand, it
# matches the idea that a unit is the same as the quantity with that unit and
# value of 1. Also, it used to work without __array_function__.
# zeros_like does work by default for regular quantities, because numpy first
# creates an empty array with the unit and then fills it with 0 (which can have
# any unit), but for structured dtype this fails (0 cannot have an arbitrary
# structured unit), so we include it here too.
@function_helper(helps={np.ones_like, np.zeros_like})
def like_helper(a, *args, **kwargs):
subok = args[2] if len(args) > 2 else kwargs.pop('subok', True)
unit = a.unit if subok else None
return (a.view(np.ndarray),) + args, kwargs, unit, None
@function_helper
def sinc(x):
from astropy.units.si import radian
try:
x = x.to_value(radian)
except UnitsError:
raise UnitTypeError("Can only apply 'sinc' function to "
"quantities with angle units")
return (x,), {}, dimensionless_unscaled, None
@dispatched_function
def unwrap(p, discont=None, axis=-1):
from astropy.units.si import radian
if discont is None:
discont = np.pi << radian
p, discont = _as_quantities(p, discont)
result = np.unwrap.__wrapped__(p.to_value(radian),
discont.to_value(radian), axis=axis)
result = radian.to(p.unit, result)
return result, p.unit, None
@function_helper
def argpartition(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, None, None
@function_helper
def full_like(a, fill_value, *args, **kwargs):
unit = a.unit if kwargs.get('subok', True) else None
return (a.view(np.ndarray),
a._to_own_unit(fill_value)) + args, kwargs, unit, None
@function_helper
def putmask(a, mask, values):
from astropy.units import Quantity
if isinstance(a, Quantity):
return (a.view(np.ndarray), mask,
a._to_own_unit(values)), {}, a.unit, None
elif isinstance(values, Quantity):
return (a, mask,
values.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def place(arr, mask, vals):
from astropy.units import Quantity
if isinstance(arr, Quantity):
return (arr.view(np.ndarray), mask,
arr._to_own_unit(vals)), {}, arr.unit, None
elif isinstance(vals, Quantity):
return (arr, mask,
vals.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def copyto(dst, src, *args, **kwargs):
from astropy.units import Quantity
if isinstance(dst, Quantity):
return ((dst.view(np.ndarray), dst._to_own_unit(src)) + args,
kwargs, None, None)
elif isinstance(src, Quantity):
return ((dst, src.to_value(dimensionless_unscaled)) + args,
kwargs, None, None)
else:
raise NotImplementedError
@function_helper
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
nan = x._to_own_unit(nan)
if posinf is not None:
posinf = x._to_own_unit(posinf)
if neginf is not None:
neginf = x._to_own_unit(neginf)
return ((x.view(np.ndarray),),
dict(copy=copy, nan=nan, posinf=posinf, neginf=neginf),
x.unit, None)
def _as_quantity(a):
"""Convert argument to a Quantity (or raise NotImplementedError)."""
from astropy.units import Quantity
try:
return Quantity(a, copy=False, subok=True)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _as_quantities(*args):
"""Convert arguments to Quantity (or raise NotImplentedError)."""
from astropy.units import Quantity
try:
return tuple(Quantity(a, copy=False, subok=True)
for a in args)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _quantities2arrays(*args, unit_from_first=False):
"""Convert to arrays in units of the first argument that has a unit.
If unit_from_first, take the unit of the first argument regardless
whether it actually defined a unit (e.g., dimensionless for arrays).
"""
# Turn first argument into a quantity.
q = _as_quantity(args[0])
if len(args) == 1:
return (q.value,), q.unit
# If we care about the unit being explicit, then check whether this
# argument actually had a unit, or was likely inferred.
if not unit_from_first and (q.unit is q._default_unit
and not hasattr(args[0], 'unit')):
# Here, the argument could still be things like [10*u.one, 11.*u.one],
# i.e., properly dimensionless. So, we only override with anything
# that has a unit not equivalent to dimensionless (it is fine to let
# other dimensionless units pass, even if explicitly given).
for arg in args[1:]:
trial = _as_quantity(arg)
if not trial.unit.is_equivalent(q.unit):
# Use any explicit unit not equivalent to dimensionless.
q = trial
break
# We use the private _to_own_unit method here instead of just
# converting everything to quantity and then do .to_value(qs0.unit)
# as we want to allow arbitrary unit for 0, inf, and nan.
try:
arrays = tuple((q._to_own_unit(arg)) for arg in args)
except TypeError:
raise NotImplementedError
return arrays, q.unit
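# Example (illustrative, with astropy.units imported as u):
#     _quantities2arrays(1. * u.km, 1. * u.m)   # -> ((1.0, 0.001), u.km)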
def _iterable_helper(*args, out=None, **kwargs):
"""Convert arguments to Quantity, and treat possible 'out'."""
from astropy.units import Quantity
if out is not None:
if isinstance(out, Quantity):
kwargs['out'] = out.view(np.ndarray)
else:
# TODO: for an ndarray output, we could in principle
# try converting all Quantity to dimensionless.
raise NotImplementedError
arrays, unit = _quantities2arrays(*args)
return arrays, kwargs, unit, out
@function_helper
def concatenate(arrays, axis=0, out=None, **kwargs):
# TODO: make this smarter by creating an appropriately shaped
# empty output array and just filling it.
arrays, kwargs, unit, out = _iterable_helper(*arrays, out=out, axis=axis, **kwargs)
return (arrays,), kwargs, unit, out
@dispatched_function
def block(arrays):
# We need to override block since the numpy implementation can take two
# different paths, one for concatenation, one for creating a large empty
# result array in which parts are set. Each assumes array input and
# cannot be used directly. Since it would be very costly to inspect all
# arrays and then turn them back into a nested list, we just copy here the
# second implementation, np.core.shape_base._block_slicing, since it is
# shortest and easiest.
(arrays, list_ndim, result_ndim,
final_size) = np.core.shape_base._block_setup(arrays)
shape, slices, arrays = np.core.shape_base._block_info_recursion(
arrays, list_ndim, result_ndim)
# Here, one line of difference!
arrays, unit = _quantities2arrays(*arrays)
# Back to _block_slicing
dtype = np.result_type(*[arr.dtype for arr in arrays])
F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
order = 'F' if F_order and not C_order else 'C'
result = np.empty(shape=shape, dtype=dtype, order=order)
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result, unit, None
@function_helper
def choose(a, choices, out=None, **kwargs):
choices, kwargs, unit, out = _iterable_helper(*choices, out=out, **kwargs)
return (a, choices,), kwargs, unit, out
@function_helper
def select(condlist, choicelist, default=0):
choicelist, kwargs, unit, out = _iterable_helper(*choicelist)
if default != 0:
default = (1 * unit)._to_own_unit(default)
return (condlist, choicelist, default), kwargs, unit, out
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
from astropy.units import Quantity
# Copied implementation from numpy.lib.function_base.piecewise,
# taking care of units of function outputs.
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if np.isscalar(condlist) or (
not isinstance(condlist[0], (list, np.ndarray)) and x.ndim != 0):
condlist = [condlist]
if any(isinstance(c, Quantity) for c in condlist):
raise NotImplementedError
condlist = np.array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
f"with {n} condition(s), either {n} or {n + 1} functions are expected"
)
y = np.zeros(x.shape, x.dtype)
where = []
what = []
for k in range(n):
item = funclist[k]
if not callable(item):
where.append(condlist[k])
what.append(item)
else:
vals = x[condlist[k]]
if vals.size > 0:
where.append(condlist[k])
what.append(item(vals, *args, **kw))
what, unit = _quantities2arrays(*what)
for item, value in zip(where, what):
y[item] = value
return y, unit, None
@function_helper
def append(arr, values, *args, **kwargs):
arrays, unit = _quantities2arrays(arr, values, unit_from_first=True)
return arrays + args, kwargs, unit, None
@function_helper
def insert(arr, obj, values, *args, **kwargs):
from astropy.units import Quantity
if isinstance(obj, Quantity):
raise NotImplementedError
(arr, values), unit = _quantities2arrays(arr, values,
unit_from_first=True)
return (arr, obj, values) + args, kwargs, unit, None
@function_helper
def pad(array, pad_width, mode='constant', **kwargs):
# pad dispatches only on array, so that must be a Quantity.
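    # E.g., np.pad([1., 2.]*u.m, 1, constant_values=50*u.cm) is expected to
    # pad with 0.5, i.e., 50 cm converted to m via _to_own_unit below.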
for key in 'constant_values', 'end_values':
value = kwargs.pop(key, None)
if value is None:
continue
if not isinstance(value, tuple):
value = (value,)
new_value = []
for v in value:
new_value.append(
tuple(array._to_own_unit(_v) for _v in v)
if isinstance(v, tuple) else array._to_own_unit(v))
kwargs[key] = new_value
return (array.view(np.ndarray), pad_width, mode), kwargs, array.unit, None
@function_helper
def where(condition, *args):
from astropy.units import Quantity
if isinstance(condition, Quantity) or len(args) != 2:
raise NotImplementedError
args, unit = _quantities2arrays(*args)
return (condition,) + args, {}, unit, None
@function_helper(helps={np.quantile, np.nanquantile})
def quantile(a, q, *args, _q_unit=dimensionless_unscaled, **kwargs):
if len(args) >= 2:
out = args[1]
args = args[:1] + args[2:]
else:
out = kwargs.pop('out', None)
from astropy.units import Quantity
if isinstance(q, Quantity):
q = q.to_value(_q_unit)
(a,), kwargs, unit, out = _iterable_helper(a, out=out, **kwargs)
return (a, q) + args, kwargs, unit, out
@function_helper(helps={np.percentile, np.nanpercentile})
def percentile(a, q, *args, **kwargs):
from astropy.units import percent
return quantile(a, q, *args, _q_unit=percent, **kwargs)
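# E.g., np.percentile([1., 2., 3., 4.]*u.m, 25*u.percent) should work just
# like np.percentile([1., 2., 3., 4.]*u.m, 25), since quantile() above
# converts a Quantity q using _q_unit.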
@function_helper
def count_nonzero(a, *args, **kwargs):
return (a.value,) + args, kwargs, None, None
@function_helper(helps={np.isclose, np.allclose})
def close(a, b, rtol=1e-05, atol=1e-08, *args, **kwargs):
from astropy.units import Quantity
(a, b), unit = _quantities2arrays(a, b, unit_from_first=True)
    # Interpret a plain number for atol as being in the common unit.
atol = Quantity(atol, unit).value
return (a, b, rtol, atol) + args, kwargs, None, None
@function_helper
def array_equal(a1, a2):
args, unit = _quantities2arrays(a1, a2)
return args, {}, None, None
@function_helper
def array_equiv(a1, a2):
args, unit = _quantities2arrays(a1, a2)
return args, {}, None, None
@function_helper(helps={np.dot, np.outer})
def dot_like(a, b, out=None):
from astropy.units import Quantity
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
return tuple(x.view(np.ndarray) for x in (a, b, out)), {}, unit, out
else:
return (a.view(np.ndarray), b.view(np.ndarray)), {}, unit, None
@function_helper(helps={np.cross, np.inner, np.vdot, np.tensordot, np.kron,
np.correlate, np.convolve})
def cross_like(a, b, *args, **kwargs):
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
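    # For all of these, the result unit is simply the product of the input
    # units; e.g., np.cross() of quantities in m and in N should be in m N.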
return (a.view(np.ndarray), b.view(np.ndarray)) + args, kwargs, unit, None
@function_helper
def einsum(subscripts, *operands, out=None, **kwargs):
from astropy.units import Quantity
if not isinstance(subscripts, str):
raise ValueError('only "subscripts" string mode supported for einsum.')
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
else:
kwargs['out'] = out.view(np.ndarray)
qs = _as_quantities(*operands)
unit = functools.reduce(operator.mul, (q.unit for q in qs),
dimensionless_unscaled)
arrays = tuple(q.view(np.ndarray) for q in qs)
return (subscripts,) + arrays, kwargs, unit, out
@function_helper
def bincount(x, weights=None, minlength=0):
from astropy.units import Quantity
if isinstance(x, Quantity):
raise NotImplementedError
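    # We can only get here if weights is a Quantity (x is checked above),
    # so using weights.value and weights.unit directly is safe.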
return (x, weights.value, minlength), {}, weights.unit, None
@function_helper
def digitize(x, bins, *args, **kwargs):
arrays, unit = _quantities2arrays(x, bins, unit_from_first=True)
return arrays + args, kwargs, None, None
def _check_bins(bins, unit):
from astropy.units import Quantity
check = _as_quantity(bins)
if check.ndim > 0:
return check.to_value(unit)
elif isinstance(bins, Quantity):
# bins should be an integer (or at least definitely not a Quantity).
raise NotImplementedError
else:
return bins
@function_helper
def histogram(a, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
if density:
unit = (unit or 1) / a.unit
return ((a.value, bins, range), {'weights': weights, 'density': density},
(unit, a.unit), None)
@function_helper(helps=np.histogram_bin_edges)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
# weights is currently unused
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
return (a.value, bins, range, weights), {}, a.unit, None
@function_helper
def histogram2d(x, y, bins=10, range=None, weights=None, density=None):
from astropy.units import Quantity
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
x, y = _as_quantities(x, y)
try:
n = len(bins)
except TypeError:
# bins should be an integer (or at least definitely not a Quantity).
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if n == 1:
raise NotImplementedError
elif n == 2 and not isinstance(bins, Quantity):
bins = [_check_bins(b, unit)
for (b, unit) in zip(bins, (x.unit, y.unit))]
else:
bins = _check_bins(bins, x.unit)
y = y.to(x.unit)
if density:
unit = (unit or 1) / x.unit / y.unit
return ((x.value, y.value, bins, range),
{'weights': weights, 'density': density},
(unit, x.unit, y.unit), None)
@function_helper
def histogramdd(sample, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
try:
# Sample is an ND-array.
_, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = _as_quantities(*sample)
sample_units = [s.unit for s in sample]
sample = [s.value for s in sample]
D = len(sample)
else:
sample = _as_quantity(sample)
sample_units = [sample.unit] * D
try:
M = len(bins)
except TypeError:
# bins should be an integer
from astropy.units import Quantity
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if M != D:
raise ValueError(
                'The dimension of bins must be equal to the dimension of'
                ' the sample x.')
bins = [_check_bins(b, unit)
for (b, unit) in zip(bins, sample_units)]
if density:
unit = functools.reduce(operator.truediv, sample_units, (unit or 1))
return ((sample, bins, range), {'weights': weights, 'density': density},
(unit, sample_units), None)
@function_helper
def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
a = _as_quantity(a)
if prepend is not np._NoValue:
prepend = _as_quantity(prepend).to_value(a.unit)
if append is not np._NoValue:
append = _as_quantity(append).to_value(a.unit)
return (a.value, n, axis, prepend, append), {}, a.unit, None
@function_helper
def gradient(f, *varargs, **kwargs):
f = _as_quantity(f)
axis = kwargs.get('axis', None)
if axis is None:
n_axis = f.ndim
elif isinstance(axis, tuple):
n_axis = len(axis)
else:
n_axis = 1
if varargs:
varargs = _as_quantities(*varargs)
if len(varargs) == 1 and n_axis > 1:
varargs = varargs * n_axis
if varargs:
units = [f.unit / q.unit for q in varargs]
varargs = tuple(q.value for q in varargs)
else:
units = [f.unit] * n_axis
if len(units) == 1:
units = units[0]
return (f.value,) + varargs, kwargs, units, None
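# E.g., np.gradient(f, dx) with f in m and dx in s is expected to give a
# result in m / s; without spacing arguments, the unit of f passes through.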
@function_helper
def logspace(start, stop, *args, **kwargs):
from astropy.units import LogQuantity, dex
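    # Sketch: np.logspace(1.*u.dex(u.m), 3.*u.dex(u.m), num=3) should give
    # [10., 100., 1000.] m, i.e., values in the physical unit of the stop
    # point.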
if (not isinstance(start, LogQuantity) or
not isinstance(stop, LogQuantity)):
raise NotImplementedError
# Get unit from end point as for linspace.
stop = stop.to(dex(stop.unit.physical_unit))
start = start.to(stop.unit)
unit = stop.unit.physical_unit
return (start.value, stop.value) + args, kwargs, unit, None
@function_helper
def geomspace(start, stop, *args, **kwargs):
# Get unit from end point as for linspace.
(stop, start), unit = _quantities2arrays(stop, start)
return (start, stop) + args, kwargs, unit, None
@function_helper
def interp(x, xp, fp, *args, **kwargs):
from astropy.units import Quantity
(x, xp), _ = _quantities2arrays(x, xp)
if isinstance(fp, Quantity):
unit = fp.unit
fp = fp.value
else:
unit = None
return (x, xp, fp) + args, kwargs, unit, None
@function_helper
def unique(ar, return_index=False, return_inverse=False,
return_counts=False, axis=None):
unit = ar.unit
n_index = sum(bool(i) for i in
(return_index, return_inverse, return_counts))
if n_index:
unit = [unit] + n_index * [None]
return (ar.value, return_index, return_inverse, return_counts,
axis), {}, unit, None
@function_helper
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
if return_indices:
unit = [unit, None, None]
return (ar1, ar2, assume_unique, return_indices), {}, unit, None
@function_helper(helps=(np.setxor1d, np.union1d, np.setdiff1d))
def twosetop(ar1, ar2, *args, **kwargs):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
return (ar1, ar2) + args, kwargs, unit, None
@function_helper(helps=(np.isin, np.in1d))
def setcheckop(ar1, ar2, *args, **kwargs):
    # This tests whether ar1 is in ar2, so we should change the unit of
    # ar1 to that of ar2.
(ar2, ar1), unit = _quantities2arrays(ar2, ar1)
return (ar1, ar2) + args, kwargs, None, None
@dispatched_function
def apply_over_axes(func, a, axes):
    # Copied straight from numpy/lib/shape_base, just to omit its
    # ``val = asarray(a)`` (if only it had been asanyarray, or simply not
    # there, since ``a`` is assumed to be an array in the next line anyway).
    # That is what we rely on here: we can only get here if ``a`` is a
    # Quantity.
val = a
N = a.ndim
if np.array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = np.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
# Returning unit is None to signal nothing should happen to
# the output.
return val, None, None
@dispatched_function
def array_repr(arr, *args, **kwargs):
# TODO: The addition of "unit='...'" doesn't worry about line
# length. Could copy & adapt _array_repr_implementation from
# numpy.core.arrayprint.py
cls_name = arr.__class__.__name__
fake_name = '_' * len(cls_name)
fake_cls = type(fake_name, (np.ndarray,), {})
no_unit = np.array_repr(arr.view(fake_cls),
*args, **kwargs).replace(fake_name, cls_name)
unit_part = f"unit='{arr.unit}'"
pre, dtype, post = no_unit.rpartition('dtype')
if dtype:
return f"{pre}{unit_part}, {dtype}{post}", None, None
else:
return f"{no_unit[:-1]}, {unit_part})", None, None
@dispatched_function
def array_str(arr, *args, **kwargs):
# TODO: The addition of the unit doesn't worry about line length.
# Could copy & adapt _array_repr_implementation from
# numpy.core.arrayprint.py
no_unit = np.array_str(arr.value, *args, **kwargs)
return no_unit + arr._unitstr, None, None
@function_helper
def array2string(a, *args, **kwargs):
# array2string breaks on quantities as it tries to turn individual
# items into float, which works only for dimensionless. Since the
# defaults would not keep any unit anyway, this is rather pointless -
# we're better off just passing on the array view. However, one can
# also work around this by passing on a formatter (as is done in Angle).
# So, we do nothing if the formatter argument is present and has the
# relevant formatter for our dtype.
formatter = args[6] if len(args) >= 7 else kwargs.get('formatter', None)
if formatter is None:
a = a.value
else:
# See whether it covers our dtype.
from numpy.core.arrayprint import _get_format_function
with np.printoptions(formatter=formatter) as options:
try:
ff = _get_format_function(a.value, **options)
except Exception:
# Shouldn't happen, but possibly we're just not being smart
# enough, so let's pass things on as is.
pass
else:
                # If the selected format function is that of numpy, we know
                # things will fail, so fall back to the plain array value.
if 'numpy' in ff.__module__:
a = a.value
return (a,) + args, kwargs, None, None
@function_helper
def diag(v, *args, **kwargs):
# Function works for *getting* the diagonal, but not *setting*.
# So, override always.
return (v.value,) + args, kwargs, v.unit, None
@function_helper(module=np.linalg)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
unit = a.unit
if compute_uv:
unit = (None, unit, None)
return ((a.view(np.ndarray), full_matrices, compute_uv, hermitian),
{}, unit, None)
def _interpret_tol(tol, unit):
from astropy.units import Quantity
return Quantity(tol, unit).value
@function_helper(module=np.linalg)
def matrix_rank(M, tol=None, *args, **kwargs):
if tol is not None:
tol = _interpret_tol(tol, M.unit)
return (M.view(np.ndarray), tol) + args, kwargs, None, None
@function_helper(helps={np.linalg.inv, np.linalg.tensorinv})
def inv(a, *args, **kwargs):
return (a.view(np.ndarray),)+args, kwargs, 1/a.unit, None
@function_helper(module=np.linalg)
def pinv(a, rcond=1e-15, *args, **kwargs):
rcond = _interpret_tol(rcond, a.unit)
return (a.view(np.ndarray), rcond) + args, kwargs, 1/a.unit, None
@function_helper(module=np.linalg)
def det(a):
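    # The determinant of an n x n matrix scales as a.unit ** n, hence the
    # use of a.shape[-1] below.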
return (a.view(np.ndarray),), {}, a.unit ** a.shape[-1], None
@function_helper(helps={np.linalg.solve, np.linalg.tensorsolve})
def solve(a, b, *args, **kwargs):
a, b = _as_quantities(a, b)
return ((a.view(np.ndarray), b.view(np.ndarray)) + args, kwargs,
b.unit / a.unit, None)
@function_helper(module=np.linalg)
def lstsq(a, b, rcond="warn"):
a, b = _as_quantities(a, b)
if rcond not in (None, "warn", -1):
rcond = _interpret_tol(rcond, a.unit)
return ((a.view(np.ndarray), b.view(np.ndarray), rcond), {},
(b.unit / a.unit, b.unit ** 2, None, a.unit), None)
@function_helper(module=np.linalg)
def norm(x, ord=None, *args, **kwargs):
if ord == 0:
from astropy.units import dimensionless_unscaled
unit = dimensionless_unscaled
else:
unit = x.unit
return (x.view(np.ndarray), ord)+args, kwargs, unit, None
@function_helper(module=np.linalg)
def matrix_power(a, n):
return (a.value, n), {}, a.unit ** n, None
@function_helper(module=np.linalg)
def cholesky(a):
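    # Since a = L @ L.conj().T, each Cholesky factor carries a.unit ** 0.5.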
return (a.value,), {}, a.unit ** 0.5, None
@function_helper(module=np.linalg)
def qr(a, mode='reduced'):
if mode.startswith('e'):
units = None
elif mode == 'r':
units = a.unit
else:
from astropy.units import dimensionless_unscaled
units = (dimensionless_unscaled, a.unit)
return (a.value, mode), {}, units, None
@function_helper(helps={np.linalg.eig, np.linalg.eigh})
def eig(a, *args, **kwargs):
from astropy.units import dimensionless_unscaled
return (a.value,)+args, kwargs, (a.unit, dimensionless_unscaled), None
@function_helper(module=np.lib.recfunctions)
def structured_to_unstructured(arr, *args, **kwargs):
"""
Convert a structured quantity to an unstructured one.
This only works if all the units are compatible.
"""
from astropy.units import StructuredUnit
target_unit = arr.unit.values()[0]
def replace_unit(x):
if isinstance(x, StructuredUnit):
return x._recursively_apply(replace_unit)
else:
return target_unit
to_unit = arr.unit._recursively_apply(replace_unit)
return (arr.to_value(to_unit), ) + args, kwargs, target_unit, None
def _build_structured_unit(dtype, unit):
"""Build structured unit from dtype
Parameters
----------
dtype : `numpy.dtype`
unit : `astropy.units.Unit`
Returns
-------
`astropy.units.Unit` or tuple
"""
if dtype.fields is None:
return unit
return tuple(_build_structured_unit(v[0], unit) for v in dtype.fields.values())
@function_helper(module=np.lib.recfunctions)
def unstructured_to_structured(arr, dtype, *args, **kwargs):
from astropy.units import StructuredUnit
target_unit = StructuredUnit(_build_structured_unit(dtype, arr.unit))
return (arr.to_value(arr.unit), dtype) + args, kwargs, target_unit, None
|
fb64a76d63b3543a2d918e5d40fd1eeccc478f6d02cf21323a8d9d33991a57f3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Quantity helpers for the scipy.special ufuncs.
Available ufuncs in this module are at
https://docs.scipy.org/doc/scipy/reference/special.html
"""
import numpy as np
from astropy.units.core import UnitsError, UnitTypeError, dimensionless_unscaled
from . import UFUNC_HELPERS
from .helpers import (
get_converter, helper_cbrt, helper_dimensionless_to_dimensionless, helper_two_arg_dimensionless)
# ufuncs that require dimensionless input and give dimensionless output.
dimensionless_to_dimensionless_sps_ufuncs = (
'erf', 'erfc', 'erfcx', 'erfi', 'erfinv', 'erfcinv',
'gamma', 'gammaln', 'loggamma', 'gammasgn', 'psi', 'rgamma', 'digamma',
'wofz', 'dawsn', 'entr', 'exprel', 'expm1', 'log1p', 'exp2', 'exp10',
'j0', 'j1', 'y0', 'y1', 'i0', 'i0e', 'i1', 'i1e',
'k0', 'k0e', 'k1', 'k1e', 'itj0y0', 'it2j0y0', 'iti0k0', 'it2i0k0',
'ndtr', 'ndtri')
scipy_special_ufuncs = dimensionless_to_dimensionless_sps_ufuncs
# ufuncs that require input in degrees and give dimensionless output.
degree_to_dimensionless_sps_ufuncs = ('cosdg', 'sindg', 'tandg', 'cotdg')
scipy_special_ufuncs += degree_to_dimensionless_sps_ufuncs
# ufuncs that require 2 dimensionless inputs and give dimensionless output.
# note: 'jv' and 'jn' are aliases in some scipy versions, which will
# cause the same key to be written twice, but since both are handled by the
# same helper there is no harm done.
two_arg_dimensionless_sps_ufuncs = (
'jv', 'jn', 'jve', 'yn', 'yv', 'yve', 'kn', 'kv', 'kve', 'iv', 'ive',
'hankel1', 'hankel1e', 'hankel2', 'hankel2e')
scipy_special_ufuncs += two_arg_dimensionless_sps_ufuncs
# ufuncs handled as special cases
scipy_special_ufuncs += ('cbrt', 'radian')
def helper_degree_to_dimensionless(f, unit):
from astropy.units.si import degree
try:
return [get_converter(unit, degree)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_degree_minute_second_to_radian(f, unit1, unit2, unit3):
from astropy.units.si import arcmin, arcsec, degree, radian
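    # scipy.special.radian(d, m, s) converts an angle given in degrees,
    # arcminutes, and arcseconds to radians, hence the three converters.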
try:
return [get_converter(unit1, degree),
get_converter(unit2, arcmin),
get_converter(unit3, arcsec)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with angle units"
.format(f.__name__))
def get_scipy_special_helpers():
import scipy.special as sps
SCIPY_HELPERS = {}
for name in dimensionless_to_dimensionless_sps_ufuncs:
# In SCIPY_LT_1_5, erfinv and erfcinv are not ufuncs.
ufunc = getattr(sps, name, None)
if isinstance(ufunc, np.ufunc):
SCIPY_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
for ufunc in degree_to_dimensionless_sps_ufuncs:
SCIPY_HELPERS[getattr(sps, ufunc)] = helper_degree_to_dimensionless
for ufunc in two_arg_dimensionless_sps_ufuncs:
SCIPY_HELPERS[getattr(sps, ufunc)] = helper_two_arg_dimensionless
# ufuncs handled as special cases
SCIPY_HELPERS[sps.cbrt] = helper_cbrt
SCIPY_HELPERS[sps.radian] = helper_degree_minute_second_to_radian
return SCIPY_HELPERS
UFUNC_HELPERS.register_module('scipy.special', scipy_special_ufuncs,
get_scipy_special_helpers)
|
d3adfed999e4d5f4a53f7bb0eb8cb5f7c8a59f5da6c99464c33507d376248550 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Separate tests specifically for equivalencies."""
import numpy as np
# THIRD-PARTY
import pytest
from numpy.testing import assert_allclose
# LOCAL
from astropy import constants
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.units.equivalencies import Equivalency
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_dimensionless_angles():
    # test that the dimensionless_angles equivalency allows one to convert
    # between units that differ by any power of radian (#1161)
rad1 = u.dimensionless_angles()
assert u.radian.to(1, equivalencies=rad1) == 1.
assert u.deg.to(1, equivalencies=rad1) == u.deg.to(u.rad)
assert u.steradian.to(1, equivalencies=rad1) == 1.
assert u.dimensionless_unscaled.to(u.steradian, equivalencies=rad1) == 1.
# now quantities
assert (1.*u.radian).to_value(1, equivalencies=rad1) == 1.
assert (1.*u.deg).to_value(1, equivalencies=rad1) == u.deg.to(u.rad)
assert (1.*u.steradian).to_value(1, equivalencies=rad1) == 1.
# more complicated example
I = 1.e45 * u.g * u.cm**2 # noqa
Omega = u.cycle / (1.*u.s)
Erot = 0.5 * I * Omega**2
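    # Erot carries units of g cm2 cycle2 / s2; dimensionless_angles lets
    # each cycle be expressed in radian (a factor of 2 pi) and dropped.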
# check that equivalency makes this work
Erot_in_erg1 = Erot.to(u.erg, equivalencies=rad1)
# and check that value is correct
assert_allclose(Erot_in_erg1.value, (Erot/u.radian**2).to_value(u.erg))
# test built-in equivalency in subclass
class MyRad1(u.Quantity):
_equivalencies = rad1
phase = MyRad1(1., u.cycle)
assert phase.to_value(1) == u.cycle.to(u.radian)
@pytest.mark.parametrize('log_unit', (u.mag, u.dex, u.dB))
def test_logarithmic(log_unit):
# check conversion of mag, dB, and dex to dimensionless and vice versa
with pytest.raises(u.UnitsError):
log_unit.to(1, 0.)
with pytest.raises(u.UnitsError):
u.dimensionless_unscaled.to(log_unit)
assert log_unit.to(1, 0., equivalencies=u.logarithmic()) == 1.
assert u.dimensionless_unscaled.to(log_unit,
equivalencies=u.logarithmic()) == 0.
# also try with quantities
q_dex = np.array([0., -1., 1., 2.]) * u.dex
q_expected = 10.**q_dex.value * u.dimensionless_unscaled
q_log_unit = q_dex.to(log_unit)
assert np.all(q_log_unit.to(1, equivalencies=u.logarithmic()) ==
q_expected)
assert np.all(q_expected.to(log_unit, equivalencies=u.logarithmic()) ==
q_log_unit)
with u.set_enabled_equivalencies(u.logarithmic()):
assert np.all(np.abs(q_log_unit - q_expected.to(log_unit)) <
1.e-10*log_unit)
doppler_functions = [u.doppler_optical, u.doppler_radio, u.doppler_relativistic]
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_frequency_0(function):
rest = 105.01 * u.GHz
velo0 = rest.to(u.km/u.s, equivalencies=function(rest))
assert velo0.value == 0
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_wavelength_0(function):
rest = 105.01 * u.GHz
q1 = 0.00285489437196 * u.m
velo0 = q1.to(u.km/u.s, equivalencies=function(rest))
np.testing.assert_almost_equal(velo0.value, 0, decimal=6)
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_energy_0(function):
rest = 105.01 * u.GHz
q1 = 0.0004342864648539744 * u.eV
velo0 = q1.to(u.km/u.s, equivalencies=function(rest))
np.testing.assert_almost_equal(velo0.value, 0, decimal=6)
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_frequency_circle(function):
rest = 105.01 * u.GHz
shifted = 105.03 * u.GHz
velo = shifted.to(u.km/u.s, equivalencies=function(rest))
freq = velo.to(u.GHz, equivalencies=function(rest))
np.testing.assert_almost_equal(freq.value, shifted.value, decimal=7)
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_wavelength_circle(function):
rest = 105.01 * u.nm
shifted = 105.03 * u.nm
velo = shifted.to(u.km / u.s, equivalencies=function(rest))
wav = velo.to(u.nm, equivalencies=function(rest))
np.testing.assert_almost_equal(wav.value, shifted.value, decimal=7)
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_energy_circle(function):
rest = 1.0501 * u.eV
shifted = 1.0503 * u.eV
velo = shifted.to(u.km / u.s, equivalencies=function(rest))
en = velo.to(u.eV, equivalencies=function(rest))
np.testing.assert_almost_equal(en.value, shifted.value, decimal=7)
values_ghz = (999.899940784289, 999.8999307714406, 999.8999357778647)
@pytest.mark.parametrize(('function', 'value'),
list(zip(doppler_functions, values_ghz)))
def test_30kms(function, value):
rest = 1000 * u.GHz
velo = 30 * u.km/u.s
shifted = velo.to(u.GHz, equivalencies=function(rest))
np.testing.assert_almost_equal(shifted.value, value, decimal=7)
bad_values = (5, 5*u.Jy, None)
@pytest.mark.parametrize(('function', 'value'),
list(zip(doppler_functions, bad_values)))
def test_bad_restfreqs(function, value):
with pytest.raises(u.UnitsError):
function(value)
@pytest.mark.parametrize(('z', 'rv_ans'),
[(0, 0 * (u.km / u.s)),
(0.001, 299642.56184583 * (u.m / u.s)),
(-1, -2.99792458e8 * (u.m / u.s))])
def test_doppler_redshift(z, rv_ans):
z_in = z * u.dimensionless_unscaled
rv_out = z_in.to(u.km / u.s, u.doppler_redshift())
z_out = rv_out.to(u.dimensionless_unscaled, u.doppler_redshift())
assert_quantity_allclose(rv_out, rv_ans)
assert_quantity_allclose(z_out, z_in) # Check roundtrip
def test_doppler_redshift_no_cosmology():
from astropy.cosmology.units import redshift
with pytest.raises(u.UnitConversionError, match='not convertible'):
(0 * (u.km / u.s)).to(redshift, u.doppler_redshift())
def test_massenergy():
# The relative tolerance of these tests is set by the uncertainties
# in the charge of the electron, which is known to about
# 3e-9 (relative tolerance). Therefore, we limit the
# precision of the tests to 1e-7 to be safe. The masses are
# (loosely) known to ~ 5e-8 rel tolerance, so we couldn't test to
# 1e-7 if we used the values from astropy.constants; that is,
# they might change by more than 1e-7 in some future update, so instead
# they are hardwired here.
# Electron, proton, neutron, muon, 1g
mass_eV = u.Quantity([510.998928e3, 938.272046e6, 939.565378e6,
105.6583715e6, 5.60958884539e32], u.eV)
mass_g = u.Quantity([9.10938291e-28, 1.672621777e-24, 1.674927351e-24,
1.88353147e-25, 1], u.g)
# Test both ways
assert np.allclose(mass_eV.to_value(u.g, equivalencies=u.mass_energy()),
mass_g.value, rtol=1e-7)
assert np.allclose(mass_g.to_value(u.eV, equivalencies=u.mass_energy()),
mass_eV.value, rtol=1e-7)
# Basic tests of 'derived' equivalencies
# Surface density
sdens_eV = u.Quantity(5.60958884539e32, u.eV / u.m**2)
sdens_g = u.Quantity(1e-4, u.g / u.cm**2)
assert np.allclose(sdens_eV.to_value(u.g / u.cm**2,
equivalencies=u.mass_energy()),
sdens_g.value, rtol=1e-7)
assert np.allclose(sdens_g.to_value(u.eV / u.m**2,
equivalencies=u.mass_energy()),
sdens_eV.value, rtol=1e-7)
# Density
dens_eV = u.Quantity(5.60958884539e32, u.eV / u.m**3)
dens_g = u.Quantity(1e-6, u.g / u.cm**3)
assert np.allclose(dens_eV.to_value(u.g / u.cm**3,
equivalencies=u.mass_energy()),
dens_g.value, rtol=1e-7)
assert np.allclose(dens_g.to_value(u.eV / u.m**3,
equivalencies=u.mass_energy()),
dens_eV.value, rtol=1e-7)
# Power
pow_eV = u.Quantity(5.60958884539e32, u.eV / u.s)
pow_g = u.Quantity(1, u.g / u.s)
assert np.allclose(pow_eV.to_value(u.g / u.s,
equivalencies=u.mass_energy()),
pow_g.value, rtol=1e-7)
assert np.allclose(pow_g.to_value(u.eV / u.s,
equivalencies=u.mass_energy()),
pow_eV.value, rtol=1e-7)
def test_is_equivalent():
assert u.m.is_equivalent(u.pc)
assert u.cycle.is_equivalent(u.mas)
assert not u.cycle.is_equivalent(u.dimensionless_unscaled)
assert u.cycle.is_equivalent(u.dimensionless_unscaled,
u.dimensionless_angles())
assert not (u.Hz.is_equivalent(u.J))
assert u.Hz.is_equivalent(u.J, u.spectral())
assert u.J.is_equivalent(u.Hz, u.spectral())
assert u.pc.is_equivalent(u.arcsecond, u.parallax())
assert u.arcminute.is_equivalent(u.au, u.parallax())
# Pass a tuple for multiple possibilities
assert u.cm.is_equivalent((u.m, u.s, u.kg))
assert u.ms.is_equivalent((u.m, u.s, u.kg))
assert u.g.is_equivalent((u.m, u.s, u.kg))
assert not u.L.is_equivalent((u.m, u.s, u.kg))
assert not (u.km / u.s).is_equivalent((u.m, u.s, u.kg))
def test_parallax():
a = u.arcsecond.to(u.pc, 10, u.parallax())
assert_allclose(a, 0.10, rtol=1.e-12)
b = u.pc.to(u.arcsecond, a, u.parallax())
assert_allclose(b, 10, rtol=1.e-12)
a = u.arcminute.to(u.au, 1, u.parallax())
assert_allclose(a, 3437.746770785, rtol=1.e-12)
b = u.au.to(u.arcminute, a, u.parallax())
assert_allclose(b, 1, rtol=1.e-12)
val = (-1 * u.mas).to(u.pc, u.parallax())
assert np.isnan(val.value)
val = (-1 * u.mas).to_value(u.pc, u.parallax())
assert np.isnan(val)
def test_parallax2():
a = u.arcsecond.to(u.pc, [0.1, 2.5], u.parallax())
assert_allclose(a, [10, 0.4], rtol=1.e-12)
def test_spectral():
a = u.AA.to(u.Hz, 1, u.spectral())
assert_allclose(a, 2.9979245799999995e+18)
b = u.Hz.to(u.AA, a, u.spectral())
assert_allclose(b, 1)
a = u.AA.to(u.MHz, 1, u.spectral())
assert_allclose(a, 2.9979245799999995e+12)
b = u.MHz.to(u.AA, a, u.spectral())
assert_allclose(b, 1)
a = u.m.to(u.Hz, 1, u.spectral())
assert_allclose(a, 2.9979245799999995e+8)
b = u.Hz.to(u.m, a, u.spectral())
assert_allclose(b, 1)
def test_spectral2():
a = u.nm.to(u.J, 500, u.spectral())
assert_allclose(a, 3.972891366538605e-19)
b = u.J.to(u.nm, a, u.spectral())
assert_allclose(b, 500)
a = u.AA.to(u.Hz, 1, u.spectral())
b = u.Hz.to(u.J, a, u.spectral())
c = u.AA.to(u.J, 1, u.spectral())
assert_allclose(b, c)
c = u.J.to(u.Hz, b, u.spectral())
assert_allclose(a, c)
def test_spectral3():
a = u.nm.to(u.Hz, [1000, 2000], u.spectral())
assert_allclose(a, [2.99792458e+14, 1.49896229e+14])
@pytest.mark.parametrize(
('in_val', 'in_unit'),
[([0.1, 5000.0, 10000.0], u.AA),
([1e+5, 2.0, 1.0], u.micron ** -1),
([2.99792458e+19, 5.99584916e+14, 2.99792458e+14], u.Hz),
([1.98644568e-14, 3.97289137e-19, 1.98644568e-19], u.J)])
def test_spectral4(in_val, in_unit):
"""Wave number conversion w.r.t. wavelength, freq, and energy."""
# Spectroscopic and angular
out_units = [u.micron ** -1, u.radian / u.micron]
answers = [[1e+5, 2.0, 1.0], [6.28318531e+05, 12.5663706, 6.28318531]]
for out_unit, ans in zip(out_units, answers):
# Forward
a = in_unit.to(out_unit, in_val, u.spectral())
assert_allclose(a, ans)
# Backward
b = out_unit.to(in_unit, ans, u.spectral())
assert_allclose(b, in_val)
@pytest.mark.parametrize('wav', (3500 * u.AA,
8.5654988e+14 * u.Hz,
1 / (3500 * u.AA),
5.67555959e-19 * u.J))
def test_spectraldensity2(wav):
# flux density
flambda = u.erg / u.angstrom / u.cm ** 2 / u.s
fnu = u.erg / u.Hz / u.cm ** 2 / u.s
a = flambda.to(fnu, 1, u.spectral_density(wav))
assert_allclose(a, 4.086160166177361e-12)
# integrated flux
f_int = u.erg / u.cm ** 2 / u.s
phot_int = u.ph / u.cm ** 2 / u.s
a = f_int.to(phot_int, 1, u.spectral_density(wav))
assert_allclose(a, 1.7619408e+11)
a = phot_int.to(f_int, 1, u.spectral_density(wav))
assert_allclose(a, 5.67555959e-12)
# luminosity density
llambda = u.erg / u.angstrom / u.s
lnu = u.erg / u.Hz / u.s
a = llambda.to(lnu, 1, u.spectral_density(wav))
assert_allclose(a, 4.086160166177361e-12)
a = lnu.to(llambda, 1, u.spectral_density(wav))
assert_allclose(a, 2.44728537142857e11)
def test_spectraldensity3():
# Define F_nu in Jy
f_nu = u.Jy
# Define F_lambda in ergs / cm^2 / s / micron
f_lambda = u.erg / u.cm ** 2 / u.s / u.micron
# 1 GHz
one_ghz = u.Quantity(1, u.GHz)
# Convert to ergs / cm^2 / s / Hz
assert_allclose(f_nu.to(u.erg / u.cm ** 2 / u.s / u.Hz, 1.), 1.e-23, 10)
# Convert to ergs / cm^2 / s at 10 Ghz
assert_allclose(f_nu.to(u.erg / u.cm ** 2 / u.s, 1.,
equivalencies=u.spectral_density(one_ghz * 10)),
1.e-13, 10)
# Convert to F_lambda at 1 Ghz
assert_allclose(f_nu.to(f_lambda, 1.,
equivalencies=u.spectral_density(one_ghz)),
3.335640951981521e-20, 10)
# Convert to Jy at 1 Ghz
assert_allclose(f_lambda.to(u.Jy, 1.,
equivalencies=u.spectral_density(one_ghz)),
1. / 3.335640951981521e-20, 10)
# Convert to ergs / cm^2 / s at 10 microns
assert_allclose(f_lambda.to(u.erg / u.cm ** 2 / u.s, 1.,
equivalencies=u.spectral_density(u.Quantity(10, u.micron))),
10., 10)
def test_spectraldensity4():
"""PHOTLAM and PHOTNU conversions."""
flam = u.erg / (u.cm ** 2 * u.s * u.AA)
fnu = u.erg / (u.cm ** 2 * u.s * u.Hz)
photlam = u.photon / (u.cm ** 2 * u.s * u.AA)
photnu = u.photon / (u.cm ** 2 * u.s * u.Hz)
wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
flux_photlam = [9.7654e-3, 1.003896e-2, 9.78473e-3]
flux_photnu = [8.00335589e-14, 8.23668949e-14, 8.03700310e-14]
flux_flam = [3.9135e-14, 4.0209e-14, 3.9169e-14]
flux_fnu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
flux_jy = [3.20735792e-2, 3.29903646e-2, 3.21727226e-2]
flux_stmag = [12.41858665, 12.38919182, 12.41764379]
flux_abmag = [12.63463143, 12.60403221, 12.63128047]
# PHOTLAM <--> FLAM
assert_allclose(photlam.to(
flam, flux_photlam, u.spectral_density(wave)), flux_flam, rtol=1e-6)
assert_allclose(flam.to(
photlam, flux_flam, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
# PHOTLAM <--> FNU
assert_allclose(photlam.to(
fnu, flux_photlam, u.spectral_density(wave)), flux_fnu, rtol=1e-6)
assert_allclose(fnu.to(
photlam, flux_fnu, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
# PHOTLAM <--> Jy
assert_allclose(photlam.to(
u.Jy, flux_photlam, u.spectral_density(wave)), flux_jy, rtol=1e-6)
assert_allclose(u.Jy.to(
photlam, flux_jy, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
# PHOTLAM <--> PHOTNU
assert_allclose(photlam.to(
photnu, flux_photlam, u.spectral_density(wave)), flux_photnu, rtol=1e-6)
assert_allclose(photnu.to(
photlam, flux_photnu, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
# PHOTNU <--> FNU
assert_allclose(photnu.to(
fnu, flux_photnu, u.spectral_density(wave)), flux_fnu, rtol=1e-6)
assert_allclose(fnu.to(
photnu, flux_fnu, u.spectral_density(wave)), flux_photnu, rtol=1e-6)
# PHOTNU <--> FLAM
assert_allclose(photnu.to(
flam, flux_photnu, u.spectral_density(wave)), flux_flam, rtol=1e-6)
assert_allclose(flam.to(
photnu, flux_flam, u.spectral_density(wave)), flux_photnu, rtol=1e-6)
# PHOTLAM <--> STMAG
assert_allclose(photlam.to(
u.STmag, flux_photlam, u.spectral_density(wave)), flux_stmag, rtol=1e-6)
assert_allclose(u.STmag.to(
photlam, flux_stmag, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
# PHOTLAM <--> ABMAG
assert_allclose(photlam.to(
u.ABmag, flux_photlam, u.spectral_density(wave)), flux_abmag, rtol=1e-6)
assert_allclose(u.ABmag.to(
photlam, flux_abmag, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
def test_spectraldensity5():
""" Test photon luminosity density conversions. """
L_la = u.erg / (u.s * u.AA)
L_nu = u.erg / (u.s * u.Hz)
phot_L_la = u.photon / (u.s * u.AA)
phot_L_nu = u.photon / (u.s * u.Hz)
wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
flux_phot_L_la = [9.7654e-3, 1.003896e-2, 9.78473e-3]
flux_phot_L_nu = [8.00335589e-14, 8.23668949e-14, 8.03700310e-14]
flux_L_la = [3.9135e-14, 4.0209e-14, 3.9169e-14]
flux_L_nu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
# PHOTLAM <--> FLAM
assert_allclose(phot_L_la.to(
L_la, flux_phot_L_la, u.spectral_density(wave)), flux_L_la, rtol=1e-6)
assert_allclose(L_la.to(
phot_L_la, flux_L_la, u.spectral_density(wave)), flux_phot_L_la, rtol=1e-6)
# PHOTLAM <--> FNU
assert_allclose(phot_L_la.to(
L_nu, flux_phot_L_la, u.spectral_density(wave)), flux_L_nu, rtol=1e-6)
assert_allclose(L_nu.to(
phot_L_la, flux_L_nu, u.spectral_density(wave)), flux_phot_L_la, rtol=1e-6)
# PHOTLAM <--> PHOTNU
assert_allclose(phot_L_la.to(
phot_L_nu, flux_phot_L_la, u.spectral_density(wave)), flux_phot_L_nu, rtol=1e-6)
assert_allclose(phot_L_nu.to(
phot_L_la, flux_phot_L_nu, u.spectral_density(wave)), flux_phot_L_la, rtol=1e-6)
# PHOTNU <--> FNU
assert_allclose(phot_L_nu.to(
L_nu, flux_phot_L_nu, u.spectral_density(wave)), flux_L_nu, rtol=1e-6)
assert_allclose(L_nu.to(
phot_L_nu, flux_L_nu, u.spectral_density(wave)), flux_phot_L_nu, rtol=1e-6)
# PHOTNU <--> FLAM
assert_allclose(phot_L_nu.to(
L_la, flux_phot_L_nu, u.spectral_density(wave)), flux_L_la, rtol=1e-6)
assert_allclose(L_la.to(
phot_L_nu, flux_L_la, u.spectral_density(wave)), flux_phot_L_nu, rtol=1e-6)
def test_spectraldensity6():
""" Test surface brightness conversions. """
slam = u.erg / (u.cm ** 2 * u.s * u.AA * u.sr)
snu = u.erg / (u.cm ** 2 * u.s * u.Hz * u.sr)
wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
sb_flam = [3.9135e-14, 4.0209e-14, 3.9169e-14]
sb_fnu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
# S(nu) <--> S(lambda)
assert_allclose(snu.to(
slam, sb_fnu, u.spectral_density(wave)), sb_flam, rtol=1e-6)
assert_allclose(slam.to(
snu, sb_flam, u.spectral_density(wave)), sb_fnu, rtol=1e-6)
@pytest.mark.parametrize(
('from_unit', 'to_unit'),
[(u.ph / u.cm ** 2 / u.s, (u.cm * u.cm * u.s) ** -1),
(u.ph / u.cm ** 2 / u.s, u.erg / (u.cm * u.cm * u.s * u.keV)),
(u.erg / u.cm ** 2 / u.s, (u.cm * u.cm * u.s) ** -1),
(u.erg / u.cm ** 2 / u.s, u.erg / (u.cm * u.cm * u.s * u.keV))])
def test_spectraldensity_not_allowed(from_unit, to_unit):
"""Not allowed to succeed as
per https://github.com/astropy/astropy/pull/10015
"""
with pytest.raises(u.UnitConversionError, match='not convertible'):
from_unit.to(to_unit, 1, u.spectral_density(1 * u.AA))
# The other way
with pytest.raises(u.UnitConversionError, match='not convertible'):
to_unit.to(from_unit, 1, u.spectral_density(1 * u.AA))
def test_equivalent_units():
from astropy.units import imperial
with u.add_enabled_units(imperial):
units = u.g.find_equivalent_units()
units_set = set(units)
match = {
u.M_e, u.M_p, u.g, u.kg, u.solMass, u.t, u.u, u.M_earth,
u.M_jup, imperial.oz, imperial.lb, imperial.st, imperial.ton,
imperial.slug}
assert units_set == match
r = repr(units)
assert r.count('\n') == len(units) + 2
def test_equivalent_units2():
units = set(u.Hz.find_equivalent_units(u.spectral()))
match = {
u.AU, u.Angstrom, u.Hz, u.J, u.Ry, u.cm, u.eV, u.erg, u.lyr, u.lsec,
u.m, u.micron, u.pc, u.solRad, u.Bq, u.Ci, u.k, u.earthRad,
u.jupiterRad}
assert units == match
from astropy.units import imperial
with u.add_enabled_units(imperial):
units = set(u.Hz.find_equivalent_units(u.spectral()))
match = {
u.AU, u.Angstrom, imperial.BTU, u.Hz, u.J, u.Ry,
imperial.cal, u.cm, u.eV, u.erg, imperial.ft, imperial.fur,
imperial.inch, imperial.kcal, u.lyr, u.m, imperial.mi, u.lsec,
imperial.mil, u.micron, u.pc, u.solRad, imperial.yd, u.Bq, u.Ci,
imperial.nmi, u.k, u.earthRad, u.jupiterRad}
assert units == match
units = set(u.Hz.find_equivalent_units(u.spectral()))
match = {
u.AU, u.Angstrom, u.Hz, u.J, u.Ry, u.cm, u.eV, u.erg, u.lyr, u.lsec,
u.m, u.micron, u.pc, u.solRad, u.Bq, u.Ci, u.k, u.earthRad,
u.jupiterRad}
assert units == match
def test_trivial_equivalency():
assert u.m.to(u.kg, equivalencies=[(u.m, u.kg)]) == 1.0
def test_invalid_equivalency():
with pytest.raises(ValueError):
u.m.to(u.kg, equivalencies=[(u.m,)])
with pytest.raises(ValueError):
u.m.to(u.kg, equivalencies=[(u.m, 5.0)])
def test_irrelevant_equivalency():
with pytest.raises(u.UnitsError):
u.m.to(u.kg, equivalencies=[(u.m, u.l)])
def test_brightness_temperature():
omega_B = np.pi * (50 * u.arcsec) ** 2
nu = u.GHz * 5
tb = 7.052587837212582 * u.K
np.testing.assert_almost_equal(
tb.value, (1 * u.Jy).to_value(
u.K, equivalencies=u.brightness_temperature(nu, beam_area=omega_B)))
np.testing.assert_almost_equal(
1.0, tb.to_value(
u.Jy, equivalencies=u.brightness_temperature(nu, beam_area=omega_B)))
def test_swapped_args_brightness_temperature():
"""
#5173 changes the order of arguments but accepts the old (deprecated) args
"""
omega_B = np.pi * (50 * u.arcsec) ** 2
nu = u.GHz * 5
tb = 7.052587837212582 * u.K
with pytest.warns(AstropyDeprecationWarning) as w:
result = (1*u.Jy).to(
u.K, equivalencies=u.brightness_temperature(omega_B, nu))
roundtrip = result.to(
u.Jy, equivalencies=u.brightness_temperature(omega_B, nu))
assert len(w) == 2
np.testing.assert_almost_equal(tb.value, result.value)
np.testing.assert_almost_equal(roundtrip.value, 1)
def test_surfacebrightness():
sb = 50*u.MJy/u.sr
k = sb.to(u.K, u.brightness_temperature(50*u.GHz))
np.testing.assert_almost_equal(k.value, 0.650965, 5)
assert k.unit.is_equivalent(u.K)
def test_beam():
    # pick a beam area: 2 pi r^2 = area of a Gaussian with sigma=50 arcsec
omega_B = 2 * np.pi * (50 * u.arcsec) ** 2
new_beam = (5*u.beam).to(u.sr, u.equivalencies.beam_angular_area(omega_B))
np.testing.assert_almost_equal(omega_B.to(u.sr).value * 5, new_beam.value)
assert new_beam.unit.is_equivalent(u.sr)
# make sure that it's still consistent with 5 beams
nbeams = new_beam.to(u.beam, u.equivalencies.beam_angular_area(omega_B))
np.testing.assert_almost_equal(nbeams.value, 5)
# test inverse beam equivalency
# (this is just a sanity check that the equivalency is defined;
# it's not for testing numerical consistency)
(5/u.beam).to(1/u.sr, u.equivalencies.beam_angular_area(omega_B))
# test practical case
# (this is by far the most important one)
flux_density = (5*u.Jy/u.beam).to(u.MJy/u.sr, u.equivalencies.beam_angular_area(omega_B))
np.testing.assert_almost_equal(flux_density.value, 13.5425483146382)
def test_thermodynamic_temperature():
nu = 143 * u.GHz
tb = 0.0026320501262630277 * u.K
eq = u.thermodynamic_temperature(nu, T_cmb=2.7255 * u.K)
np.testing.assert_almost_equal(
tb.value, (1 * (u.MJy / u.sr)).to_value(u.K, equivalencies=eq))
np.testing.assert_almost_equal(
1.0, tb.to_value(u.MJy / u.sr, equivalencies=eq))
def test_equivalency_context():
with u.set_enabled_equivalencies(u.dimensionless_angles()):
phase = u.Quantity(1., u.cycle)
assert_allclose(np.exp(1j*phase), 1.)
Omega = u.cycle / (1.*u.minute)
assert_allclose(np.exp(1j*Omega*60.*u.second), 1.)
# ensure we can turn off equivalencies even within the scope
with pytest.raises(u.UnitsError):
phase.to(1, equivalencies=None)
# test the manager also works in the Quantity constructor.
q1 = u.Quantity(phase, u.dimensionless_unscaled)
assert_allclose(q1.value, u.cycle.to(u.radian))
# and also if we use a class that happens to have a unit attribute.
class MyQuantityLookalike(np.ndarray):
pass
mylookalike = np.array(1.).view(MyQuantityLookalike)
mylookalike.unit = 'cycle'
# test the manager also works in the Quantity constructor.
q2 = u.Quantity(mylookalike, u.dimensionless_unscaled)
assert_allclose(q2.value, u.cycle.to(u.radian))
with u.set_enabled_equivalencies(u.spectral()):
u.GHz.to(u.cm)
eq_on = u.GHz.find_equivalent_units()
with pytest.raises(u.UnitsError):
u.GHz.to(u.cm, equivalencies=None)
# without equivalencies, we should find a smaller (sub)set
eq_off = u.GHz.find_equivalent_units()
assert all(eq in set(eq_on) for eq in eq_off)
assert set(eq_off) < set(eq_on)
# Check the equivalency manager also works in ufunc evaluations,
# not just using (wrong) scaling. [#2496]
l2v = u.doppler_optical(6000 * u.angstrom)
l1 = 6010 * u.angstrom
assert l1.to(u.km/u.s, equivalencies=l2v) > 100. * u.km / u.s
with u.set_enabled_equivalencies(l2v):
assert l1 > 100. * u.km / u.s
assert abs((l1 - 500. * u.km / u.s).to(u.angstrom)) < 1. * u.km/u.s
def test_equivalency_context_manager():
base_registry = u.get_current_unit_registry()
def just_to_from_units(equivalencies):
return [(equiv[0], equiv[1]) for equiv in equivalencies]
tf_dimensionless_angles = just_to_from_units(u.dimensionless_angles())
tf_spectral = just_to_from_units(u.spectral())
    # <= 1 because the dimensionless_redshift equivalency might be enabled.
assert len(base_registry.equivalencies) <= 1
with u.set_enabled_equivalencies(u.dimensionless_angles()):
new_registry = u.get_current_unit_registry()
assert (set(just_to_from_units(new_registry.equivalencies)) ==
set(tf_dimensionless_angles))
assert set(new_registry.all_units) == set(base_registry.all_units)
with u.set_enabled_equivalencies(u.spectral()):
newer_registry = u.get_current_unit_registry()
assert (set(just_to_from_units(newer_registry.equivalencies)) ==
set(tf_spectral))
assert (set(newer_registry.all_units) ==
set(base_registry.all_units))
assert (set(just_to_from_units(new_registry.equivalencies)) ==
set(tf_dimensionless_angles))
assert set(new_registry.all_units) == set(base_registry.all_units)
with u.add_enabled_equivalencies(u.spectral()):
newer_registry = u.get_current_unit_registry()
assert (set(just_to_from_units(newer_registry.equivalencies)) ==
set(tf_dimensionless_angles) | set(tf_spectral))
assert (set(newer_registry.all_units) ==
set(base_registry.all_units))
assert base_registry is u.get_current_unit_registry()
def test_temperature():
from astropy.units.imperial import deg_F, deg_R
t_k = 0 * u.K
assert_allclose(t_k.to_value(u.deg_C, u.temperature()), -273.15)
assert_allclose(t_k.to_value(deg_F, u.temperature()), -459.67)
t_k = 20 * u.K
assert_allclose(t_k.to_value(deg_R, u.temperature()), 36.0)
t_k = 20 * deg_R
assert_allclose(t_k.to_value(u.K, u.temperature()), 11.11, atol=0.01)
t_k = 20 * deg_F
assert_allclose(t_k.to_value(deg_R, u.temperature()), 479.67)
t_k = 20 * deg_R
assert_allclose(t_k.to_value(deg_F, u.temperature()), -439.67)
t_k = 20 * u.deg_C
assert_allclose(t_k.to_value(deg_R, u.temperature()), 527.67)
t_k = 20 * deg_R
assert_allclose(t_k.to_value(u.deg_C, u.temperature()), -262.039, atol=0.01)
def test_temperature_energy():
x = 1000 * u.K
y = (x * constants.k_B).to(u.keV)
assert_allclose(x.to_value(u.keV, u.temperature_energy()), y.value)
assert_allclose(y.to_value(u.K, u.temperature_energy()), x.value)
def test_molar_mass_amu():
x = 1 * (u.g/u.mol)
y = 1 * u.u
assert_allclose(x.to_value(u.u, u.molar_mass_amu()), y.value)
assert_allclose(y.to_value(u.g/u.mol, u.molar_mass_amu()), x.value)
with pytest.raises(u.UnitsError):
x.to(u.u)
def test_compose_equivalencies():
x = u.Unit("arcsec").compose(units=(u.pc,), equivalencies=u.parallax())
assert x[0] == u.pc
x = u.Unit("2 arcsec").compose(units=(u.pc,), equivalencies=u.parallax())
assert x[0] == u.Unit(0.5 * u.pc)
x = u.degree.compose(equivalencies=u.dimensionless_angles())
assert u.Unit(u.degree.to(u.radian)) in x
x = (u.nm).compose(units=(u.m, u.s), equivalencies=u.doppler_optical(0.55*u.micron))
for y in x:
if y.bases == [u.m, u.s]:
assert y.powers == [1, -1]
assert_allclose(
y.scale,
u.nm.to(u.m / u.s, equivalencies=u.doppler_optical(0.55 * u.micron)))
break
else:
assert False, "Didn't find speed in compose results"
def test_pixel_scale():
pix = 75*u.pix
asec = 30*u.arcsec
pixscale = 0.4*u.arcsec/u.pix
pixscale2 = 2.5*u.pix/u.arcsec
assert_quantity_allclose(pix.to(u.arcsec, u.pixel_scale(pixscale)), asec)
assert_quantity_allclose(pix.to(u.arcmin, u.pixel_scale(pixscale)), asec)
assert_quantity_allclose(pix.to(u.arcsec, u.pixel_scale(pixscale2)), asec)
assert_quantity_allclose(pix.to(u.arcmin, u.pixel_scale(pixscale2)), asec)
assert_quantity_allclose(asec.to(u.pix, u.pixel_scale(pixscale)), pix)
assert_quantity_allclose(asec.to(u.pix, u.pixel_scale(pixscale2)), pix)
def test_pixel_scale_invalid_scale_unit():
pixscale = 0.4 * u.arcsec
pixscale2 = 0.4 * u.arcsec / u.pix ** 2
with pytest.raises(u.UnitsError, match="pixel dimension"):
u.pixel_scale(pixscale)
with pytest.raises(u.UnitsError, match="pixel dimension"):
u.pixel_scale(pixscale2)
def test_pixel_scale_acceptable_scale_unit():
pix = 75 * u.pix
v = 3000 * (u.cm / u.s)
pixscale = 0.4 * (u.m / u.s / u.pix)
pixscale2 = 2.5 * (u.pix / (u.m / u.s))
assert_quantity_allclose(pix.to(u.m / u.s, u.pixel_scale(pixscale)), v)
assert_quantity_allclose(pix.to(u.km / u.s, u.pixel_scale(pixscale)), v)
assert_quantity_allclose(pix.to(u.m / u.s, u.pixel_scale(pixscale2)), v)
assert_quantity_allclose(pix.to(u.km / u.s, u.pixel_scale(pixscale2)), v)
assert_quantity_allclose(v.to(u.pix, u.pixel_scale(pixscale)), pix)
assert_quantity_allclose(v.to(u.pix, u.pixel_scale(pixscale2)), pix)
def test_plate_scale():
mm = 1.5*u.mm
asec = 30*u.arcsec
platescale = 20*u.arcsec/u.mm
platescale2 = 0.05*u.mm/u.arcsec
assert_quantity_allclose(mm.to(u.arcsec, u.plate_scale(platescale)), asec)
assert_quantity_allclose(mm.to(u.arcmin, u.plate_scale(platescale)), asec)
assert_quantity_allclose(mm.to(u.arcsec, u.plate_scale(platescale2)), asec)
assert_quantity_allclose(mm.to(u.arcmin, u.plate_scale(platescale2)), asec)
assert_quantity_allclose(asec.to(u.mm, u.plate_scale(platescale)), mm)
assert_quantity_allclose(asec.to(u.mm, u.plate_scale(platescale2)), mm)
def test_equivalency():
ps = u.pixel_scale(10*u.arcsec/u.pix)
assert isinstance(ps, Equivalency)
assert isinstance(ps.name, list)
assert len(ps.name) == 1
assert ps.name[0] == "pixel_scale"
assert isinstance(ps.kwargs, list)
assert len(ps.kwargs) == 1
assert ps.kwargs[0] == dict({'pixscale': 10*u.arcsec/u.pix})
def test_add_equivalencies():
e1 = u.pixel_scale(10*u.arcsec/u.pixel) + u.temperature_energy()
assert isinstance(e1, Equivalency)
assert e1.name == ["pixel_scale", "temperature_energy"]
assert isinstance(e1.kwargs, list)
assert e1.kwargs == [dict({'pixscale': 10*u.arcsec/u.pix}), dict()]
e2 = u.pixel_scale(10*u.arcsec/u.pixel) + [1, 2, 3]
assert isinstance(e2, list)
def test_pprint():
pprint_class = u.UnitBase.EquivalentUnitsList
equiv_units_to_Hz = u.Hz.find_equivalent_units()
assert pprint_class.__repr__(equiv_units_to_Hz).splitlines() == [
' Primary name | Unit definition | Aliases ',
'[',
' Bq | 1 / s | becquerel ,',
' Ci | 3.7e+10 / s | curie ,',
' Hz | 1 / s | Hertz, hertz ,',
']'
]
assert pprint_class._repr_html_(equiv_units_to_Hz) == (
'<table style="width:50%">'
'<tr><th>Primary name</th><th>Unit definition</th>'
'<th>Aliases</th></tr>'
'<tr><td>Bq</td><td>1 / s</td><td>becquerel</td></tr>'
'<tr><td>Ci</td><td>3.7e+10 / s</td><td>curie</td></tr>'
'<tr><td>Hz</td><td>1 / s</td><td>Hertz, hertz</td></tr></table>'
)
|
34869b71d27c185cee841bc50d604338d2f608acb3577327155943d5a8483b10 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test setting and adding unit aliases."""
import pytest
import astropy.units as u
trials = [
({"Angstroms": u.AA}, "Angstroms", u.AA),
({"counts": u.count}, "counts/s", u.count / u.s),
({"ergs": u.erg, "Angstroms": u.AA},
"ergs/(s cm**2 Angstroms)", u.erg / (u.s * u.cm**2 * u.AA))]
class TestAliases:
def teardown_method(self):
u.set_enabled_aliases({})
def teardown_class(self):
assert u.get_current_unit_registry().aliases == {}
@pytest.mark.parametrize('format_', [None, 'fits', 'ogip', 'vounit', 'cds'])
@pytest.mark.parametrize('aliases,bad,unit', trials)
def test_set_enabled_aliases_context_manager(self, aliases, bad, unit, format_):
if format_ == 'cds':
bad = bad.replace(' ', '.').replace('**', '')
with u.set_enabled_aliases(aliases):
assert u.get_current_unit_registry().aliases == aliases
assert u.Unit(bad) == unit
assert u.get_current_unit_registry().aliases == {}
with pytest.raises(ValueError):
u.Unit(bad)
@pytest.mark.parametrize('aliases,bad,unit', trials)
def test_add_enabled_aliases_context_manager(self, aliases, bad, unit):
with u.add_enabled_aliases(aliases):
assert u.get_current_unit_registry().aliases == aliases
assert u.Unit(bad) == unit
assert u.get_current_unit_registry().aliases == {}
with pytest.raises(ValueError):
u.Unit(bad)
def test_set_enabled_aliases(self):
for i, (aliases, bad, unit) in enumerate(trials):
u.set_enabled_aliases(aliases)
assert u.get_current_unit_registry().aliases == aliases
assert u.Unit(bad) == unit
for _, bad2, unit2 in trials:
if bad2 == bad or bad2 in aliases:
assert u.Unit(bad2) == unit2
else:
with pytest.raises(ValueError):
u.Unit(bad2)
def test_add_enabled_aliases(self):
expected_aliases = {}
for i, (aliases, bad, unit) in enumerate(trials):
u.add_enabled_aliases(aliases)
expected_aliases.update(aliases)
assert u.get_current_unit_registry().aliases == expected_aliases
assert u.Unit(bad) == unit
for j, (_, bad2, unit2) in enumerate(trials):
if j <= i:
assert u.Unit(bad2) == unit2
else:
with pytest.raises(ValueError):
u.Unit(bad2)
def test_cannot_alias_existing_unit(self):
with pytest.raises(ValueError, match='already means'):
u.set_enabled_aliases({'pct': u.Unit(1e-12*u.count)})
def test_cannot_alias_existing_alias_to_another_unit(self):
u.set_enabled_aliases({'counts': u.count})
with pytest.raises(ValueError, match='already is an alias'):
u.add_enabled_aliases({'counts': u.adu})
|
e54dc05ac60e249c13b7d407dbbdb244bc2bf3fd9bdc09f9595948727668ce87 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test utilities for `astropy.units`.
"""
import numpy as np
from numpy import finfo
from astropy.units.quantity import Quantity
from astropy.units.utils import quantity_asanyarray, sanitize_scale
_float_finfo = finfo(float)
def test_quantity_asanyarray():
array_of_quantities = [Quantity(1), Quantity(2), Quantity(3)]
quantity_array = quantity_asanyarray(array_of_quantities)
assert isinstance(quantity_array, Quantity)
assert np.issubdtype(quantity_array.dtype, np.inexact)
array_of_integers = [1, 2, 3]
np_array = quantity_asanyarray(array_of_integers)
assert isinstance(np_array, np.ndarray)
assert np.issubdtype(np_array.dtype, np.integer)
np_array = quantity_asanyarray(array_of_integers, dtype=np.inexact)
assert np.issubdtype(np_array.dtype, np.inexact)
def test_sanitize_scale():
assert sanitize_scale(complex(2, _float_finfo.eps)) == 2
assert sanitize_scale(complex(_float_finfo.eps, 2)) == 2j
|
cbb0b7055a078f4c47e0f45108db946092de7296321667e852b1a6c7b2ea1645 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Regression tests for deprecated units or those that are "soft" deprecated
because they are required for VOUnit support but are not in common use."""
import pytest
from astropy import units as u
from astropy.units import deprecated, required_by_vounit
def test_emu():
with pytest.raises(AttributeError):
u.emu
assert u.Bi.to(deprecated.emu, 1) == 1
with deprecated.enable():
assert u.Bi.compose()[0] == deprecated.emu
assert u.Bi.compose()[0] == u.Bi
# test that the earth/jupiter mass/rad are also in the deprecated bunch
for body in ('earth', 'jupiter'):
for phystype in ('Mass', 'Rad'):
            # only test a couple of prefixes to save time
for prefix in ('n', 'y'):
namewoprefix = body + phystype
unitname = prefix + namewoprefix
with pytest.raises(AttributeError):
getattr(u, unitname)
assert (getattr(deprecated, unitname).represents.bases[0] ==
getattr(u, namewoprefix))
def test_required_by_vounit():
# The tests below could be replicated with all the various prefixes, but it
# seems unnecessary because they all come as a set. So we only use nano for
# the purposes of this test.
    # nano-solar mass/rad/lum shouldn't be in the base unit namespace;
    # check each separately, since pytest.raises stops at the first error.
    for name in ('nsolMass', 'nsolRad', 'nsolLum'):
        with pytest.raises(AttributeError):
            getattr(u, name)
# but they should be enabled by default via required_by_vounit, to allow
# the Unit constructor to accept them
assert u.Unit('nsolMass') == required_by_vounit.nsolMass
assert u.Unit('nsolRad') == required_by_vounit.nsolRad
assert u.Unit('nsolLum') == required_by_vounit.nsolLum
# but because they are prefixes, they shouldn't be in find_equivalent_units
assert required_by_vounit.nsolMass not in u.solMass.find_equivalent_units()
assert required_by_vounit.nsolRad not in u.solRad.find_equivalent_units()
assert required_by_vounit.nsolLum not in u.solLum.find_equivalent_units()
|
48836c53007cdb2f52301492b11c4f31c3a0529c999e51c9029ab203e3951230 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import sys
import typing as T
# THIRD PARTY
import pytest
# LOCAL
from astropy import units as u
from astropy.units import Quantity
from astropy.units._typing import HAS_ANNOTATED, Annotated
def test_ignore_generic_type_annotations():
"""Test annotations that are not unit related are ignored.
This test passes if the function works.
"""
# one unit, one not (should be ignored)
@u.quantity_input
def func(x: u.m, y: str):
return x, y
i_q, i_str = 2 * u.m, "cool string"
o_q, o_str = func(i_q, i_str) # if this doesn't fail, it worked.
assert i_q == o_q
assert i_str == o_str
@pytest.mark.skipif(not HAS_ANNOTATED, reason="need `Annotated`")
class TestQuantityUnitAnnotations:
"""Test Quantity[Unit] type annotation."""
def test_simple_annotation(self):
@u.quantity_input
def func(x: Quantity[u.m], y: str):
return x, y
i_q, i_str = 2 * u.m, "cool string"
o_q, o_str = func(i_q, i_str)
assert i_q == o_q
assert i_str == o_str
# checks the input on the 1st arg
with pytest.raises(u.UnitsError):
func(1 * u.s, i_str)
# but not the second
o_q, o_str = func(i_q, {"not": "a string"})
assert i_q == o_q
assert i_str != o_str
def test_multiple_annotation(self):
@u.quantity_input
def multi_func(a: Quantity[u.km]) -> Quantity[u.m]:
return a
i_q = 2 * u.km
o_q = multi_func(i_q)
assert o_q == i_q
assert o_q.unit == u.m
@pytest.mark.skipif(not HAS_ANNOTATED, reason="need `Annotated`")
def test_optional_and_annotated(self):
@u.quantity_input
def opt_func(x: T.Optional[Quantity[u.m]] = None) -> Quantity[u.km]:
if x is None:
return 1 * u.km
return x
i_q = 250 * u.m
o_q = opt_func(i_q)
assert o_q.unit == u.km
assert o_q == i_q
i_q = None
o_q = opt_func(i_q)
assert o_q == 1 * u.km
@pytest.mark.skipif(not HAS_ANNOTATED, reason="need `Annotated`")
def test_union_and_annotated(self):
# Union and Annotated
@u.quantity_input
def union_func(x: T.Union[Quantity[u.m], Quantity[u.s], None]):
if x is None:
return None
else:
return 2 * x
i_q = 1 * u.m
o_q = union_func(i_q)
assert o_q == 2 * i_q
i_q = 1 * u.s
o_q = union_func(i_q)
assert o_q == 2 * i_q
i_q = None
o_q = union_func(i_q)
assert o_q is None
def test_not_unit_or_ptype(self):
with pytest.raises(TypeError, match="unit annotation is not"):
Quantity["definitely not a unit"]
@pytest.mark.skipif(HAS_ANNOTATED, reason="requires py3.8 behavior")
def test_not_unit_or_ptype():
"""
    Same as the test above, but with different behavior for Python 3.8,
    because it passes Quantity right through.
"""
with pytest.warns(Warning):
annot = Quantity[u.km]
assert annot == u.km
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.arcsec),
('angle', 'angle')])
def test_args3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
solarx, solary = myfunc_args(1*u.arcsec, 1*u.arcsec)
assert isinstance(solarx, Quantity)
assert isinstance(solary, Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.arcsec
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.arcsec),
('angle', 'angle')])
def test_args_noconvert3(solarx_unit, solary_unit):
@u.quantity_input()
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
solarx, solary = myfunc_args(1*u.deg, 1*u.arcmin)
assert isinstance(solarx, Quantity)
assert isinstance(solary, Quantity)
assert solarx.unit == u.deg
assert solary.unit == u.arcmin
@pytest.mark.parametrize("solarx_unit", [
u.arcsec, 'angle'])
def test_args_nonquantity3(solarx_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary):
return solarx, solary
solarx, solary = myfunc_args(1*u.arcsec, 100)
assert isinstance(solarx, Quantity)
assert isinstance(solary, int)
assert solarx.unit == u.arcsec
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.eV),
('angle', 'energy')])
def test_arg_equivalencies3(solarx_unit, solary_unit):
@u.quantity_input(equivalencies=u.mass_energy())
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary+(10*u.J) # Add an energy to check equiv is working
solarx, solary = myfunc_args(1*u.arcsec, 100*u.gram)
assert isinstance(solarx, Quantity)
assert isinstance(solary, Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.gram
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.deg),
('angle', 'angle')])
def test_wrong_unit3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
with pytest.raises(u.UnitsError) as e:
solarx, solary = myfunc_args(1*u.arcsec, 100*u.km)
str_to = str(solary_unit)
assert str(e.value) == f"Argument 'solary' to function 'myfunc_args' must be in units convertible to '{str_to}'."
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.deg),
('angle', 'angle')])
def test_not_quantity3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
with pytest.raises(TypeError) as e:
solarx, solary = myfunc_args(1*u.arcsec, 100)
assert str(e.value) == "Argument 'solary' to function 'myfunc_args' has no 'unit' attribute. You should pass in an astropy Quantity instead."
def test_decorator_override():
@u.quantity_input(solarx=u.arcsec)
def myfunc_args(solarx: u.km, solary: u.arcsec):
return solarx, solary
solarx, solary = myfunc_args(1*u.arcsec, 1*u.arcsec)
assert isinstance(solarx, Quantity)
assert isinstance(solary, Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.arcsec
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.deg),
('angle', 'angle')])
def test_kwargs3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary, myk: solary_unit=1*u.arcsec):
return solarx, solary, myk
solarx, solary, myk = myfunc_args(1*u.arcsec, 100, myk=100*u.deg)
assert isinstance(solarx, Quantity)
assert isinstance(solary, int)
assert isinstance(myk, Quantity)
assert myk.unit == u.deg
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.deg),
('angle', 'angle')])
def test_unused_kwargs3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary, myk: solary_unit=1*u.arcsec, myk2=1000):
return solarx, solary, myk, myk2
solarx, solary, myk, myk2 = myfunc_args(1*u.arcsec, 100, myk=100*u.deg, myk2=10)
assert isinstance(solarx, Quantity)
assert isinstance(solary, int)
assert isinstance(myk, Quantity)
assert isinstance(myk2, int)
assert myk.unit == u.deg
assert myk2 == 10
@pytest.mark.parametrize("solarx_unit,energy", [
(u.arcsec, u.eV),
('angle', 'energy')])
def test_kwarg_equivalencies3(solarx_unit, energy):
@u.quantity_input(equivalencies=u.mass_energy())
def myfunc_args(solarx: solarx_unit, energy: energy=10*u.eV):
return solarx, energy+(10*u.J) # Add an energy to check equiv is working
solarx, energy = myfunc_args(1*u.arcsec, 100*u.gram)
assert isinstance(solarx, Quantity)
assert isinstance(energy, Quantity)
assert solarx.unit == u.arcsec
assert energy.unit == u.gram
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.deg),
('angle', 'angle')])
def test_kwarg_wrong_unit3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit=10*u.deg):
return solarx, solary
with pytest.raises(u.UnitsError) as e:
solarx, solary = myfunc_args(1*u.arcsec, solary=100*u.km)
str_to = str(solary_unit)
assert str(e.value) == f"Argument 'solary' to function 'myfunc_args' must be in units convertible to '{str_to}'."
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.deg),
('angle', 'angle')])
def test_kwarg_not_quantity3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit=10*u.deg):
return solarx, solary
with pytest.raises(TypeError) as e:
solarx, solary = myfunc_args(1*u.arcsec, solary=100)
assert str(e.value) == "Argument 'solary' to function 'myfunc_args' has no 'unit' attribute. You should pass in an astropy Quantity instead."
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.deg),
('angle', 'angle')])
def test_kwarg_default3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit=10*u.deg):
return solarx, solary
solarx, solary = myfunc_args(1*u.arcsec)
def test_return_annotation():
@u.quantity_input
def myfunc_args(solarx: u.arcsec) -> u.deg:
return solarx
solarx = myfunc_args(1*u.arcsec)
assert solarx.unit is u.deg
def test_return_annotation_none():
@u.quantity_input
def myfunc_args(solarx: u.arcsec) -> None:
pass
solarx = myfunc_args(1*u.arcsec)
assert solarx is None
def test_return_annotation_notUnit():
@u.quantity_input
def myfunc_args(solarx: u.arcsec) -> int:
return 0
solarx = myfunc_args(1*u.arcsec)
assert solarx == 0
def test_enum_annotation():
# Regression test for gh-9932
from enum import Enum, auto
class BasicEnum(Enum):
AnOption = auto()
@u.quantity_input
def myfunc_args(a: BasicEnum, b: u.arcsec) -> None:
pass
myfunc_args(BasicEnum.AnOption, 1*u.arcsec)
|
6d4fb0cf91440a44b4baf3cb3a50aa73e4bb995b2752b85271d7dc2323c1b1e4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the photometric module. Note that this is shorter than
might be expected because a lot of the relevant tests that deal
with magnitudes are in `test_logarithmic.py`.
"""
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import AA, ABflux, Jy, Magnitude, STflux, cm, erg, mgy, nmgy, s, zero_point_flux
def test_maggies():
assert_quantity_allclose(1e-9*mgy, 1*nmgy)
assert_quantity_allclose(Magnitude((1*nmgy).to(mgy)).value, 22.5)
def test_maggies_zpts():
assert_quantity_allclose((1*nmgy).to(ABflux, zero_point_flux(1*ABflux)), 3631e-9*Jy, rtol=1e-3)
ST_base_unit = erg * cm**-2 / s / AA
stmgy = (10*mgy).to(STflux, zero_point_flux(1*ST_base_unit))
assert_quantity_allclose(stmgy, 10*ST_base_unit)
mgyst = (2*ST_base_unit).to(mgy, zero_point_flux(0.5*ST_base_unit))
assert_quantity_allclose(mgyst, 4*mgy)
nmgyst = (5.e-10*ST_base_unit).to(mgy, zero_point_flux(0.5*ST_base_unit))
assert_quantity_allclose(nmgyst, 1*nmgy)
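# Hedged arithmetic note (an illustration, not an astropy test): the 22.5
# asserted in test_maggies above is simply -2.5 * log10(1e-9), i.e. the
# magnitude of one nanomaggy measured relative to one maggy.
def _example_nanomaggy_magnitude():
    import math
    assert math.isclose(-2.5 * math.log10(1e-9), 22.5)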
|
507fe5650dc67d4b6fa192363ece5158384b3b9cb5bb24dacf65c2754233c451 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Regression tests for the units.format package
"""
import warnings
from contextlib import nullcontext
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.constants import si
from astropy.units import UnitsWarning, core, dex
from astropy.units import format as u_format
from astropy.units.utils import is_effectively_unity
@pytest.mark.parametrize('strings, unit', [
(["m s", "m*s", "m.s"], u.m * u.s),
(["m/s", "m*s**-1", "m /s", "m / s", "m/ s"], u.m / u.s),
(["m**2", "m2", "m**(2)", "m**+2", "m+2", "m^(+2)"], u.m ** 2),
(["m**-3", "m-3", "m^(-3)", "/m3"], u.m ** -3),
(["m**(1.5)", "m(3/2)", "m**(3/2)", "m^(3/2)"], u.m ** 1.5),
(["2.54 cm"], u.Unit(u.cm * 2.54)),
(["10+8m"], u.Unit(u.m * 1e8)),
    # This example is from the VOUnit documentation, but it doesn't seem to
    # follow the unit grammar: (["3.45 10**(-4)Jy"], 3.45 * 1e-4 * u.Jy)
(["sqrt(m)"], u.m ** 0.5),
(["dB(mW)", "dB (mW)"], u.DecibelUnit(u.mW)),
(["mag"], u.mag),
(["mag(ct/s)"], u.MagUnit(u.ct / u.s)),
(["dex"], u.dex),
(["dex(cm s**-2)", "dex(cm/s2)"], u.DexUnit(u.cm / u.s**2)),
])
def test_unit_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.Generic.parse(s)
assert unit2 == unit
@pytest.mark.parametrize('string', ['sin( /pixel /s)', 'mag(mag)',
'dB(dB(mW))', 'dex()'])
def test_unit_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.Generic.parse(string)
@pytest.mark.parametrize('strings, unit', [
(["0.1nm"], u.AA),
(["mW/m2"], u.Unit(u.erg / u.cm ** 2 / u.s)),
(["mW/(m2)"], u.Unit(u.erg / u.cm ** 2 / u.s)),
(["km/s", "km.s-1"], u.km / u.s),
(["10pix/nm"], u.Unit(10 * u.pix / u.nm)),
(["1.5x10+11m"], u.Unit(1.5e11 * u.m)),
(["1.5×10+11m"], u.Unit(1.5e11 * u.m)),
(["m2"], u.m ** 2),
(["10+21m"], u.Unit(u.m * 1e21)),
(["2.54cm"], u.Unit(u.cm * 2.54)),
(["20%"], 0.20 * u.dimensionless_unscaled),
(["10+9"], 1.e9 * u.dimensionless_unscaled),
(["2x10-9"], 2.e-9 * u.dimensionless_unscaled),
(["---"], u.dimensionless_unscaled),
(["ma"], u.ma),
(["mAU"], u.mAU),
(["uarcmin"], u.uarcmin),
(["uarcsec"], u.uarcsec),
(["kbarn"], u.kbarn),
(["Gbit"], u.Gbit),
(["Gibit"], 2 ** 30 * u.bit),
(["kbyte"], u.kbyte),
(["mRy"], 0.001 * u.Ry),
(["mmag"], u.mmag),
(["Mpc"], u.Mpc),
(["Gyr"], u.Gyr),
(["°"], u.degree),
(["°/s"], u.degree / u.s),
(["Å"], u.AA),
(["Å/s"], u.AA / u.s),
(["\\h"], si.h),
(["[cm/s2]"], dex(u.cm / u.s ** 2)),
(["[K]"], dex(u.K)),
(["[-]"], dex(u.dimensionless_unscaled))])
def test_cds_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.CDS.parse(s)
assert unit2 == unit
@pytest.mark.parametrize('string', [
'0.1 nm',
'solMass(3/2)',
'km / s',
'km s-1',
'pix0.1nm',
'pix/(0.1nm)',
'km*s',
'km**2',
'5x8+3m',
'0.1---',
'---m',
'm---',
'--',
'0.1-',
'-m',
'm-',
'mag(s-1)',
'dB(mW)',
'dex(cm s-2)',
'[--]'])
def test_cds_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.CDS.parse(string)
def test_cds_dimensionless():
assert u.Unit('---', format='cds') == u.dimensionless_unscaled
assert u.dimensionless_unscaled.to_string(format='cds') == "---"
def test_cds_log10_dimensionless():
assert u.Unit('[-]', format='cds') == u.dex(u.dimensionless_unscaled)
assert u.dex(u.dimensionless_unscaled).to_string(format='cds') == "[-]"
# These examples are taken from the EXAMPLES section of
# https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/
@pytest.mark.parametrize('strings, unit', [
(["count /s", "count/s", "count s**(-1)", "count / s", "count /s "],
u.count / u.s),
(["/pixel /s", "/(pixel * s)"], (u.pixel * u.s) ** -1),
(["count /m**2 /s /eV", "count m**(-2) * s**(-1) * eV**(-1)",
"count /(m**2 * s * eV)"],
u.count * u.m ** -2 * u.s ** -1 * u.eV ** -1),
(["erg /pixel /s /GHz", "erg /s /GHz /pixel", "erg /pixel /(s * GHz)"],
u.erg / (u.s * u.GHz * u.pixel)),
(["keV**2 /yr /angstrom", "10**(10) keV**2 /yr /m"],
# Though this is given as an example, it seems to violate the rules
# of not raising scales to powers, so I'm just excluding it
# "(10**2 MeV)**2 /yr /m"
u.keV**2 / (u.yr * u.angstrom)),
(["10**(46) erg /s", "10**46 erg /s", "10**(39) J /s", "10**(39) W",
"10**(15) YW", "YJ /fs"],
10**46 * u.erg / u.s),
(["10**(-7) J /cm**2 /MeV", "10**(-9) J m**(-2) eV**(-1)",
"nJ m**(-2) eV**(-1)", "nJ /m**2 /eV"],
10 ** -7 * u.J * u.cm ** -2 * u.MeV ** -1),
(["sqrt(erg /pixel /s /GHz)", "(erg /pixel /s /GHz)**(0.5)",
"(erg /pixel /s /GHz)**(1/2)",
"erg**(0.5) pixel**(-0.5) s**(-0.5) GHz**(-0.5)"],
(u.erg * u.pixel ** -1 * u.s ** -1 * u.GHz ** -1) ** 0.5),
(["(count /s) (/pixel /s)", "(count /s) * (/pixel /s)",
"count /pixel /s**2"],
(u.count / u.s) * (1.0 / (u.pixel * u.s)))])
def test_ogip_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.OGIP.parse(s)
assert unit2 == unit
@pytest.mark.parametrize('string', [
'log(photon /m**2 /s /Hz)',
'sin( /pixel /s)',
'log(photon /cm**2 /s /Hz) /(sin( /pixel /s))',
'log(photon /cm**2 /s /Hz) (sin( /pixel /s))**(-1)',
'dB(mW)', 'dex(cm/s**2)'])
def test_ogip_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.OGIP.parse(string)
class RoundtripBase:
deprecated_units = set()
def check_roundtrip(self, unit, output_format=None):
if output_format is None:
output_format = self.format_
with warnings.catch_warnings():
warnings.simplefilter('ignore') # Same warning shows up multiple times
s = unit.to_string(output_format)
if s in self.deprecated_units:
with pytest.warns(UnitsWarning, match='deprecated') as w:
a = core.Unit(s, format=self.format_)
assert len(w) == 1
else:
a = core.Unit(s, format=self.format_) # No warning
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-9)
def check_roundtrip_decompose(self, unit):
ud = unit.decompose()
s = ud.to_string(self.format_)
assert ' ' not in s
a = core.Unit(s, format=self.format_)
assert_allclose(a.decompose().scale, ud.scale, rtol=1e-5)
class TestRoundtripGeneric(RoundtripBase):
format_ = 'generic'
@pytest.mark.parametrize('unit', [
unit for unit in u.__dict__.values()
if (isinstance(unit, core.UnitBase) and
not isinstance(unit, core.PrefixUnit))])
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
self.check_roundtrip(unit, output_format='unicode')
self.check_roundtrip_decompose(unit)
class TestRoundtripVOUnit(RoundtripBase):
format_ = 'vounit'
deprecated_units = u_format.VOUnit._deprecated_units
@pytest.mark.parametrize('unit', [
unit for unit in u_format.VOUnit._units.values()
if (isinstance(unit, core.UnitBase) and
not isinstance(unit, core.PrefixUnit))])
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
if unit not in (u.mag, u.dB):
self.check_roundtrip_decompose(unit)
class TestRoundtripFITS(RoundtripBase):
format_ = 'fits'
deprecated_units = u_format.Fits._deprecated_units
@pytest.mark.parametrize('unit', [
unit for unit in u_format.Fits._units.values()
if (isinstance(unit, core.UnitBase) and
not isinstance(unit, core.PrefixUnit))])
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
class TestRoundtripCDS(RoundtripBase):
format_ = 'cds'
@pytest.mark.parametrize('unit', [
unit for unit in u_format.CDS._units.values()
if (isinstance(unit, core.UnitBase) and
not isinstance(unit, core.PrefixUnit))])
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
if unit == u.mag:
# Skip mag: decomposes into dex, which is unknown to CDS.
return
self.check_roundtrip_decompose(unit)
@pytest.mark.parametrize('unit', [u.dex(unit) for unit in
(u.cm/u.s**2, u.K, u.Lsun)])
def test_roundtrip_dex(self, unit):
string = unit.to_string(format='cds')
recovered = u.Unit(string, format='cds')
assert recovered == unit
class TestRoundtripOGIP(RoundtripBase):
format_ = 'ogip'
deprecated_units = u_format.OGIP._deprecated_units | {'d'}
@pytest.mark.parametrize('unit', [
unit for unit in u_format.OGIP._units.values()
if (isinstance(unit, core.UnitBase) and
not isinstance(unit, core.PrefixUnit))])
def test_roundtrip(self, unit):
if str(unit) in ('d', '0.001 Crab'):
# Special-case day, which gets auto-converted to hours, and mCrab,
# which the default check does not recognize as a deprecated unit.
with pytest.warns(UnitsWarning):
s = unit.to_string(self.format_)
a = core.Unit(s, format=self.format_)
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-9)
else:
self.check_roundtrip(unit)
if str(unit) in ('mag', 'byte', 'Crab'):
# Skip mag and byte, which decompose into dex and bit, resp.,
# both of which are unknown to OGIP, as well as Crab, which does
# not decompose, and thus gives a deprecated unit warning.
return
power_of_ten = np.log10(unit.decompose().scale)
if abs(power_of_ten - round(power_of_ten)) > 1e-3:
ctx = pytest.warns(UnitsWarning, match='power of 10')
elif str(unit) == '0.001 Crab':
ctx = pytest.warns(UnitsWarning, match='deprecated')
else:
ctx = nullcontext()
with ctx:
self.check_roundtrip_decompose(unit)
def test_fits_units_available():
u_format.Fits._units
def test_vo_units_available():
u_format.VOUnit._units
def test_cds_units_available():
u_format.CDS._units
def test_cds_non_ascii_unit():
"""Regression test for #5350. This failed with a decoding error as
μas could not be represented in ascii."""
from astropy.units import cds
with cds.enable():
u.radian.find_equivalent_units(include_prefix_units=True)
def test_latex():
fluxunit = u.erg / (u.cm ** 2 * u.s)
assert fluxunit.to_string('latex') == r'$\mathrm{\frac{erg}{s\,cm^{2}}}$'
def test_new_style_latex():
fluxunit = u.erg / (u.cm ** 2 * u.s)
assert f"{fluxunit:latex}" == r'$\mathrm{\frac{erg}{s\,cm^{2}}}$'
def test_latex_scale():
fluxunit = u.Unit(1.e-24 * u.erg / (u.cm ** 2 * u.s * u.Hz))
latex = r'$\mathrm{1 \times 10^{-24}\,\frac{erg}{Hz\,s\,cm^{2}}}$'
assert fluxunit.to_string('latex') == latex
def test_latex_inline_scale():
fluxunit = u.Unit(1.e-24 * u.erg / (u.cm ** 2 * u.s * u.Hz))
latex_inline = (r'$\mathrm{1 \times 10^{-24}\,erg'
r'\,Hz^{-1}\,s^{-1}\,cm^{-2}}$')
assert fluxunit.to_string('latex_inline') == latex_inline
@pytest.mark.parametrize('format_spec, string', [
('generic', 'erg / (cm2 s)'),
('s', 'erg / (cm2 s)'),
    ('console', '  erg  \n ------\n s cm^2'),
('latex', '$\\mathrm{\\frac{erg}{s\\,cm^{2}}}$'),
('latex_inline', '$\\mathrm{erg\\,s^{-1}\\,cm^{-2}}$'),
    ('>20s', '       erg / (cm2 s)')])
def test_format_styles(format_spec, string):
fluxunit = u.erg / (u.cm ** 2 * u.s)
assert format(fluxunit, format_spec) == string
def test_flatten_to_known():
myunit = u.def_unit("FOOBAR_One", u.erg / u.Hz)
assert myunit.to_string('fits') == 'erg Hz-1'
myunit2 = myunit * u.bit ** 3
assert myunit2.to_string('fits') == 'bit3 erg Hz-1'
def test_flatten_impossible():
myunit = u.def_unit("FOOBAR_Two")
with u.add_enabled_units(myunit), pytest.raises(ValueError):
myunit.to_string('fits')
def test_console_out():
"""
Issue #436.
"""
u.Jy.decompose().to_string('console')
def test_flexible_float():
assert u.min._represents.to_string('latex') == r'$\mathrm{60\,s}$'
def test_fits_to_string_function_error():
"""Test function raises TypeError on bad input.
    It raises instead of returning None; see gh-11825.
"""
with pytest.raises(TypeError, match='unit argument must be'):
u_format.Fits.to_string(None)
def test_fraction_repr():
area = u.cm ** 2.0
assert '.' not in area.to_string('latex')
fractional = u.cm ** 2.5
assert '5/2' in fractional.to_string('latex')
assert fractional.to_string('unicode') == 'cm⁵⸍²'
def test_scale_effectively_unity():
"""Scale just off unity at machine precision level is OK.
Ensures #748 does not recur
"""
a = (3. * u.N).cgs
assert is_effectively_unity(a.unit.scale)
assert len(a.__repr__().split()) == 3
def test_percent():
"""Test that the % unit is properly recognized. Since % is a special
symbol, this goes slightly beyond the round-tripping tested above."""
assert u.Unit('%') == u.percent == u.Unit(0.01)
assert u.Unit('%', format='cds') == u.Unit(0.01)
assert u.Unit(0.01).to_string('cds') == '%'
with pytest.raises(ValueError):
u.Unit('%', format='fits')
with pytest.raises(ValueError):
u.Unit('%', format='vounit')
def test_scaled_dimensionless():
"""Test that scaled dimensionless units are properly recognized in generic
and CDS, but not in fits and vounit."""
assert u.Unit('0.1') == u.Unit(0.1) == 0.1 * u.dimensionless_unscaled
assert u.Unit('1.e-4') == u.Unit(1.e-4)
assert u.Unit('10-4', format='cds') == u.Unit(1.e-4)
assert u.Unit('10+8').to_string('cds') == '10+8'
with pytest.raises(ValueError):
u.Unit(0.15).to_string('fits')
assert u.Unit(0.1).to_string('fits') == '10**-1'
with pytest.raises(ValueError):
u.Unit(0.1).to_string('vounit')
def test_deprecated_did_you_mean_units():
with pytest.raises(ValueError) as exc_info:
u.Unit('ANGSTROM', format='fits')
assert 'Did you mean Angstrom or angstrom?' in str(exc_info.value)
with pytest.raises(ValueError) as exc_info:
u.Unit('crab', format='ogip')
assert 'Crab (deprecated)' in str(exc_info.value)
assert 'mCrab (deprecated)' in str(exc_info.value)
with pytest.warns(UnitsWarning, match=r'.* Did you mean 0\.1nm, Angstrom '
r'\(deprecated\) or angstrom \(deprecated\)\?') as w:
u.Unit('ANGSTROM', format='vounit')
assert len(w) == 1
assert str(w[0].message).count('0.1nm') == 1
with pytest.warns(UnitsWarning, match=r'.* 0\.1nm\.') as w:
u.Unit('angstrom', format='vounit')
assert len(w) == 1
@pytest.mark.parametrize('string', ['mag(ct/s)', 'dB(mW)', 'dex(cm s**-2)'])
def test_fits_function(string):
# Function units cannot be written, so ensure they're not parsed either.
with pytest.raises(ValueError):
print(string)
u_format.Fits().parse(string)
@pytest.mark.parametrize('string', ['mag(ct/s)', 'dB(mW)', 'dex(cm s**-2)'])
def test_vounit_function(string):
# Function units cannot be written, so ensure they're not parsed either.
with pytest.raises(ValueError), warnings.catch_warnings():
warnings.simplefilter('ignore') # ct, dex also raise warnings - irrelevant here.
u_format.VOUnit().parse(string)
def test_vounit_binary_prefix():
    assert u.Unit('KiB', format='vounit') == u.Unit('1024 B')
    assert u.Unit('Kibyte', format='vounit') == u.Unit('1024 B')
    assert u.Unit('Kibit', format='vounit') == u.Unit('1024 bit')
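    # (Ki is the binary prefix 2**10 = 1024; a kibibit is 1024 bit, not
    # 1024 bytes.)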
with pytest.warns(UnitsWarning) as w:
u.Unit('kibibyte', format='vounit')
assert len(w) == 1
def test_vounit_unknown():
assert u.Unit('unknown', format='vounit') is None
assert u.Unit('UNKNOWN', format='vounit') is None
assert u.Unit('', format='vounit') is u.dimensionless_unscaled
def test_vounit_details():
with pytest.warns(UnitsWarning, match='deprecated') as w:
assert u.Unit('Pa', format='vounit') is u.Pascal
assert len(w) == 1
# The da- prefix is not allowed, and the d- prefix is discouraged
assert u.dam.to_string('vounit') == '10m'
assert u.Unit('dam dag').to_string('vounit') == '100g.m'
# Parse round-trip
with pytest.warns(UnitsWarning, match='deprecated'):
flam = u.erg / u.cm / u.cm / u.s / u.AA
x = u.format.VOUnit.to_string(flam)
assert x == 'Angstrom**-1.cm**-2.erg.s**-1'
new_flam = u.format.VOUnit.parse(x)
assert new_flam == flam
@pytest.mark.parametrize('unit, vounit, number, scale, voscale',
[('nm', 'nm', 0.1, '10^-1', '0.1'),
('fm', 'fm', 100.0, '10+2', '100'),
('m^2', 'm**2', 100.0, '100.0', '100'),
('cm', 'cm', 2.54, '2.54', '2.54'),
('kg', 'kg', 1.898124597e27, '1.898124597E27', '1.8981246e+27'),
('m/s', 'm.s**-1', 299792458.0, '299792458', '2.9979246e+08'),
('cm2', 'cm**2', 1.e-20, '10^(-20)', '1e-20')])
def test_vounit_scale_factor(unit, vounit, number, scale, voscale):
x = u.Unit(f'{scale} {unit}')
assert x == number * u.Unit(unit)
assert x.to_string(format='vounit') == voscale + vounit
def test_vounit_custom():
x = u.Unit("'foo' m", format='vounit')
x_vounit = x.to_string('vounit')
assert x_vounit == "'foo'.m"
x_string = x.to_string()
assert x_string == "foo m"
x = u.Unit("m'foo' m", format='vounit')
assert x.bases[1]._represents.scale == 0.001
x_vounit = x.to_string('vounit')
assert x_vounit == "m.m'foo'"
x_string = x.to_string()
assert x_string == 'm mfoo'
def test_vounit_implicit_custom():
# Yikes, this becomes "femto-urlong"... But at least there's a warning.
with pytest.warns(UnitsWarning) as w:
x = u.Unit("furlong/week", format="vounit")
assert x.bases[0]._represents.scale == 1e-15
assert x.bases[0]._represents.bases[0].name == 'urlong'
assert len(w) == 2
assert 'furlong' in str(w[0].message)
assert 'week' in str(w[1].message)
@pytest.mark.parametrize('scale, number, string',
[('10+2', 100, '10**2'),
('10(+2)', 100, '10**2'),
('10**+2', 100, '10**2'),
('10**(+2)', 100, '10**2'),
('10^+2', 100, '10**2'),
('10^(+2)', 100, '10**2'),
('10**2', 100, '10**2'),
('10**(2)', 100, '10**2'),
('10^2', 100, '10**2'),
('10^(2)', 100, '10**2'),
('10-20', 10**(-20), '10**-20'),
('10(-20)', 10**(-20), '10**-20'),
('10**-20', 10**(-20), '10**-20'),
('10**(-20)', 10**(-20), '10**-20'),
('10^-20', 10**(-20), '10**-20'),
('10^(-20)', 10**(-20), '10**-20'),
])
def test_fits_scale_factor(scale, number, string):
x = u.Unit(scale + ' erg/(s cm**2 Angstrom)', format='fits')
assert x == number * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
assert x.to_string(format='fits') == string + ' Angstrom-1 cm-2 erg s-1'
x = u.Unit(scale + '*erg/(s cm**2 Angstrom)', format='fits')
assert x == number * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
assert x.to_string(format='fits') == string + ' Angstrom-1 cm-2 erg s-1'
def test_fits_scale_factor_errors():
with pytest.raises(ValueError):
x = u.Unit('1000 erg/(s cm**2 Angstrom)', format='fits')
with pytest.raises(ValueError):
x = u.Unit('12 erg/(s cm**2 Angstrom)', format='fits')
x = u.Unit(1.2 * u.erg)
with pytest.raises(ValueError):
x.to_string(format='fits')
x = u.Unit(100.0 * u.erg)
assert x.to_string(format='fits') == '10**2 erg'
def test_double_superscript():
"""Regression test for #5870, #8699, #9218; avoid double superscripts."""
assert (u.deg).to_string("latex") == r'$\mathrm{{}^{\circ}}$'
assert (u.deg**2).to_string("latex") == r'$\mathrm{deg^{2}}$'
assert (u.arcmin).to_string("latex") == r'$\mathrm{{}^{\prime}}$'
assert (u.arcmin**2).to_string("latex") == r'$\mathrm{arcmin^{2}}$'
assert (u.arcsec).to_string("latex") == r'$\mathrm{{}^{\prime\prime}}$'
assert (u.arcsec**2).to_string("latex") == r'$\mathrm{arcsec^{2}}$'
assert (u.hourangle).to_string("latex") == r'$\mathrm{{}^{h}}$'
assert (u.hourangle**2).to_string("latex") == r'$\mathrm{hourangle^{2}}$'
assert (u.electron).to_string("latex") == r'$\mathrm{e^{-}}$'
assert (u.electron**2).to_string("latex") == r'$\mathrm{electron^{2}}$'
@pytest.mark.parametrize('power,expected', (
(1., 'm'), (2., 'm2'), (-10, '1 / m10'), (1.5, 'm(3/2)'), (2/3, 'm(2/3)'),
(7/11, 'm(7/11)'), (-1/64, '1 / m(1/64)'), (1/100, 'm(1/100)'),
(2/101, 'm(0.019801980198019802)'), (Fraction(2, 101), 'm(2/101)')))
def test_powers(power, expected):
"""Regression test for #9279 - powers should not be oversimplified."""
unit = u.m ** power
s = unit.to_string()
assert s == expected
assert unit == s
@pytest.mark.parametrize('string,unit', [
('\N{MICRO SIGN}g', u.microgram),
('\N{GREEK SMALL LETTER MU}g', u.microgram),
('g\N{MINUS SIGN}1', u.g**(-1)),
('m\N{SUPERSCRIPT MINUS}\N{SUPERSCRIPT ONE}', 1 / u.m),
('m s\N{SUPERSCRIPT MINUS}\N{SUPERSCRIPT ONE}', u.m / u.s),
('m\N{SUPERSCRIPT TWO}', u.m**2),
('m\N{SUPERSCRIPT PLUS SIGN}\N{SUPERSCRIPT TWO}', u.m**2),
('m\N{SUPERSCRIPT THREE}', u.m**3),
('m\N{SUPERSCRIPT ONE}\N{SUPERSCRIPT ZERO}', u.m**10),
('\N{GREEK CAPITAL LETTER OMEGA}', u.ohm),
('\N{OHM SIGN}', u.ohm), # deprecated but for compatibility
('\N{MICRO SIGN}\N{GREEK CAPITAL LETTER OMEGA}', u.microOhm),
('\N{ANGSTROM SIGN}', u.Angstrom),
('\N{ANGSTROM SIGN} \N{OHM SIGN}', u.Angstrom * u.Ohm),
('\N{LATIN CAPITAL LETTER A WITH RING ABOVE}', u.Angstrom),
('\N{LATIN CAPITAL LETTER A}\N{COMBINING RING ABOVE}', u.Angstrom),
('m\N{ANGSTROM SIGN}', u.milliAngstrom),
('°C', u.deg_C),
('°', u.deg),
('M⊙', u.Msun), # \N{CIRCLED DOT OPERATOR}
('L☉', u.Lsun), # \N{SUN}
('M⊕', u.Mearth), # normal earth symbol = \N{CIRCLED PLUS}
('M♁', u.Mearth), # be generous with \N{EARTH}
('R♃', u.Rjup), # \N{JUPITER}
('′', u.arcmin), # \N{PRIME}
('R∞', u.Ry),
('Mₚ', u.M_p),
])
def test_unicode(string, unit):
assert u_format.Generic.parse(string) == unit
assert u.Unit(string) == unit
@pytest.mark.parametrize('string', [
'g\N{MICRO SIGN}',
'g\N{MINUS SIGN}',
'm\N{SUPERSCRIPT MINUS}1',
'm+\N{SUPERSCRIPT ONE}',
'm\N{MINUS SIGN}\N{SUPERSCRIPT ONE}',
'k\N{ANGSTROM SIGN}',
])
def test_unicode_failures(string):
with pytest.raises(ValueError):
u.Unit(string)
@pytest.mark.parametrize('format_', ('unicode', 'latex', 'latex_inline'))
def test_parse_error_message_for_output_only_format(format_):
with pytest.raises(NotImplementedError, match='not parse'):
u.Unit('m', format=format_)
def test_unknown_parser():
with pytest.raises(ValueError, match=r"Unknown.*unicode'\] for output only"):
u.Unit('m', format='foo')
|
0abf905e4639c4fc4ad449d88a8ee31e662733beb187022b54603b9e48ef9108 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Regression tests for the units package."""
import pickle
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import constants as c
from astropy import units as u
from astropy.units import utils
def test_initialisation():
assert u.Unit(u.m) is u.m
ten_meter = u.Unit(10.*u.m)
assert ten_meter == u.CompositeUnit(10., [u.m], [1])
assert u.Unit(ten_meter) is ten_meter
assert u.Unit(10.*ten_meter) == u.CompositeUnit(100., [u.m], [1])
foo = u.Unit('foo', (10. * ten_meter)**2, namespace=locals())
assert foo == u.CompositeUnit(10000., [u.m], [2])
assert u.Unit('m') == u.m
assert u.Unit('') == u.dimensionless_unscaled
assert u.one == u.dimensionless_unscaled
assert u.Unit('10 m') == ten_meter
assert u.Unit(10.) == u.CompositeUnit(10., [], [])
assert u.Unit() == u.dimensionless_unscaled
def test_invalid_power():
x = u.m ** Fraction(1, 3)
assert isinstance(x.powers[0], Fraction)
x = u.m ** Fraction(1, 2)
assert isinstance(x.powers[0], float)
# Test the automatic conversion to a fraction
x = u.m ** (1. / 3.)
assert isinstance(x.powers[0], Fraction)
def test_invalid_compare():
assert not (u.m == u.s)
def test_convert():
assert u.h._get_converter(u.s)(1) == 3600
def test_convert_fail():
with pytest.raises(u.UnitsError):
u.cm.to(u.s, 1)
with pytest.raises(u.UnitsError):
(u.cm / u.s).to(u.m, 1)
def test_composite():
assert (u.cm / u.s * u.h)._get_converter(u.m)(1) == 36
assert u.cm * u.cm == u.cm ** 2
assert u.cm * u.cm * u.cm == u.cm ** 3
assert u.Hz.to(1000 * u.Hz, 1) == 0.001
def test_str():
assert str(u.cm) == "cm"
def test_repr():
assert repr(u.cm) == 'Unit("cm")'
def test_represents():
assert u.m.represents is u.m
assert u.km.represents.scale == 1000.
assert u.km.represents.bases == [u.m]
assert u.Ry.scale == 1.0 and u.Ry.bases == [u.Ry]
assert_allclose(u.Ry.represents.scale, 13.605692518464949)
assert u.Ry.represents.bases == [u.eV]
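    # (One Rydberg of energy is about 13.606 eV, hence the scale above.)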
bla = u.def_unit('bla', namespace=locals())
assert bla.represents is bla
blabla = u.def_unit('blabla', 10 * u.hr, namespace=locals())
assert blabla.represents.scale == 10.
assert blabla.represents.bases == [u.hr]
assert blabla.decompose().scale == 10 * 3600
assert blabla.decompose().bases == [u.s]
def test_units_conversion():
assert_allclose(u.kpc.to(u.Mpc), 0.001)
assert_allclose(u.Mpc.to(u.kpc), 1000)
assert_allclose(u.yr.to(u.Myr), 1.e-6)
assert_allclose(u.AU.to(u.pc), 4.84813681e-6)
assert_allclose(u.cycle.to(u.rad), 6.283185307179586)
assert_allclose(u.spat.to(u.sr), 12.56637061435917)
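    # (A full cycle is 2*pi rad and the full sphere, spat, is 4*pi sr.)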
def test_units_manipulation():
# Just do some manipulation and check it's happy
(u.kpc * u.yr) ** Fraction(1, 3) / u.Myr
(u.AA * u.erg) ** 9
def test_decompose():
assert u.Ry == u.Ry.decompose()
def test_dimensionless_to_si():
"""
Issue #1150: Test for conversion of dimensionless quantities
to the SI system
"""
testunit = ((1.0 * u.kpc) / (1.0 * u.Mpc))
assert testunit.unit.physical_type == 'dimensionless'
assert_allclose(testunit.si, 0.001)
def test_dimensionless_to_cgs():
"""
Issue #1150: Test for conversion of dimensionless quantities
to the CGS system
"""
testunit = ((1.0 * u.m) / (1.0 * u.km))
assert testunit.unit.physical_type == 'dimensionless'
assert_allclose(testunit.cgs, 0.001)
def test_unknown_unit():
with pytest.warns(u.UnitsWarning, match='FOO'):
u.Unit("FOO", parse_strict='warn')
def test_multiple_solidus():
with pytest.warns(u.UnitsWarning, match="'m/s/kg' contains multiple "
"slashes, which is discouraged"):
assert u.Unit("m/s/kg").to_string() == 'm / (kg s)'
with pytest.raises(ValueError):
u.Unit("m/s/kg", format="vounit")
# Regression test for #9000: solidi in exponents do not count towards this.
x = u.Unit("kg(3/10) * m(5/2) / s", format="vounit")
assert x.to_string() == 'kg(3/10) m(5/2) / s'
def test_unknown_unit3():
unit = u.Unit("FOO", parse_strict='silent')
assert isinstance(unit, u.UnrecognizedUnit)
assert unit.name == "FOO"
unit2 = u.Unit("FOO", parse_strict='silent')
assert unit == unit2
assert unit.is_equivalent(unit2)
unit3 = u.Unit("BAR", parse_strict='silent')
assert unit != unit3
assert not unit.is_equivalent(unit3)
# Also test basic (in)equalities.
assert unit == "FOO"
assert unit != u.m
# next two from gh-7603.
assert unit != None # noqa
assert unit not in (None, u.m)
with pytest.raises(ValueError):
unit._get_converter(unit3)
_ = unit.to_string('latex')
_ = unit2.to_string('cgs')
with pytest.raises(ValueError):
u.Unit("BAR", parse_strict='strict')
with pytest.raises(TypeError):
u.Unit(None)
def test_invalid_scale():
with pytest.raises(TypeError):
['a', 'b', 'c'] * u.m
def test_cds_power():
unit = u.Unit("10+22/cm2", format="cds", parse_strict='silent')
assert unit.scale == 1e22
def test_register():
foo = u.def_unit("foo", u.m ** 3, namespace=locals())
assert 'foo' in locals()
with u.add_enabled_units(foo):
assert 'foo' in u.get_current_unit_registry().registry
assert 'foo' not in u.get_current_unit_registry().registry
def test_in_units():
speed_unit = u.cm / u.s
_ = speed_unit.in_units(u.pc / u.hour, 1)
def test_null_unit():
assert (u.m / u.m) == u.Unit(1)
def test_unrecognized_equivalency():
assert u.m.is_equivalent('foo') is False
assert u.m.is_equivalent('pc') is True
def test_convertible_exception():
with pytest.raises(u.UnitsError, match=r'length.+ are not convertible'):
u.AA.to(u.h * u.s ** 2)
def test_convertible_exception2():
with pytest.raises(u.UnitsError, match=r'length. and .+time.+ are not convertible'):
u.m.to(u.s)
def test_invalid_type():
class A:
pass
with pytest.raises(TypeError):
u.Unit(A())
def test_steradian():
"""
Issue #599
"""
assert u.sr.is_equivalent(u.rad * u.rad)
results = u.sr.compose(units=u.cgs.bases)
assert results[0].bases[0] is u.rad
results = u.sr.compose(units=u.cgs.__dict__)
assert results[0].bases[0] is u.sr
def test_decompose_bases():
"""
From issue #576
"""
from astropy.constants import e
from astropy.units import cgs
d = e.esu.unit.decompose(bases=cgs.bases)
assert d._bases == [u.cm, u.g, u.s]
assert d._powers == [Fraction(3, 2), 0.5, -1]
assert d._scale == 1.0
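    # (In Gaussian units, charge decomposes to cm**(3/2) g**(1/2) / s, which
    # is where the fractional powers above come from.)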
def test_complex_compose():
complex = u.cd * u.sr * u.Wb
composed = complex.compose()
assert set(composed[0]._bases) == {u.lm, u.Wb}
def test_equiv_compose():
composed = u.m.compose(equivalencies=u.spectral())
assert any([u.Hz] == x.bases for x in composed)
def test_empty_compose():
with pytest.raises(u.UnitsError):
u.m.compose(units=[])
def _unit_as_str(unit):
# This function serves two purposes - it is used to sort the units to
    # test alphabetically, and it is also used to allow pytest to show the unit
# in the [] when running the parametrized tests.
return str(unit)
# We use a set to make sure we don't have any duplicates.
COMPOSE_ROUNDTRIP = set()
for val in u.__dict__.values():
if (isinstance(val, u.UnitBase) and
not isinstance(val, u.PrefixUnit)):
COMPOSE_ROUNDTRIP.add(val)
@pytest.mark.parametrize('unit', sorted(COMPOSE_ROUNDTRIP, key=_unit_as_str), ids=_unit_as_str)
def test_compose_roundtrip(unit):
composed_list = unit.decompose().compose()
found = False
for composed in composed_list:
if len(composed.bases):
if composed.bases[0] is unit:
found = True
break
elif len(unit.bases) == 0:
found = True
break
assert found
# We use a set to make sure we don't have any duplicates.
COMPOSE_CGS_TO_SI = set()
for val in u.cgs.__dict__.values():
# Can't decompose Celsius
if (isinstance(val, u.UnitBase) and
not isinstance(val, u.PrefixUnit) and
val != u.cgs.deg_C):
COMPOSE_CGS_TO_SI.add(val)
@pytest.mark.parametrize('unit', sorted(COMPOSE_CGS_TO_SI, key=_unit_as_str),
ids=_unit_as_str)
def test_compose_cgs_to_si(unit):
si = unit.to_system(u.si)
    assert all(x.is_equivalent(unit) for x in si)
assert si[0] == unit.si
# We use a set to make sure we don't have any duplicates.
COMPOSE_SI_TO_CGS = set()
for val in u.si.__dict__.values():
# Can't decompose Celsius
if (isinstance(val, u.UnitBase) and
not isinstance(val, u.PrefixUnit) and
val != u.si.deg_C):
COMPOSE_SI_TO_CGS.add(val)
@pytest.mark.parametrize('unit', sorted(COMPOSE_SI_TO_CGS, key=_unit_as_str), ids=_unit_as_str)
def test_compose_si_to_cgs(unit):
# Can't convert things with Ampere to CGS without more context
try:
cgs = unit.to_system(u.cgs)
except u.UnitsError:
if u.A in unit.decompose().bases:
pass
else:
raise
else:
        assert all(x.is_equivalent(unit) for x in cgs)
assert cgs[0] == unit.cgs
def test_to_si():
"""Check units that are not official derived units.
    They should not appear on their own or as part of a composite unit.
"""
# TODO: extend to all units not listed in Tables 1--6 of
# https://physics.nist.gov/cuu/Units/units.html
# See gh-10585.
# This was always the case
assert u.bar.si is not u.bar
# But this used to fail.
assert u.bar not in (u.kg/(u.s**2*u.sr*u.nm)).si._bases
def test_to_cgs():
assert u.Pa.to_system(u.cgs)[1]._bases[0] is u.Ba
assert u.Pa.to_system(u.cgs)[1]._scale == 10.0
def test_decompose_to_cgs():
from astropy.units import cgs
assert u.m.decompose(bases=cgs.bases)._bases[0] is cgs.cm
def test_compose_issue_579():
unit = u.kg * u.s ** 2 / u.m
result = unit.compose(units=[u.N, u.s, u.m])
assert len(result) == 1
assert result[0]._bases == [u.s, u.N, u.m]
assert result[0]._powers == [4, 1, -2]
def test_compose_prefix_unit():
x = u.m.compose(units=(u.m,))
assert x[0].bases[0] is u.m
assert x[0].scale == 1.0
x = u.m.compose(units=[u.km], include_prefix_units=True)
assert x[0].bases[0] is u.km
assert x[0].scale == 0.001
x = u.m.compose(units=[u.km])
assert x[0].bases[0] is u.km
assert x[0].scale == 0.001
x = (u.km/u.s).compose(units=(u.pc, u.Myr))
assert x[0].bases == [u.pc, u.Myr]
assert_allclose(x[0].scale, 1.0227121650537077)
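    # (1 pc/Myr is about 0.9778 km/s, so 1 km/s is about 1.0227 pc/Myr.)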
with pytest.raises(u.UnitsError):
(u.km/u.s).compose(units=(u.pc, u.Myr), include_prefix_units=False)
def test_self_compose():
unit = u.kg * u.s
assert len(unit.compose(units=[u.g, u.s])) == 1
def test_compose_failed():
unit = u.kg
with pytest.raises(u.UnitsError):
unit.compose(units=[u.N])
def test_compose_fractional_powers():
# Warning: with a complicated unit, this test becomes very slow;
# e.g., x = (u.kg / u.s ** 3 * u.au ** 2.5 / u.yr ** 0.5 / u.sr ** 2)
# takes 3 s
x = u.m ** 0.5 / u.yr ** 1.5
factored = x.compose()
for unit in factored:
assert x.decompose() == unit.decompose()
factored = x.compose(units=u.cgs)
for unit in factored:
assert x.decompose() == unit.decompose()
factored = x.compose(units=u.si)
for unit in factored:
assert x.decompose() == unit.decompose()
def test_compose_best_unit_first():
results = u.l.compose()
assert len(results[0].bases) == 1
assert results[0].bases[0] is u.l
results = (u.s ** -1).compose()
assert results[0].bases[0] in (u.Hz, u.Bq)
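    # (Hz and Bq both represent s**-1, so either may come back first.)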
results = (u.Ry.decompose()).compose()
assert results[0].bases[0] is u.Ry
def test_compose_no_duplicates():
new = u.kg / u.s ** 3 * u.au ** 2.5 / u.yr ** 0.5 / u.sr ** 2
composed = new.compose(units=u.cgs.bases)
assert len(composed) == 1
def test_long_int():
"""
Issue #672
"""
sigma = 10 ** 21 * u.M_p / u.cm ** 2
sigma.to(u.M_sun / u.pc ** 2)
def test_endian_independence():
"""
Regression test for #744
A logic issue in the units code meant that big endian arrays could not be
converted because the dtype is '>f4', not 'float32', and the code was
looking for the strings 'float' or 'int'.
"""
for endian in ['<', '>']:
for ntype in ['i', 'f']:
for byte in ['4', '8']:
x = np.array([1, 2, 3], dtype=(endian + ntype + byte))
u.m.to(u.cm, x)
def test_radian_base():
"""
Issue #863
"""
assert (1 * u.degree).si.unit == u.rad
def test_no_as():
# We don't define 'as', since it is a keyword, but we
# do want to define the long form (`attosecond`).
assert not hasattr(u, 'as')
assert hasattr(u, 'attosecond')
def test_no_duplicates_in_names():
# Regression test for #5036
assert u.ct.names == ['ct', 'count']
assert u.ct.short_names == ['ct', 'count']
assert u.ct.long_names == ['count']
assert set(u.ph.names) == set(u.ph.short_names) | set(u.ph.long_names)
def test_pickling():
p = pickle.dumps(u.m)
other = pickle.loads(p)
assert other is u.m
new_unit = u.IrreducibleUnit(['foo'], format={'baz': 'bar'})
# This is local, so the unit should not be registered.
assert 'foo' not in u.get_current_unit_registry().registry
# Test pickling of this unregistered unit.
p = pickle.dumps(new_unit)
new_unit_copy = pickle.loads(p)
assert new_unit_copy is not new_unit
assert new_unit_copy.names == ['foo']
assert new_unit_copy.get_format_name('baz') == 'bar'
# It should still not be registered.
assert 'foo' not in u.get_current_unit_registry().registry
# Now try the same with a registered unit.
with u.add_enabled_units([new_unit]):
p = pickle.dumps(new_unit)
assert 'foo' in u.get_current_unit_registry().registry
new_unit_copy = pickle.loads(p)
assert new_unit_copy is new_unit
# Check that a registered unit can be loaded and that it gets re-enabled.
with u.add_enabled_units([]):
assert 'foo' not in u.get_current_unit_registry().registry
new_unit_copy = pickle.loads(p)
assert new_unit_copy is not new_unit
assert new_unit_copy.names == ['foo']
assert new_unit_copy.get_format_name('baz') == 'bar'
assert 'foo' in u.get_current_unit_registry().registry
# And just to be sure, that it gets removed outside of the context.
assert 'foo' not in u.get_current_unit_registry().registry
def test_pickle_between_sessions():
"""We cannot really test between sessions easily, so fake it.
This test can be changed if the pickle protocol or the code
changes enough that it no longer works.
"""
hash_m = hash(u.m)
unit = pickle.loads(
b'\x80\x04\x95\xd6\x00\x00\x00\x00\x00\x00\x00\x8c\x12'
b'astropy.units.core\x94\x8c\x1a_recreate_irreducible_unit'
b'\x94\x93\x94h\x00\x8c\x0fIrreducibleUnit\x94\x93\x94]\x94'
b'(\x8c\x01m\x94\x8c\x05meter\x94e\x88\x87\x94R\x94}\x94(\x8c\x06'
b'_names\x94]\x94(h\x06h\x07e\x8c\x0c_short_names'
b'\x94]\x94h\x06a\x8c\x0b_long_names\x94]\x94h\x07a\x8c\x07'
b'_format\x94}\x94\x8c\x07__doc__\x94\x8c '
b'meter: base unit of length in SI\x94ub.')
assert unit is u.m
assert hash(u.m) == hash_m
@pytest.mark.parametrize('unit', [
u.IrreducibleUnit(['foo'], format={'baz': 'bar'}),
u.Unit('m_per_s', u.m/u.s)])
def test_pickle_does_not_keep_memoized_hash(unit):
"""
Tests private attribute since the problem with _hash being pickled
and restored only appeared if the unpickling was done in another
session, for which the hash no longer was valid, and it is difficult
to mimic separate sessions in a simple test. See gh-11872.
"""
unit_hash = hash(unit)
assert unit._hash is not None
unit_copy = pickle.loads(pickle.dumps(unit))
# unit is not registered so we get a copy.
assert unit_copy is not unit
assert unit_copy._hash is None
assert hash(unit_copy) == unit_hash
with u.add_enabled_units([unit]):
# unit is registered, so we get a reference.
unit_ref = pickle.loads(pickle.dumps(unit))
if isinstance(unit, u.IrreducibleUnit):
assert unit_ref is unit
else:
assert unit_ref is not unit
# pickle.load used to override the hash, although in this case
# it would be the same anyway, so not clear this tests much.
assert hash(unit) == unit_hash
def test_pickle_unrecognized_unit():
"""
Issue #2047
"""
a = u.Unit('asdf', parse_strict='silent')
pickle.loads(pickle.dumps(a))
def test_duplicate_define():
with pytest.raises(ValueError):
u.def_unit('m', namespace=u.__dict__)
def test_all_units():
from astropy.units.core import get_current_unit_registry
registry = get_current_unit_registry()
assert len(registry.all_units) > len(registry.non_prefix_units)
def test_repr_latex():
assert u.m._repr_latex_() == u.m.to_string('latex')
def test_operations_with_strings():
assert u.m / '5s' == (u.m / (5.0 * u.s))
assert u.m * '5s' == (5.0 * u.m * u.s)
def test_comparison():
assert u.m > u.cm
assert u.m >= u.cm
assert u.cm < u.m
assert u.cm <= u.m
with pytest.raises(u.UnitsError):
u.m > u.kg
def test_compose_into_arbitrary_units():
# Issue #1438
from astropy.constants import G
G.decompose([u.kg, u.km, u.Unit("15 s")])
def test_unit_multiplication_with_string():
"""Check that multiplication with strings produces the correct unit."""
u1 = u.cm
us = 'kg'
assert us * u1 == u.Unit(us) * u1
assert u1 * us == u1 * u.Unit(us)
def test_unit_division_by_string():
"""Check that multiplication with strings produces the correct unit."""
u1 = u.cm
us = 'kg'
assert us / u1 == u.Unit(us) / u1
assert u1 / us == u1 / u.Unit(us)
def test_sorted_bases():
"""See #1616."""
assert (u.m * u.Jy).bases == (u.Jy * u.m).bases
def test_megabit():
"""See #1543"""
assert u.Mbit is u.Mb
assert u.megabit is u.Mb
assert u.Mbyte is u.MB
assert u.megabyte is u.MB
def test_composite_unit_get_format_name():
"""See #1576"""
unit1 = u.Unit('nrad/s')
unit2 = u.Unit('Hz(1/2)')
assert (str(u.CompositeUnit(1, [unit1, unit2], [1, -1])) ==
'nrad / (Hz(1/2) s)')
def test_unicode_policy():
from astropy.tests.helper import assert_follows_unicode_guidelines
assert_follows_unicode_guidelines(
u.degree, roundtrip=u.__dict__)
def test_suggestions():
for search, matches in [
('microns', 'micron'),
('s/microns', 'micron'),
('M', 'm'),
('metre', 'meter'),
('angstroms', 'Angstrom or angstrom'),
('milimeter', 'millimeter'),
('ångström', 'Angstrom, angstrom, mAngstrom or mangstrom'),
('kev', 'EV, eV, kV or keV')]:
with pytest.raises(ValueError, match=f'Did you mean {matches}'):
u.Unit(search)
def test_fits_hst_unit():
"""See #1911."""
with pytest.warns(u.UnitsWarning, match='multiple slashes') as w:
x = u.Unit("erg /s /cm**2 /angstrom")
assert x == u.erg * u.s ** -1 * u.cm ** -2 * u.angstrom ** -1
assert len(w) == 1
def test_barn_prefixes():
"""Regression test for https://github.com/astropy/astropy/issues/3753"""
assert u.fbarn is u.femtobarn
assert u.pbarn is u.picobarn
def test_fractional_powers():
"""See #2069"""
m = 1e9 * u.Msun
tH = 1. / (70. * u.km / u.s / u.Mpc)
vc = 200 * u.km/u.s
x = (c.G ** 2 * m ** 2 * tH.cgs) ** Fraction(1, 3) / vc
v1 = x.to('pc')
x = (c.G ** 2 * m ** 2 * tH) ** Fraction(1, 3) / vc
v2 = x.to('pc')
x = (c.G ** 2 * m ** 2 * tH.cgs) ** (1.0 / 3.0) / vc
v3 = x.to('pc')
x = (c.G ** 2 * m ** 2 * tH) ** (1.0 / 3.0) / vc
v4 = x.to('pc')
assert_allclose(v1, v2)
assert_allclose(v2, v3)
assert_allclose(v3, v4)
x = u.m ** (1.0 / 101.0)
assert isinstance(x.powers[0], float)
x = u.m ** (3.0 / 7.0)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0].numerator == 3
assert x.powers[0].denominator == 7
x = u.cm ** Fraction(1, 2) * u.cm ** Fraction(2, 3)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0] == Fraction(7, 6)
# Regression test for #9258.
x = (u.TeV ** (-2.2)) ** (1/-2.2)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0] == Fraction(1, 1)
def test_sqrt_mag():
sqrt_mag = u.mag ** 0.5
assert hasattr(sqrt_mag.decompose().scale, 'imag')
assert (sqrt_mag.decompose())**2 == u.mag
def test_composite_compose():
# Issue #2382
composite_unit = u.s.compose(units=[u.Unit("s")])[0]
u.s.compose(units=[composite_unit])
def test_data_quantities():
assert u.byte.is_equivalent(u.bit)
def test_compare_with_none():
# Ensure that equality comparisons with `None` work, and don't
# raise exceptions. We are deliberately not using `is None` here
# because that doesn't trigger the bug. See #3108.
assert not (u.m == None) # noqa
assert u.m != None # noqa
def test_validate_power_detect_fraction():
frac = utils.validate_power(1.1666666666666665)
assert isinstance(frac, Fraction)
assert frac.numerator == 7
assert frac.denominator == 6
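# Hedged illustration (not an astropy test): the float above is just 7/6 to
# double precision, which the standard-library Fraction recovers directly.
def _example_limit_denominator():
    assert Fraction(1.1666666666666665).limit_denominator(100) == Fraction(7, 6)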
def test_complex_fractional_rounding_errors():
# See #3788
kappa = 0.34 * u.cm**2 / u.g
r_0 = 886221439924.7849 * u.cm
q = 1.75
rho_0 = 5e-10 * u.solMass / u.solRad**3
y = 0.5
beta = 0.19047619047619049
a = 0.47619047619047628
m_h = 1e6*u.solMass
t1 = 2 * c.c / (kappa * np.sqrt(np.pi))
t2 = (r_0**-q) / (rho_0 * y * beta * (a * c.G * m_h)**0.5)
result = ((t1 * t2)**-0.8)
assert result.unit.physical_type == 'length'
result.to(u.solRad)
def test_fractional_rounding_errors_simple():
x = (u.m ** 1.5) ** Fraction(4, 5)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0].numerator == 6
assert x.powers[0].denominator == 5
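    # (The arithmetic: (3/2) * (4/5) = 12/10 = 6/5.)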
def test_enable_unit_groupings():
from astropy.units import cds
with cds.enable():
assert cds.geoMass in u.kg.find_equivalent_units()
from astropy.units import imperial
with imperial.enable():
assert imperial.inch in u.m.find_equivalent_units()
def test_unit_summary_prefixes():
"""
Test for a few units that the unit summary table correctly reports
whether or not that unit supports prefixes.
Regression test for https://github.com/astropy/astropy/issues/3835
"""
from astropy.units import astrophys
for summary in utils._iter_unit_summary(astrophys.__dict__):
unit, _, _, _, prefixes = summary
if unit.name == 'lyr':
assert prefixes
elif unit.name == 'pc':
assert prefixes
elif unit.name == 'barn':
assert prefixes
elif unit.name == 'cycle':
assert prefixes == 'No'
elif unit.name == 'spat':
assert prefixes == 'No'
elif unit.name == 'vox':
assert prefixes == 'Yes'
def test_raise_to_negative_power():
"""Test that order of bases is changed when raising to negative power.
Regression test for https://github.com/astropy/astropy/issues/8260
"""
m2s2 = u.m ** 2 / u.s ** 2
spm = m2s2 ** (-1 / 2)
assert spm.bases == [u.s, u.m]
assert spm.powers == [1, -1]
assert spm == u.s / u.m
|
fe6f3843d96882edac814aac2fc72fa006b4f79e0ee2d2472b6baafefb0fed27 | # The purpose of these tests are to ensure that calling quantities using
# array methods returns quantities with the right units, or raises exceptions.
import sys
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.utils.compat import NUMPY_LT_1_20, NUMPY_LT_1_21_1, NUMPY_LT_1_22
class TestQuantityArrayCopy:
"""
Test whether arrays are properly copied/used in place
"""
def test_copy_on_creation(self):
v = np.arange(1000.)
q_nocopy = u.Quantity(v, "km/s", copy=False)
q_copy = u.Quantity(v, "km/s", copy=True)
v[0] = -1.
assert q_nocopy[0].value == v[0]
assert q_copy[0].value != v[0]
def test_to_copies(self):
q = u.Quantity(np.arange(1., 100.), "km/s")
q2 = q.to(u.m/u.s)
assert np.all(q.value != q2.value)
q3 = q.to(u.km/u.s)
assert np.all(q.value == q3.value)
q[0] = -1.*u.km/u.s
assert q[0].value != q3[0].value
def test_si_copies(self):
q = u.Quantity(np.arange(100.), "m/s")
q2 = q.si
assert np.all(q.value == q2.value)
q[0] = -1.*u.m/u.s
assert q[0].value != q2[0].value
def test_getitem_is_view(self):
"""Check that [keys] work, and that, like ndarray, it returns
a view, so that changing one changes the other.
Also test that one can add axes (closes #1422)
"""
q = u.Quantity(np.arange(100.), "m/s")
q_sel = q[10:20]
q_sel[0] = -1.*u.m/u.s
assert q_sel[0] == q[10]
# also check that getitem can do new axes
q2 = q[:, np.newaxis]
q2[10, 0] = -9*u.m/u.s
assert np.all(q2.flatten() == q)
def test_flat(self):
q = u.Quantity(np.arange(9.).reshape(3, 3), "m/s")
q_flat = q.flat
# check that a single item is a quantity (with the right value)
assert q_flat[8] == 8. * u.m / u.s
# and that getting a range works as well
assert np.all(q_flat[0:2] == np.arange(2.) * u.m / u.s)
# as well as getting items via iteration
q_flat_list = [_q for _q in q.flat]
assert np.all(u.Quantity(q_flat_list) ==
u.Quantity([_a for _a in q.value.flat], q.unit))
# check that flat works like a view of the real array
q_flat[8] = -1. * u.km / u.s
assert q_flat[8] == -1. * u.km / u.s
assert q[2, 2] == -1. * u.km / u.s
# while if one goes by an iterated item, a copy is made
q_flat_list[8] = -2 * u.km / u.s
assert q_flat_list[8] == -2. * u.km / u.s
assert q_flat[8] == -1. * u.km / u.s
assert q[2, 2] == -1. * u.km / u.s
class TestQuantityReshapeFuncs:
"""Test different ndarray methods that alter the array shape
tests: reshape, squeeze, ravel, flatten, transpose, swapaxes
"""
def test_reshape(self):
q = np.arange(6.) * u.m
q_reshape = q.reshape(3, 2)
assert isinstance(q_reshape, u.Quantity)
assert q_reshape.unit == q.unit
assert np.all(q_reshape.value == q.value.reshape(3, 2))
def test_squeeze(self):
q = np.arange(6.).reshape(6, 1) * u.m
q_squeeze = q.squeeze()
assert isinstance(q_squeeze, u.Quantity)
assert q_squeeze.unit == q.unit
assert np.all(q_squeeze.value == q.value.squeeze())
def test_ravel(self):
q = np.arange(6.).reshape(3, 2) * u.m
q_ravel = q.ravel()
assert isinstance(q_ravel, u.Quantity)
assert q_ravel.unit == q.unit
assert np.all(q_ravel.value == q.value.ravel())
def test_flatten(self):
q = np.arange(6.).reshape(3, 2) * u.m
q_flatten = q.flatten()
assert isinstance(q_flatten, u.Quantity)
assert q_flatten.unit == q.unit
assert np.all(q_flatten.value == q.value.flatten())
def test_transpose(self):
q = np.arange(6.).reshape(3, 2) * u.m
q_transpose = q.transpose()
assert isinstance(q_transpose, u.Quantity)
assert q_transpose.unit == q.unit
assert np.all(q_transpose.value == q.value.transpose())
def test_swapaxes(self):
q = np.arange(6.).reshape(3, 1, 2) * u.m
q_swapaxes = q.swapaxes(0, 2)
assert isinstance(q_swapaxes, u.Quantity)
assert q_swapaxes.unit == q.unit
assert np.all(q_swapaxes.value == q.value.swapaxes(0, 2))
@pytest.mark.xfail(sys.byteorder == 'big' and NUMPY_LT_1_21_1,
reason="Numpy GitHub Issue 19153")
def test_flat_attributes(self):
"""While ``flat`` doesn't make a copy, it changes the shape."""
q = np.arange(6.).reshape(3, 1, 2) * u.m
qf = q.flat
# flat shape is same as before reshaping
assert len(qf) == 6
# see TestQuantityArrayCopy.test_flat for tests of iteration
# and slicing and setting. Here we test the properties and methods to
# match `numpy.ndarray.flatiter`
assert qf.base is q
# testing the indices -- flat and full -- into the array
assert qf.coords == (0, 0, 0) # to start
assert qf.index == 0
# now consume the iterator
endindices = [(qf.index, qf.coords) for x in qf][-2] # next() oversteps
assert endindices[0] == 5
assert endindices[1] == (2, 0, 1) # shape of q - 1
# also check q_flat copies properly
q_flat_copy = qf.copy()
assert all(q_flat_copy == q.flatten())
assert isinstance(q_flat_copy, u.Quantity)
assert not np.may_share_memory(q_flat_copy, q)
class TestQuantityStatsFuncs:
"""
Test statistical functions
"""
def test_mean(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
assert_array_equal(np.mean(q1), 3.6 * u.m)
assert_array_equal(np.mean(q1, keepdims=True), [3.6] * u.m)
def test_mean_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
qi = 1.5 * u.s
qi2 = np.mean(q1, out=qi)
assert qi2 is qi
assert qi == 3.6 * u.m
@pytest.mark.xfail(NUMPY_LT_1_20, reason="'where' keyword argument not supported for numpy < 1.20")
def test_mean_where(self):
q1 = np.array([1., 2., 4., 5., 6., 7.]) * u.m
assert_array_equal(np.mean(q1, where=q1 < 7 * u.m), 3.6 * u.m)
def test_std(self):
q1 = np.array([1., 2.]) * u.m
assert_array_equal(np.std(q1), 0.5 * u.m)
assert_array_equal(q1.std(axis=-1, keepdims=True), [0.5] * u.m)
def test_std_inplace(self):
q1 = np.array([1., 2.]) * u.m
qi = 1.5 * u.s
np.std(q1, out=qi)
assert qi == 0.5 * u.m
@pytest.mark.xfail(NUMPY_LT_1_20, reason="'where' keyword argument not supported for numpy < 1.20")
def test_std_where(self):
q1 = np.array([1., 2., 3.]) * u.m
assert_array_equal(np.std(q1, where=q1 < 3 * u.m), 0.5 * u.m)
def test_var(self):
q1 = np.array([1., 2.]) * u.m
assert_array_equal(np.var(q1), 0.25 * u.m ** 2)
assert_array_equal(q1.var(axis=0, keepdims=True), [0.25] * u.m ** 2)
def test_var_inplace(self):
q1 = np.array([1., 2.]) * u.m
qi = 1.5 * u.s
np.var(q1, out=qi)
assert qi == 0.25 * u.m ** 2
@pytest.mark.xfail(NUMPY_LT_1_20, reason="'where' keyword argument not supported for numpy < 1.20")
def test_var_where(self):
q1 = np.array([1., 2., 3.]) * u.m
assert_array_equal(np.var(q1, where=q1 < 3 * u.m), 0.25 * u.m ** 2)
def test_median(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
assert np.median(q1) == 4. * u.m
def test_median_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
qi = 1.5 * u.s
np.median(q1, out=qi)
assert qi == 4 * u.m
def test_min(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
assert np.min(q1) == 1. * u.m
def test_min_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
qi = 1.5 * u.s
np.min(q1, out=qi)
assert qi == 1. * u.m
def test_min_where(self):
q1 = np.array([0., 1., 2., 4., 5., 6.]) * u.m
assert np.min(q1, initial=10 * u.m, where=q1 > 0 * u.m) == 1. * u.m
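        # The where mask drops the 0. element, so the minimum of the rest is
        # 1 m; initial supplies the starting value for the reduction (and
        # would be the result if everything were masked out).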
def test_argmin(self):
q1 = np.array([6., 2., 4., 5., 6.]) * u.m
assert np.argmin(q1) == 1
@pytest.mark.skipif(NUMPY_LT_1_22,
reason='keepdims only introduced in numpy 1.22')
def test_argmin_keepdims(self):
q1 = np.array([[6., 2.], [4., 5.]]) * u.m
assert_array_equal(q1.argmin(axis=0, keepdims=True), np.array([[1, 0]]))
def test_max(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
assert np.max(q1) == 6. * u.m
def test_max_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
qi = 1.5 * u.s
np.max(q1, out=qi)
assert qi == 6. * u.m
def test_max_where(self):
q1 = np.array([1., 2., 4., 5., 6., 7.]) * u.m
assert np.max(q1, initial=0 * u.m, where=q1 < 7 * u.m) == 6. * u.m
def test_argmax(self):
q1 = np.array([5., 2., 4., 5., 6.]) * u.m
assert np.argmax(q1) == 4
@pytest.mark.skipif(NUMPY_LT_1_22,
reason='keepdims only introduced in numpy 1.22')
def test_argmax_keepdims(self):
q1 = np.array([[6., 2.], [4., 5.]]) * u.m
assert_array_equal(q1.argmax(axis=0, keepdims=True), np.array([[0, 1]]))
def test_clip(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.km / u.m
c1 = q1.clip(1500, 5.5 * u.Mm / u.km)
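        # km/m is a pure scale of 1000, so the plain number 1500 is read as
        # a dimensionless 1500 == 1.5 km/m, and 5.5 Mm/km == 5500 == 5.5 km/m,
        # giving clip bounds of 1.5 and 5.5.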
assert np.all(c1 == np.array([1.5, 2., 4., 5., 5.5]) * u.km / u.m)
def test_clip_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.km / u.m
c1 = q1.clip(1500, 5.5 * u.Mm / u.km, out=q1)
assert np.all(q1 == np.array([1.5, 2., 4., 5., 5.5]) * u.km / u.m)
c1[0] = 10 * u.Mm/u.mm
assert np.all(c1.value == q1.value)
def test_conj(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.km / u.m
assert np.all(q1.conj() == q1)
def test_ptp(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
assert np.ptp(q1) == 5. * u.m
def test_ptp_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
qi = 1.5 * u.s
np.ptp(q1, out=qi)
assert qi == 5. * u.m
def test_round(self):
q1 = np.array([1.253, 2.253, 3.253]) * u.kg
assert np.all(np.round(q1) == np.array([1, 2, 3]) * u.kg)
assert np.all(np.round(q1, decimals=2) ==
np.round(q1.value, decimals=2) * u.kg)
assert np.all(q1.round(decimals=2) ==
q1.value.round(decimals=2) * u.kg)
def test_round_inplace(self):
q1 = np.array([1.253, 2.253, 3.253]) * u.kg
qi = np.zeros(3) * u.s
a = q1.round(decimals=2, out=qi)
assert a is qi
assert np.all(q1.round(decimals=2) == qi)
def test_sum(self):
q1 = np.array([1., 2., 6.]) * u.m
assert np.all(q1.sum() == 9. * u.m)
assert np.all(np.sum(q1) == 9. * u.m)
q2 = np.array([[4., 5., 9.], [1., 1., 1.]]) * u.s
assert np.all(q2.sum(0) == np.array([5., 6., 10.]) * u.s)
assert np.all(np.sum(q2, 0) == np.array([5., 6., 10.]) * u.s)
def test_sum_inplace(self):
q1 = np.array([1., 2., 6.]) * u.m
qi = 1.5 * u.s
np.sum(q1, out=qi)
assert qi == 9. * u.m
def test_sum_where(self):
q1 = np.array([1., 2., 6., 7.]) * u.m
where = q1 < 7 * u.m
assert np.all(q1.sum(where=where) == 9. * u.m)
assert np.all(np.sum(q1, where=where) == 9. * u.m)
@pytest.mark.parametrize('initial', [0, 0*u.m, 1*u.km])
def test_sum_initial(self, initial):
q1 = np.array([1., 2., 6., 7.]) * u.m
expected = 16*u.m + initial
assert q1.sum(initial=initial) == expected
assert np.sum(q1, initial=initial) == expected
def test_sum_dimensionless_initial(self):
q1 = np.array([1., 2., 6., 7.]) * u.one
assert q1.sum(initial=1000) == 1016*u.one
@pytest.mark.parametrize('initial', [10, 1*u.s])
def test_sum_initial_exception(self, initial):
q1 = np.array([1., 2., 6., 7.]) * u.m
with pytest.raises(u.UnitsError):
q1.sum(initial=initial)
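        # initial must be convertible to the unit of the sum: a bare number
        # is dimensionless and seconds are incompatible with metres.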
def test_cumsum(self):
q1 = np.array([1, 2, 6]) * u.m
assert np.all(q1.cumsum() == np.array([1, 3, 9]) * u.m)
assert np.all(np.cumsum(q1) == np.array([1, 3, 9]) * u.m)
q2 = np.array([4, 5, 9]) * u.s
assert np.all(q2.cumsum() == np.array([4, 9, 18]) * u.s)
assert np.all(np.cumsum(q2) == np.array([4, 9, 18]) * u.s)
def test_cumsum_inplace(self):
q1 = np.array([1, 2, 6]) * u.m
qi = np.ones(3) * u.s
np.cumsum(q1, out=qi)
assert np.all(qi == np.array([1, 3, 9]) * u.m)
q2 = q1
q1.cumsum(out=q1)
assert np.all(q2 == qi)
def test_nansum(self):
q1 = np.array([1., 2., np.nan]) * u.m
assert np.all(q1.nansum() == 3. * u.m)
assert np.all(np.nansum(q1) == 3. * u.m)
q2 = np.array([[np.nan, 5., 9.], [1., np.nan, 1.]]) * u.s
assert np.all(q2.nansum(0) == np.array([1., 5., 10.]) * u.s)
assert np.all(np.nansum(q2, 0) == np.array([1., 5., 10.]) * u.s)
def test_nansum_inplace(self):
q1 = np.array([1., 2., np.nan]) * u.m
qi = 1.5 * u.s
qout = q1.nansum(out=qi)
assert qout is qi
assert qi == np.nansum(q1.value) * q1.unit
qi2 = 1.5 * u.s
qout2 = np.nansum(q1, out=qi2)
assert qout2 is qi2
assert qi2 == np.nansum(q1.value) * q1.unit
@pytest.mark.xfail(NUMPY_LT_1_22, reason="'where' keyword argument not supported for numpy < 1.22")
def test_nansum_where(self):
q1 = np.array([1., 2., np.nan, 4.]) * u.m
initial = 0 * u.m
where = q1 < 4 * u.m
assert np.all(q1.nansum(initial=initial, where=where) == 3. * u.m)
assert np.all(np.nansum(q1, initial=initial, where=where) == 3. * u.m)
def test_prod(self):
q1 = np.array([1, 2, 6]) * u.m
        with pytest.raises(u.UnitsError):
            q1.prod()
        with pytest.raises(u.UnitsError):
            np.prod(q1)
q2 = np.array([3., 4., 5.]) * u.Unit(1)
assert q2.prod() == 60. * u.Unit(1)
assert np.prod(q2) == 60. * u.Unit(1)
def test_cumprod(self):
q1 = np.array([1, 2, 6]) * u.m
        with pytest.raises(u.UnitsError):
            q1.cumprod()
        with pytest.raises(u.UnitsError):
            np.cumprod(q1)
q2 = np.array([3, 4, 5]) * u.Unit(1)
assert np.all(q2.cumprod() == np.array([3, 12, 60]) * u.Unit(1))
assert np.all(np.cumprod(q2) == np.array([3, 12, 60]) * u.Unit(1))
def test_diff(self):
q1 = np.array([1., 2., 4., 10.]) * u.m
assert np.all(q1.diff() == np.array([1., 2., 6.]) * u.m)
assert np.all(np.diff(q1) == np.array([1., 2., 6.]) * u.m)
def test_ediff1d(self):
q1 = np.array([1., 2., 4., 10.]) * u.m
assert np.all(q1.ediff1d() == np.array([1., 2., 6.]) * u.m)
assert np.all(np.ediff1d(q1) == np.array([1., 2., 6.]) * u.m)
def test_dot_meth(self):
q1 = np.array([1., 2., 4., 10.]) * u.m
q2 = np.array([3., 4., 5., 6.]) * u.s
q3 = q1.dot(q2)
assert q3.value == np.dot(q1.value, q2.value)
assert q3.unit == u.m * u.s
def test_trace_func(self):
q = np.array([[1., 2.], [3., 4.]]) * u.m
assert np.trace(q) == 5. * u.m
def test_trace_meth(self):
q1 = np.array([[1., 2.], [3., 4.]]) * u.m
assert q1.trace() == 5. * u.m
cont = u.Quantity(4., u.s)
q2 = np.array([[3., 4.], [5., 6.]]) * u.m
q2.trace(out=cont)
assert cont == 9. * u.m
def test_clip_func(self):
q = np.arange(10) * u.m
assert np.all(np.clip(q, 3 * u.m, 6 * u.m) == np.array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.]) * u.m)
def test_clip_meth(self):
expected = np.array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.]) * u.m
q1 = np.arange(10) * u.m
q3 = q1.clip(3 * u.m, 6 * u.m)
assert np.all(q1.clip(3 * u.m, 6 * u.m) == expected)
cont = np.zeros(10) * u.s
q1.clip(3 * u.m, 6 * u.m, out=cont)
assert np.all(cont == expected)
class TestArrayConversion:
"""
Test array conversion methods
"""
def test_item(self):
q1 = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
assert q1.item(1) == 2 * q1.unit
q1.itemset(1, 1)
assert q1.item(1) == 1000 * u.m / u.km
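        # A plain number is interpreted as dimensionless; since m/km is a
        # scale of 1/1000, storing 1 yields 1000 m/km.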
q1.itemset(1, 100 * u.cm / u.km)
assert q1.item(1) == 1 * u.m / u.km
with pytest.raises(TypeError):
q1.itemset(1, 1.5 * u.m / u.km)
with pytest.raises(ValueError):
q1.itemset()
q1[1] = 1
assert q1[1] == 1000 * u.m / u.km
q1[1] = 100 * u.cm / u.km
assert q1[1] == 1 * u.m / u.km
with pytest.raises(TypeError):
q1[1] = 1.5 * u.m / u.km
def test_take_put(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
assert q1.take(1) == 2 * u.m / u.km
assert all(q1.take((0, 2)) == np.array([1, 3]) * u.m / u.km)
q1.put((1, 2), (3, 4))
assert np.all(q1.take((1, 2)) == np.array([3000, 4000]) * q1.unit)
q1.put(0, 500 * u.cm / u.km)
assert q1.item(0) == 5 * u.m / u.km
def test_slice(self):
"""Test that setitem changes the unit if needed (or ignores it for
values where that is allowed; viz., #2695)"""
q2 = np.array([[1., 2., 3.], [4., 5., 6.]]) * u.km / u.m
q1 = q2.copy()
q2[0, 0] = 10000.
assert q2.unit == q1.unit
assert q2[0, 0].value == 10.
q2[0] = 9. * u.Mm / u.km
assert all(q2.flatten()[:3].value == np.array([9., 9., 9.]))
q2[0, :-1] = 8000.
assert all(q2.flatten()[:3].value == np.array([8., 8., 9.]))
with pytest.raises(u.UnitsError):
q2[1, 1] = 10 * u.s
        # just to be sure, repeat with a dimensionful unit
q3 = u.Quantity(np.arange(10.), "m/s")
q3[5] = 100. * u.cm / u.s
assert q3[5].value == 1.
# and check unit is ignored for 0, inf, nan, where that is reasonable
q3[5] = 0.
assert q3[5] == 0.
q3[5] = np.inf
assert np.isinf(q3[5])
q3[5] = np.nan
assert np.isnan(q3[5])
def test_fill(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
q1.fill(2)
assert np.all(q1 == 2000 * u.m / u.km)
def test_repeat_compress_diagonal(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
q2 = q1.repeat(2)
assert q2.unit == q1.unit
assert all(q2.value == q1.value.repeat(2))
q2.sort()
assert q2.unit == q1.unit
q2 = q1.compress(np.array([True, True, False, False]))
assert q2.unit == q1.unit
assert all(q2.value == q1.value.compress(np.array([True, True,
False, False])))
q1 = np.array([[1, 2], [3, 4]]) * u.m / u.km
q2 = q1.diagonal()
assert q2.unit == q1.unit
assert all(q2.value == q1.value.diagonal())
def test_view(self):
q1 = np.array([1, 2, 3], dtype=np.int64) * u.m / u.km
q2 = q1.view(np.ndarray)
assert not hasattr(q2, 'unit')
q3 = q2.view(u.Quantity)
assert q3._unit is None
# MaskedArray copies and properties assigned in __dict__
q4 = np.ma.MaskedArray(q1)
assert q4._unit is q1._unit
q5 = q4.view(u.Quantity)
assert q5.unit is q1.unit
def test_slice_to_quantity(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2003
"""
a = np.random.uniform(size=(10, 8))
x, y, z = a[:, 1:4].T * u.km/u.s
total = np.sum(a[:, 1] * u.km / u.s - x)
assert isinstance(total, u.Quantity)
assert total == (0.0 * u.km / u.s)
def test_byte_type_view_field_changes(self):
q1 = np.array([1, 2, 3], dtype=np.int64) * u.m / u.km
q2 = q1.byteswap()
assert q2.unit == q1.unit
assert all(q2.value == q1.value.byteswap())
q2 = q1.astype(np.float64)
assert all(q2 == q1)
assert q2.dtype == np.float64
q2a = q1.getfield(np.int32, offset=0)
q2b = q1.byteswap().getfield(np.int32, offset=4)
assert q2a.unit == q1.unit
assert all(q2b.byteswap() == q2a)
def test_sort(self):
q1 = np.array([1., 5., 2., 4.]) * u.km / u.m
i = q1.argsort()
assert not hasattr(i, 'unit')
q1.sort()
i = q1.searchsorted([1500, 2500])
assert not hasattr(i, 'unit')
assert all(i == q1.to(
u.dimensionless_unscaled).value.searchsorted([1500, 2500]))
def test_not_implemented(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
with pytest.raises(NotImplementedError):
q1.choose([0, 0, 1])
with pytest.raises(NotImplementedError):
q1.tolist()
with pytest.raises(NotImplementedError):
q1.tostring()
with pytest.raises(NotImplementedError):
q1.tobytes()
with pytest.raises(NotImplementedError):
q1.tofile(0)
with pytest.raises(NotImplementedError):
q1.dump('a.a')
with pytest.raises(NotImplementedError):
q1.dumps()
class TestRecArray:
"""Record arrays are not specifically supported, but we should not
prevent their use unnecessarily"""
def setup(self):
self.ra = (np.array(np.arange(12.).reshape(4, 3))
.view(dtype=('f8,f8,f8')).squeeze())
def test_creation(self):
qra = u.Quantity(self.ra, u.m)
assert np.all(qra[:2].value == self.ra[:2])
def test_equality(self):
qra = u.Quantity(self.ra, u.m)
qra[1] = qra[2]
assert qra[1] == qra[2]
|
7eabb6b6898ed1fecc424038fad9d8795dddb2baf9065f46072e7204180aede9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Logarithmic Units and Quantities
"""
import itertools
import pickle
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import constants as c
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
lu_units = [u.dex, u.mag, u.decibel]
lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit]
lq_subclasses = [u.Dex, u.Magnitude, u.Decibel]
pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy)
class TestLogUnitCreation:
def test_logarithmic_units(self):
"""Check logarithmic units are set up correctly."""
assert u.dB.to(u.dex) == 0.1
assert u.dex.to(u.mag) == -2.5
assert u.mag.to(u.dB) == -4
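        # These factors follow from the definitions dex(x) = log10(x),
        # dB = 10 log10(x) (power ratios) and mag = -2.5 log10(x) (flux
        # ratios): 1 dB = 0.1 dex, 1 dex = -2.5 mag and 1 mag = -4 dB.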
@pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses))
def test_callable_units(self, lu_unit, lu_cls):
assert isinstance(lu_unit, u.UnitBase)
assert callable(lu_unit)
assert lu_unit._function_unit_class is lu_cls
@pytest.mark.parametrize('lu_unit', lu_units)
def test_equality_to_normal_unit_for_dimensionless(self, lu_unit):
lu = lu_unit()
        assert lu == lu._default_function_unit  # e.g., MagUnit() == u.mag
        assert lu._default_function_unit == lu  # and u.mag == MagUnit()
@pytest.mark.parametrize('lu_unit, physical_unit',
itertools.product(lu_units, pu_sample))
def test_call_units(self, lu_unit, physical_unit):
"""Create a LogUnit subclass using the callable unit and physical unit,
and do basic check that output is right."""
lu1 = lu_unit(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
def test_call_invalid_unit(self):
with pytest.raises(TypeError):
u.mag([])
with pytest.raises(ValueError):
u.mag(u.mag())
@pytest.mark.parametrize('lu_cls, physical_unit', itertools.product(
lu_subclasses + [u.LogUnit], pu_sample))
def test_subclass_creation(self, lu_cls, physical_unit):
"""Create a LogUnit subclass object for given physical unit,
and do basic check that output is right."""
lu1 = lu_cls(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
lu2 = lu_cls(physical_unit,
function_unit=2*lu1._default_function_unit)
assert lu2.physical_unit == physical_unit
assert lu2.function_unit == u.Unit(2*lu2._default_function_unit)
with pytest.raises(ValueError):
lu_cls(physical_unit, u.m)
def test_lshift_magnitude(self):
mag = 1. << u.ABmag
assert isinstance(mag, u.Magnitude)
assert mag.unit == u.ABmag
assert mag.value == 1.
# same test for an array, which should produce a view
a2 = np.arange(10.)
q2 = a2 << u.ABmag
assert isinstance(q2, u.Magnitude)
assert q2.unit == u.ABmag
assert np.all(q2.value == a2)
a2[9] = 0.
assert np.all(q2.value == a2)
# a different magnitude unit
mag = 10. << u.STmag
assert isinstance(mag, u.Magnitude)
assert mag.unit == u.STmag
assert mag.value == 10.
def test_ilshift_magnitude(self):
# test in-place operation and conversion
mag_fnu_cgs = u.mag(u.erg/u.s/u.cm**2/u.Hz)
m = np.arange(10.0) * u.mag(u.Jy)
jy = m.physical
m2 = m << mag_fnu_cgs
assert np.all(m2 == m.to(mag_fnu_cgs))
m2 = m
m <<= mag_fnu_cgs
assert m is m2 # Check it was done in-place!
assert np.all(m.value == m2.value)
assert m.unit == mag_fnu_cgs
# Check it works if equivalencies are in-place.
with u.add_enabled_equivalencies(u.spectral_density(5500*u.AA)):
st = jy.to(u.ST)
m <<= u.STmag
assert m is m2
assert_quantity_allclose(m.physical, st)
assert m.unit == u.STmag
def test_lshift_errors(self):
m = np.arange(10.0) * u.mag(u.Jy)
with pytest.raises(u.UnitsError):
m << u.STmag
with pytest.raises(u.UnitsError):
m << u.Jy
with pytest.raises(u.UnitsError):
m <<= u.STmag
with pytest.raises(u.UnitsError):
m <<= u.Jy
def test_predefined_magnitudes():
assert_quantity_allclose((-21.1*u.STmag).physical,
1.*u.erg/u.cm**2/u.s/u.AA)
assert_quantity_allclose((-48.6*u.ABmag).physical,
1.*u.erg/u.cm**2/u.s/u.Hz)
assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0)
assert_quantity_allclose((0*u.m_bol).physical,
c.L_bol0/(4.*np.pi*(10.*c.pc)**2))
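    # These are the standard zero points: m_ST = -2.5 log10(f_lambda) - 21.1
    # with f_lambda in erg/s/cm^2/AA, and m_AB = -2.5 log10(f_nu) - 48.6 with
    # f_nu in erg/s/cm^2/Hz, so unit flux density corresponds to magnitudes
    # of -21.1 and -48.6.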
def test_predefined_reinitialisation():
assert u.mag('STflux') == u.STmag
assert u.mag('ABflux') == u.ABmag
assert u.mag('Bol') == u.M_bol
assert u.mag('bol') == u.m_bol
# required for backwards-compatibility, at least unless deprecated
assert u.mag('ST') == u.STmag
assert u.mag('AB') == u.ABmag
def test_predefined_string_roundtrip():
"""Ensure round-tripping; see #5015"""
assert u.Unit(u.STmag.to_string()) == u.STmag
assert u.Unit(u.ABmag.to_string()) == u.ABmag
assert u.Unit(u.M_bol.to_string()) == u.M_bol
assert u.Unit(u.m_bol.to_string()) == u.m_bol
def test_inequality():
"""Check __ne__ works (regression for #5342)."""
lu1 = u.mag(u.Jy)
lu2 = u.dex(u.Jy)
lu3 = u.mag(u.Jy**2)
lu4 = lu3 - lu1
assert lu1 != lu2
assert lu1 != lu3
assert lu1 == lu4
class TestLogUnitStrings:
def test_str(self):
"""Do some spot checks that str, repr, etc. work as expected."""
lu1 = u.mag(u.Jy)
assert str(lu1) == 'mag(Jy)'
assert repr(lu1) == 'Unit("mag(Jy)")'
assert lu1.to_string('generic') == 'mag(Jy)'
with pytest.raises(ValueError):
lu1.to_string('fits')
with pytest.raises(ValueError):
lu1.to_string(format='cds')
lu2 = u.dex()
assert str(lu2) == 'dex'
assert repr(lu2) == 'Unit("dex(1)")'
assert lu2.to_string() == 'dex(1)'
lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag)
assert str(lu3) == '2 mag(Jy)'
assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")'
assert lu3.to_string() == '2 mag(Jy)'
lu4 = u.mag(u.ct)
assert lu4.to_string('generic') == 'mag(ct)'
latex_str = r'$\mathrm{mag}$$\mathrm{\left( \mathrm{ct} \right)}$'
assert lu4.to_string('latex') == latex_str
assert lu4.to_string('latex_inline') == latex_str
assert lu4._repr_latex_() == latex_str
lu5 = u.mag(u.ct/u.s)
assert lu5.to_string('latex') == (r'$\mathrm{mag}$$\mathrm{\left( '
r'\mathrm{\frac{ct}{s}} \right)}$')
latex_str = (r'$\mathrm{mag}$$\mathrm{\left( \mathrm{ct\,s^{-1}} '
r'\right)}$')
assert lu5.to_string('latex_inline') == latex_str
class TestLogUnitConversion:
@pytest.mark.parametrize('lu_unit, physical_unit',
itertools.product(lu_units, pu_sample))
def test_physical_unit_conversion(self, lu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to their non-log counterparts."""
lu1 = lu_unit(physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(physical_unit, 0.) == 1.
assert physical_unit.is_equivalent(lu1)
assert physical_unit.to(lu1, 1.) == 0.
pu = u.Unit(8.*physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(pu, 0.) == 0.125
assert pu.is_equivalent(lu1)
assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15)
# Check we round-trip.
value = np.linspace(0., 10., 6)
assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15)
# And that we're not just returning True all the time.
pu2 = u.g
assert not lu1.is_equivalent(pu2)
with pytest.raises(u.UnitsError):
lu1.to(pu2)
assert not pu2.is_equivalent(lu1)
with pytest.raises(u.UnitsError):
pu2.to(lu1)
@pytest.mark.parametrize('lu_unit', lu_units)
def test_container_unit_conversion(self, lu_unit):
"""Check that conversion to logarithmic units (u.mag, u.dB, u.dex)
is only possible when the physical unit is dimensionless."""
values = np.linspace(0., 10., 6)
lu1 = lu_unit(u.dimensionless_unscaled)
assert lu1.is_equivalent(lu1.function_unit)
assert_allclose(lu1.to(lu1.function_unit, values), values)
lu2 = lu_unit(u.Jy)
assert not lu2.is_equivalent(lu2.function_unit)
with pytest.raises(u.UnitsError):
lu2.to(lu2.function_unit, values)
@pytest.mark.parametrize(
'flu_unit, tlu_unit, physical_unit',
itertools.product(lu_units, lu_units, pu_sample))
def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to each other if they correspond to equivalent physical units."""
values = np.linspace(0., 10., 6)
flu = flu_unit(physical_unit)
tlu = tlu_unit(physical_unit)
assert flu.is_equivalent(tlu)
assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit))
assert_allclose(flu.to(tlu, values),
values * flu.function_unit.to(tlu.function_unit))
tlu2 = tlu_unit(u.Unit(100.*physical_unit))
assert flu.is_equivalent(tlu2)
# Check that we round-trip.
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15)
tlu3 = tlu_unit(physical_unit.to_system(u.si)[0])
assert flu.is_equivalent(tlu3)
assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15)
tlu4 = tlu_unit(u.g)
assert not flu.is_equivalent(tlu4)
with pytest.raises(u.UnitsError):
flu.to(tlu4, values)
def test_unit_decomposition(self):
lu = u.mag(u.Jy)
assert lu.decompose() == u.mag(u.Jy.decompose())
assert lu.decompose().physical_unit.bases == [u.kg, u.s]
assert lu.si == u.mag(u.Jy.si)
assert lu.si.physical_unit.bases == [u.kg, u.s]
assert lu.cgs == u.mag(u.Jy.cgs)
assert lu.cgs.physical_unit.bases == [u.g, u.s]
def test_unit_multiple_possible_equivalencies(self):
lu = u.mag(u.Jy)
assert lu.is_equivalent(pu_sample)
def test_magnitude_conversion_fails_message(self):
"""Check that "dimensionless" magnitude units include a message in their
exception text suggesting a possible cause of the problem.
"""
with pytest.raises(u.UnitConversionError) as excinfo:
(10*u.ABmag - 2*u.ABmag).to(u.nJy)
assert "Did you perhaps subtract magnitudes so the unit got lost?" in str(excinfo.value)
class TestLogUnitArithmetic:
def test_multiplication_division(self):
"""Check that multiplication/division with other units is only
possible when the physical unit is dimensionless, and that this
turns the unit into a normal one."""
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 * u.m
with pytest.raises(u.UnitsError):
u.m * lu1
with pytest.raises(u.UnitsError):
lu1 / lu1
for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lu1 / unit
lu2 = u.mag(u.dimensionless_unscaled)
with pytest.raises(u.UnitsError):
lu2 * lu1
with pytest.raises(u.UnitsError):
lu2 / lu1
# But dimensionless_unscaled can be cancelled.
assert lu2 / lu2 == u.dimensionless_unscaled
# With dimensionless, normal units are OK, but we return a plain unit.
tf = lu2 * u.m
tr = u.m * lu2
for t in (tf, tr):
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lu2.physical_unit)
# Now we essentially have a LogUnit with a prefactor of 100,
# so should be equivalent again.
t = tf / u.cm
with u.set_enabled_equivalencies(u.logarithmic()):
assert t.is_equivalent(lu2.function_unit)
assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.),
lu2.to(lu2.physical_unit, np.arange(3.)))
# If we effectively remove lu1, a normal unit should be returned.
t2 = tf / lu2
assert not isinstance(t2, type(lu2))
assert t2 == u.m
t3 = tf / lu2.function_unit
assert not isinstance(t3, type(lu2))
assert t3 == u.m
# For completeness, also ensure non-sensical operations fail
with pytest.raises(TypeError):
lu1 * object()
with pytest.raises(TypeError):
slice(None) * lu1
with pytest.raises(TypeError):
lu1 / []
with pytest.raises(TypeError):
1 / lu1
@pytest.mark.parametrize('power', (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogUnits to some power is only possible when the
physical unit is dimensionless, and that conversion is turned off when
the resulting logarithmic unit (such as mag**2) is incompatible."""
lu1 = u.mag(u.Jy)
if power == 0:
assert lu1 ** power == u.dimensionless_unscaled
elif power == 1:
assert lu1 ** power == lu1
else:
with pytest.raises(u.UnitsError):
lu1 ** power
# With dimensionless, though, it works, but returns a normal unit.
lu2 = u.mag(u.dimensionless_unscaled)
t = lu2**power
if power == 0:
assert t == u.dimensionless_unscaled
elif power == 1:
assert t == lu2
else:
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit**power
# also check we roundtrip
t2 = t**(1./power)
assert t2 == lu2.function_unit
with u.set_enabled_equivalencies(u.logarithmic()):
assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)),
lu2.to(lu2.physical_unit, np.arange(3.)))
@pytest.mark.parametrize('other', pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 + other
with pytest.raises(u.UnitsError):
lu1 - other
with pytest.raises(u.UnitsError):
other - lu1
def test_addition_subtraction_to_non_units_fails(self):
lu1 = u.mag(u.Jy)
with pytest.raises(TypeError):
lu1 + 1.
with pytest.raises(TypeError):
lu1 - [1., 2., 3.]
@pytest.mark.parametrize(
'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m),
u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag)))
def test_addition_subtraction(self, other):
"""Check physical units are changed appropriately"""
lu1 = u.mag(u.Jy)
other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled)
lu_sf = lu1 + other
assert lu_sf.is_equivalent(lu1.physical_unit * other_pu)
lu_sr = other + lu1
assert lu_sr.is_equivalent(lu1.physical_unit * other_pu)
lu_df = lu1 - other
assert lu_df.is_equivalent(lu1.physical_unit / other_pu)
lu_dr = other - lu1
assert lu_dr.is_equivalent(other_pu / lu1.physical_unit)
def test_complicated_addition_subtraction(self):
"""for fun, a more complicated example of addition and subtraction"""
dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
lu_dm = u.mag(dm0)
lu_absST = u.STmag - lu_dm
assert lu_absST.is_equivalent(u.erg/u.s/u.AA)
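        # dm0 is the inverse area of a sphere of radius 10 pc, so mag(dm0)
        # plays the role of a distance modulus; subtracting it from STmag
        # (flux per unit area) cancels the area, leaving a luminosity-like
        # unit equivalent to erg/s/AA.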
def test_neg_pos(self):
lu1 = u.mag(u.Jy)
neg_lu = -lu1
assert neg_lu != lu1
assert neg_lu.physical_unit == u.Jy**-1
assert -neg_lu == lu1
pos_lu = +lu1
assert pos_lu is not lu1
assert pos_lu == lu1
def test_pickle():
lu1 = u.dex(u.cm/u.s**2)
s = pickle.dumps(lu1)
lu2 = pickle.loads(s)
assert lu1 == lu2
def test_hashable():
lu1 = u.dB(u.mW)
lu2 = u.dB(u.m)
lu3 = u.dB(u.mW)
assert hash(lu1) != hash(lu2)
assert hash(lu1) == hash(lu3)
luset = {lu1, lu2, lu3}
assert len(luset) == 2
class TestLogQuantityCreation:
@pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity],
lu_subclasses + [u.LogUnit]))
def test_logarithmic_quantities(self, lq, lu):
"""Check logarithmic quantities are all set up correctly"""
assert lq._unit_class == lu
assert type(lu()._quantity_class(1.)) is lq
@pytest.mark.parametrize('lq_cls, physical_unit',
itertools.product(lq_subclasses, pu_sample))
def test_subclass_creation(self, lq_cls, physical_unit):
"""Create LogQuantity subclass objects for some physical units,
and basic check on transformations"""
value = np.arange(1., 10.)
log_q = lq_cls(value * physical_unit)
assert log_q.unit.physical_unit == physical_unit
assert log_q.unit.function_unit == log_q.unit._default_function_unit
assert_allclose(log_q.physical.value, value)
with pytest.raises(ValueError):
lq_cls(value, physical_unit)
@pytest.mark.parametrize(
'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m),
u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag),
u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag)))
def test_different_units(self, unit):
q = u.Magnitude(1.23, unit)
assert q.unit.function_unit == getattr(unit, 'function_unit', unit)
assert q.unit.physical_unit is getattr(unit, 'physical_unit',
u.dimensionless_unscaled)
@pytest.mark.parametrize('value, unit', (
(1.*u.mag(u.Jy), None),
(1.*u.dex(u.Jy), None),
(1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)),
(1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy))))
def test_function_values(self, value, unit):
lq = u.Magnitude(value, unit)
assert lq == value
assert lq.unit.function_unit == u.mag
assert lq.unit.physical_unit == getattr(unit, 'physical_unit',
value.unit.physical_unit)
@pytest.mark.parametrize(
'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag),
u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag)))
def test_indirect_creation(self, unit):
q1 = 2.5 * unit
assert isinstance(q1, u.Magnitude)
assert q1.value == 2.5
assert q1.unit == unit
pv = 100. * unit.physical_unit
q2 = unit * pv
assert q2.unit == unit
assert q2.unit.physical_unit == pv.unit
assert q2.to_value(unit.physical_unit) == 100.
assert (q2._function_view / u.mag).to_value(1) == -5.
q3 = unit / 0.4
assert q3 == q1
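        # As a check on the numbers above: a physical factor of 100
        # corresponds to -2.5*log10(100) = -5 mag, and dividing the unit by
        # 0.4 is the same as multiplying it by 2.5, so q3 matches q1.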
def test_from_view(self):
# Cannot view a physical quantity as a function quantity, since the
# values would change.
q = [100., 1000.] * u.cm/u.s**2
with pytest.raises(TypeError):
q.view(u.Dex)
# But fine if we have the right magnitude.
q = [2., 3.] * u.dex
lq = q.view(u.Dex)
assert isinstance(lq, u.Dex)
assert lq.unit.physical_unit == u.dimensionless_unscaled
assert np.all(q == lq)
def test_using_quantity_class(self):
"""Check that we can use Quantity if we have subok=True"""
# following issue #5851
lu = u.dex(u.AA)
with pytest.raises(u.UnitTypeError):
u.Quantity(1., lu)
q = u.Quantity(1., lu, subok=True)
assert type(q) is lu._quantity_class
def test_conversion_to_and_from_physical_quantities():
"""Ensures we can convert from regular quantities."""
mst = [10., 12., 14.] * u.STmag
flux_lambda = mst.physical
mst_roundtrip = flux_lambda.to(u.STmag)
# check we return a logquantity; see #5178.
assert isinstance(mst_roundtrip, u.Magnitude)
assert mst_roundtrip.unit == mst.unit
assert_allclose(mst_roundtrip.value, mst.value)
wave = [4956.8, 4959.55, 4962.3] * u.AA
flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave))
mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave))
assert isinstance(mst_roundtrip2, u.Magnitude)
assert mst_roundtrip2.unit == mst.unit
assert_allclose(mst_roundtrip2.value, mst.value)
def test_quantity_decomposition():
lq = 10.*u.mag(u.Jy)
assert lq.decompose() == lq
assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s]
assert lq.si == lq
assert lq.si.unit.physical_unit.bases == [u.kg, u.s]
assert lq.cgs == lq
assert lq.cgs.unit.physical_unit.bases == [u.g, u.s]
class TestLogQuantityViews:
def setup(self):
self.lq = u.Magnitude(np.arange(1., 10.) * u.Jy)
self.lq2 = u.Magnitude(np.arange(1., 5.))
def test_value_view(self):
lq_value = self.lq.value
assert type(lq_value) is np.ndarray
lq_value[2] = -1.
assert np.all(self.lq.value == lq_value)
def test_function_view(self):
lq_fv = self.lq._function_view
assert type(lq_fv) is u.Quantity
assert lq_fv.unit is self.lq.unit.function_unit
lq_fv[3] = -2. * lq_fv.unit
assert np.all(self.lq.value == lq_fv.value)
def test_quantity_view(self):
# Cannot view as Quantity, since the unit cannot be represented.
with pytest.raises(TypeError):
self.lq.view(u.Quantity)
# But a dimensionless one is fine.
q2 = self.lq2.view(u.Quantity)
assert q2.unit is u.mag
assert np.all(q2.value == self.lq2.value)
lq3 = q2.view(u.Magnitude)
assert type(lq3.unit) is u.MagUnit
assert lq3.unit.physical_unit == u.dimensionless_unscaled
assert np.all(lq3 == self.lq2)
class TestLogQuantitySlicing:
def test_item_get_and_set(self):
lq1 = u.Magnitude(np.arange(1., 11.)*u.Jy)
assert lq1[9] == u.Magnitude(10.*u.Jy)
lq1[2] = 100.*u.Jy
assert lq1[2] == u.Magnitude(100.*u.Jy)
with pytest.raises(u.UnitsError):
lq1[2] = 100.*u.m
with pytest.raises(u.UnitsError):
lq1[2] = 100.*u.mag
with pytest.raises(u.UnitsError):
lq1[2] = u.Magnitude(100.*u.m)
assert lq1[2] == u.Magnitude(100.*u.Jy)
def test_slice_get_and_set(self):
lq1 = u.Magnitude(np.arange(1., 10.)*u.Jy)
lq1[2:4] = 100.*u.Jy
assert np.all(lq1[2:4] == u.Magnitude(100.*u.Jy))
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.*u.m
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.*u.mag
with pytest.raises(u.UnitsError):
lq1[2:4] = u.Magnitude(100.*u.m)
        assert np.all(lq1[2:4] == u.Magnitude(100.*u.Jy))
class TestLogQuantityArithmetic:
@pytest.mark.parametrize(
'other', [2.4 * u.mag(), 12.34 * u.ABmag,
u.Magnitude(3.45 * u.Jy), u.Dex(3.),
u.Dex(np.linspace(3000, 5000, 10) * u.Angstrom),
u.Magnitude(6.78, 2. * u.mag)])
@pytest.mark.parametrize('fac', [1., 2, 0.4])
def test_multiplication_division(self, other, fac):
"""Check that multiplication and division works as expectes"""
lq_sf = fac * other
assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
assert_allclose(lq_sf.physical, other.physical ** fac)
lq_sf = other * fac
assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
assert_allclose(lq_sf.physical, other.physical ** fac)
lq_sf = other / fac
assert lq_sf.unit.physical_unit**fac == other.unit.physical_unit
assert_allclose(lq_sf.physical**fac, other.physical)
lq_sf = other.copy()
lq_sf *= fac
assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
assert_allclose(lq_sf.physical, other.physical ** fac)
lq_sf = other.copy()
lq_sf /= fac
assert lq_sf.unit.physical_unit**fac == other.unit.physical_unit
assert_allclose(lq_sf.physical**fac, other.physical)
def test_more_multiplication_division(self):
"""Check that multiplication/division with other quantities is only
possible when the physical unit is dimensionless, and that this turns
the result into a normal quantity."""
lq = u.Magnitude(np.arange(1., 11.)*u.Jy)
with pytest.raises(u.UnitsError):
lq * (1.*u.m)
with pytest.raises(u.UnitsError):
(1.*u.m) * lq
with pytest.raises(u.UnitsError):
lq / lq
for unit in (u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lq / unit
lq2 = u.Magnitude(np.arange(1, 11.))
with pytest.raises(u.UnitsError):
lq2 * lq
with pytest.raises(u.UnitsError):
lq2 / lq
with pytest.raises(u.UnitsError):
lq / lq2
lq_sf = lq.copy()
with pytest.raises(u.UnitsError):
lq_sf *= lq2
# ensure that nothing changed inside
assert (lq_sf == lq).all()
with pytest.raises(u.UnitsError):
lq_sf /= lq2
# ensure that nothing changed inside
assert (lq_sf == lq).all()
# but dimensionless_unscaled can be cancelled
r = lq2 / u.Magnitude(2.)
assert r.unit == u.dimensionless_unscaled
assert np.all(r.value == lq2.value/2.)
# with dimensionless, normal units OK, but return normal quantities
tf = lq2 * u.m
tr = u.m * lq2
for t in (tf, tr):
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lq2.unit.physical_unit)
t = tf / (50.*u.cm)
# now we essentially have the same quantity but with a prefactor of 2
assert t.unit.is_equivalent(lq2.unit.function_unit)
assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view*2)
@pytest.mark.parametrize('power', (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogQuantities to some power is only possible when
the physical unit is dimensionless, and that conversion is turned off
when the resulting logarithmic unit (say, mag**2) is incompatible."""
lq = u.Magnitude(np.arange(1., 4.)*u.Jy)
if power == 0:
assert np.all(lq ** power == 1.)
elif power == 1:
assert np.all(lq ** power == lq)
else:
with pytest.raises(u.UnitsError):
lq ** power
# with dimensionless, it works, but falls back to normal quantity
# (except for power=1)
lq2 = u.Magnitude(np.arange(10.))
t = lq2**power
if power == 0:
assert t.unit is u.dimensionless_unscaled
assert np.all(t.value == 1.)
elif power == 1:
assert np.all(t == lq2)
else:
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit ** power
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(u.dimensionless_unscaled)
def test_error_on_lq_as_power(self):
lq = u.Magnitude(np.arange(1., 4.)*u.Jy)
with pytest.raises(TypeError):
lq ** lq
@pytest.mark.parametrize('other', pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
q = 1.23 * other
with pytest.raises(u.UnitsError):
lq + q
with pytest.raises(u.UnitsError):
lq - q
with pytest.raises(u.UnitsError):
q - lq
@pytest.mark.parametrize(
'other', (1.23 * u.mag, 2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag)))
def test_addition_subtraction(self, other):
"""Check that addition/subtraction with quantities with magnitude or
MagUnit units works, and that it changes the physical units
appropriately."""
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
other_physical = other.to(getattr(other.unit, 'physical_unit',
u.dimensionless_unscaled),
equivalencies=u.logarithmic())
lq_sf = lq + other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_sr = other + lq
assert_allclose(lq_sr.physical, lq.physical * other_physical)
lq_df = lq - other
assert_allclose(lq_df.physical, lq.physical / other_physical)
lq_dr = other - lq
assert_allclose(lq_dr.physical, other_physical / lq.physical)
@pytest.mark.parametrize('other', pu_sample)
def test_inplace_addition_subtraction_unit_checks(self, other):
lu1 = u.mag(u.Jy)
lq1 = u.Magnitude(np.arange(1., 10.), lu1)
with pytest.raises(u.UnitsError):
lq1 += other
assert np.all(lq1.value == np.arange(1., 10.))
assert lq1.unit == lu1
with pytest.raises(u.UnitsError):
lq1 -= other
assert np.all(lq1.value == np.arange(1., 10.))
assert lq1.unit == lu1
@pytest.mark.parametrize(
'other', (1.23 * u.mag, 2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag)))
def test_inplace_addition_subtraction(self, other):
"""Check that inplace addition/subtraction with quantities with
magnitude or MagUnit units works, and that it changes the physical
units appropriately."""
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
other_physical = other.to(getattr(other.unit, 'physical_unit',
u.dimensionless_unscaled),
equivalencies=u.logarithmic())
lq_sf = lq.copy()
lq_sf += other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_df = lq.copy()
lq_df -= other
assert_allclose(lq_df.physical, lq.physical / other_physical)
def test_complicated_addition_subtraction(self):
"""For fun, a more complicated example of addition and subtraction."""
dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
DMmag = u.mag(dm0)
m_st = 10. * u.STmag
dm = 5. * DMmag
M_st = m_st - dm
assert M_st.unit.is_equivalent(u.erg/u.s/u.AA)
assert np.abs(M_st.physical /
(m_st.physical*4.*np.pi*(100.*u.pc)**2) - 1.) < 1.e-15
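        # Check of the arithmetic: a distance modulus of 5 mag corresponds
        # to 10**(5/5) * 10 pc = 100 pc, so the absolute flux is the
        # apparent one times 4*pi*(100 pc)**2, as asserted above.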
class TestLogQuantityComparisons:
def test_comparison_to_non_quantities_fails(self):
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
with pytest.raises(TypeError):
lq > 'a'
assert not (lq == 'a')
assert lq != 'a'
def test_comparison(self):
lq1 = u.Magnitude(np.arange(1., 4.)*u.Jy)
lq2 = u.Magnitude(2.*u.Jy)
assert np.all((lq1 > lq2) == np.array([True, False, False]))
assert np.all((lq1 == lq2) == np.array([False, True, False]))
lq3 = u.Dex(2.*u.Jy)
assert np.all((lq1 > lq3) == np.array([True, False, False]))
assert np.all((lq1 == lq3) == np.array([False, True, False]))
lq4 = u.Magnitude(2.*u.m)
assert not (lq1 == lq4)
assert lq1 != lq4
with pytest.raises(u.UnitsError):
lq1 < lq4
q5 = 1.5 * u.Jy
assert np.all((lq1 > q5) == np.array([True, False, False]))
assert np.all((q5 < lq1) == np.array([True, False, False]))
with pytest.raises(u.UnitsError):
lq1 >= 2.*u.m
with pytest.raises(u.UnitsError):
lq1 <= lq1.value * u.mag
# For physically dimensionless, we can compare with the function unit.
lq6 = u.Magnitude(np.arange(1., 4.))
fv6 = lq6.value * u.mag
assert np.all(lq6 == fv6)
# but not some arbitrary unit, of course.
with pytest.raises(u.UnitsError):
lq6 < 2.*u.m
class TestLogQuantityMethods:
def setup(self):
self.mJy = np.arange(1., 5.).reshape(2, 2) * u.mag(u.Jy)
self.m1 = np.arange(1., 5.5, 0.5).reshape(3, 3) * u.mag()
self.mags = (self.mJy, self.m1)
@pytest.mark.parametrize('method', ('mean', 'min', 'max', 'round', 'trace',
'std', 'var', 'ptp', 'diff', 'ediff1d'))
def test_always_ok(self, method):
for mag in self.mags:
res = getattr(mag, method)()
assert np.all(res.value ==
getattr(mag._function_view, method)().value)
if method in ('std', 'ptp', 'diff', 'ediff1d'):
assert res.unit == u.mag()
elif method == 'var':
assert res.unit == u.mag**2
else:
assert res.unit == mag.unit
def test_clip(self):
for mag in self.mags:
assert np.all(mag.clip(2. * mag.unit, 4. * mag.unit).value ==
mag.value.clip(2., 4.))
@pytest.mark.parametrize('method', ('sum', 'cumsum', 'nansum'))
def test_only_ok_if_dimensionless(self, method):
res = getattr(self.m1, method)()
assert np.all(res.value ==
getattr(self.m1._function_view, method)().value)
assert res.unit == self.m1.unit
with pytest.raises(TypeError):
getattr(self.mJy, method)()
def test_dot(self):
assert np.all(self.m1.dot(self.m1).value ==
self.m1.value.dot(self.m1.value))
@pytest.mark.parametrize('method', ('prod', 'cumprod'))
def test_never_ok(self, method):
with pytest.raises(TypeError):
getattr(self.mJy, method)()
with pytest.raises(TypeError):
getattr(self.m1, method)()
|
10f63cd2b4c0d69c8c33fb74cfacb1990bfd9b4fdbc764452fc4b082a8f214c2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test the Quantity class and related."""
import sys
import typing as T
import numpy as np
import pytest
from astropy import units as u
from astropy.units._typing import HAS_ANNOTATED, Annotated
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires py3.9+")
class TestQuantityTyping:
"""Test Quantity Typing Annotations."""
def test_quantity_typing(self):
"""Test type hint creation from Quantity."""
annot = u.Quantity[u.m]
assert T.get_origin(annot) is Annotated
assert T.get_args(annot) == (u.Quantity, u.m)
# test usage
def func(x: annot, y: str) -> u.Quantity[u.s]:
return x, y
annots = T.get_type_hints(func, include_extras=True)
assert annots["x"] is annot
assert annots["return"].__metadata__[0] == u.s
def test_metadata_in_annotation(self):
"""Test Quantity annotation with added metadata."""
multi_annot = u.Quantity[u.m, T.Any, np.dtype]
def multi_func(x: multi_annot, y: str):
return x, y
annots = T.get_type_hints(multi_func, include_extras=True)
assert annots["x"] == multi_annot
def test_optional_and_annotated(self):
"""Test Quantity annotation in an Optional."""
opt_annot = T.Optional[u.Quantity[u.m]]
def opt_func(x: opt_annot, y: str):
return x, y
annots = T.get_type_hints(opt_func, include_extras=True)
assert annots["x"] == opt_annot
def test_union_and_annotated(self):
"""Test Quantity annotation in a Union."""
# double Quantity[]
union_annot1 = T.Union[u.Quantity[u.m], u.Quantity[u.s]]
# one Quantity, one physical-type
union_annot2 = T.Union[u.Quantity[u.m], u.Quantity["time"]]
# one Quantity, one general type
union_annot3 = T.Union[u.Quantity[u.m / u.s], float]
def union_func(x: union_annot1, y: union_annot2) -> union_annot3:
if isinstance(y, str): # value = time
return x.value # returns <float>
else:
return x / y # returns Quantity[m / s]
annots = T.get_type_hints(union_func, include_extras=True)
assert annots["x"] == union_annot1
assert annots["y"] == union_annot2
assert annots["return"] == union_annot3
def test_quantity_subclass_typing(self):
"""Test type hint creation from a Quantity subclasses."""
class Length(u.SpecificTypeQuantity):
_equivalent_unit = u.m
annot = Length[u.km]
assert T.get_origin(annot) is Annotated
assert T.get_args(annot) == (Length, u.km)
|
845d78a7597c1956c55e087e8cc966c7779b19ff53fce526a0d47afbff499008 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test Structured units and quantities.
"""
import copy
import numpy as np
import numpy.lib.recfunctions as rfn
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.tests.helper import check_pickling_recovery, pickle_protocol
from astropy.units import Quantity, StructuredUnit, Unit, UnitBase
from astropy.utils.compat import NUMPY_LT_1_21_1
from astropy.utils.masked import Masked
class StructuredTestBase:
@classmethod
def setup_class(self):
self.pv_dtype = np.dtype([('p', 'f8'), ('v', 'f8')])
self.pv_t_dtype = np.dtype([('pv', self.pv_dtype), ('t', 'f8')])
self.p_unit = u.km
self.v_unit = u.km / u.s
self.t_unit = u.s
self.pv = np.array([(1., 0.25), (2., 0.5), (3., 0.75)],
self.pv_dtype)
self.pv_t = np.array([((4., 2.5), 0.),
((5., 5.0), 1.),
((6., 7.5), 2.)], self.pv_t_dtype)
class StructuredTestBaseWithUnits(StructuredTestBase):
@classmethod
def setup_class(self):
super().setup_class()
self.pv_unit = StructuredUnit((self.p_unit, self.v_unit),
('p', 'v'))
self.pv_t_unit = StructuredUnit((self.pv_unit, self.t_unit),
('pv', 't'))
class TestStructuredUnitBasics(StructuredTestBase):
def test_initialization_and_keying(self):
su = StructuredUnit((self.p_unit, self.v_unit), ('p', 'v'))
assert su['p'] is self.p_unit
assert su['v'] is self.v_unit
su2 = StructuredUnit((su, self.t_unit), ('pv', 't'))
assert isinstance(su2['pv'], StructuredUnit)
assert su2['pv']['p'] is self.p_unit
assert su2['pv']['v'] is self.v_unit
assert su2['t'] is self.t_unit
assert su2['pv'] == su
su3 = StructuredUnit(('AU', 'AU/day'), ('p', 'v'))
assert isinstance(su3['p'], UnitBase)
assert isinstance(su3['v'], UnitBase)
su4 = StructuredUnit('AU, AU/day', ('p', 'v'))
assert su4['p'] == u.AU
assert su4['v'] == u.AU / u.day
su5 = StructuredUnit(('AU', 'AU/day'))
assert su5.field_names == ('f0', 'f1')
assert su5['f0'] == u.AU
assert su5['f1'] == u.AU / u.day
def test_recursive_initialization(self):
su = StructuredUnit(((self.p_unit, self.v_unit), self.t_unit),
(('p', 'v'), 't'))
assert isinstance(su['pv'], StructuredUnit)
assert su['pv']['p'] is self.p_unit
assert su['pv']['v'] is self.v_unit
assert su['t'] is self.t_unit
su2 = StructuredUnit(((self.p_unit, self.v_unit), self.t_unit),
(['p_v', ('p', 'v')], 't'))
assert isinstance(su2['p_v'], StructuredUnit)
assert su2['p_v']['p'] is self.p_unit
assert su2['p_v']['v'] is self.v_unit
assert su2['t'] is self.t_unit
su3 = StructuredUnit((('AU', 'AU/day'), 'yr'),
(['p_v', ('p', 'v')], 't'))
assert isinstance(su3['p_v'], StructuredUnit)
assert su3['p_v']['p'] == u.AU
assert su3['p_v']['v'] == u.AU / u.day
assert su3['t'] == u.yr
su4 = StructuredUnit('(AU, AU/day), yr', (('p', 'v'), 't'))
assert isinstance(su4['pv'], StructuredUnit)
assert su4['pv']['p'] == u.AU
assert su4['pv']['v'] == u.AU / u.day
assert su4['t'] == u.yr
def test_extreme_recursive_initialization(self):
su = StructuredUnit('(yr,(AU,AU/day,(km,(day,day))),m)',
('t', ('p', 'v', ('h', ('d1', 'd2'))), 'l'))
assert su.field_names == ('t', ['pvhd1d2',
('p', 'v',
['hd1d2',
('h',
['d1d2',
('d1', 'd2')])])], 'l')
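        # For nested structures, field_names uses a list whose first entry
        # is a combined name formed by concatenating the sub-field names
        # (e.g., 'hd1d2' for ('h', ('d1', 'd2'))), followed by the tuple of
        # those sub-fields.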
@pytest.mark.parametrize('names, invalid', [
[('t', ['p', 'v']), "['p', 'v']"],
[('t', ['pv', 'p', 'v']), "['pv', 'p', 'v']"],
[('t', ['pv', ['p', 'v']]), "['pv', ['p', 'v']"],
[('t', ()), "()"],
[('t', ('p', None)), "None"],
[('t', ['pv', ('p', '')]), "''"]])
def test_initialization_names_invalid_list_errors(self, names, invalid):
with pytest.raises(ValueError) as exc:
StructuredUnit('(yr,(AU,AU/day)', names)
assert f'invalid entry {invalid}' in str(exc)
def test_looks_like_unit(self):
su = StructuredUnit((self.p_unit, self.v_unit), ('p', 'v'))
assert Unit(su) is su
def test_initialize_with_float_dtype(self):
su = StructuredUnit(('AU', 'AU/d'), self.pv_dtype)
assert isinstance(su['p'], UnitBase)
assert isinstance(su['v'], UnitBase)
assert su['p'] == u.AU
assert su['v'] == u.AU / u.day
su = StructuredUnit((('km', 'km/s'), 'yr'), self.pv_t_dtype)
assert isinstance(su['pv'], StructuredUnit)
assert isinstance(su['pv']['p'], UnitBase)
assert isinstance(su['t'], UnitBase)
assert su['pv']['v'] == u.km / u.s
su = StructuredUnit('(km, km/s), yr', self.pv_t_dtype)
assert isinstance(su['pv'], StructuredUnit)
assert isinstance(su['pv']['p'], UnitBase)
assert isinstance(su['t'], UnitBase)
assert su['pv']['v'] == u.km / u.s
def test_initialize_with_structured_unit_for_names(self):
su = StructuredUnit(('AU', 'AU/d'), names=('p', 'v'))
su2 = StructuredUnit(('km', 'km/s'), names=su)
assert su2.field_names == ('p', 'v')
assert su2['p'] == u.km
assert su2['v'] == u.km / u.s
def test_initialize_single_field(self):
su = StructuredUnit('AU', 'p')
assert isinstance(su, StructuredUnit)
assert isinstance(su['p'], UnitBase)
assert su['p'] == u.AU
su = StructuredUnit('AU')
assert isinstance(su, StructuredUnit)
assert isinstance(su['f0'], UnitBase)
assert su['f0'] == u.AU
def test_equality(self):
su = StructuredUnit(('AU', 'AU/d'), self.pv_dtype)
assert su == StructuredUnit(('AU', 'AU/d'), self.pv_dtype)
assert su != StructuredUnit(('m', 'AU/d'), self.pv_dtype)
# Names should be ignored.
assert su == StructuredUnit(('AU', 'AU/d'))
assert su == StructuredUnit(('AU', 'AU/d'), names=('q', 'w'))
assert su != StructuredUnit(('m', 'm/s'))
def test_parsing(self):
su = Unit('AU, AU/d')
assert isinstance(su, StructuredUnit)
assert isinstance(su['f0'], UnitBase)
assert isinstance(su['f1'], UnitBase)
assert su['f0'] == u.AU
assert su['f1'] == u.AU/u.day
su2 = Unit('AU, AU/d, yr')
assert isinstance(su2, StructuredUnit)
assert su2 == StructuredUnit(('AU', 'AU/d', 'yr'))
su2a = Unit('(AU, AU/d, yr)')
assert isinstance(su2a, StructuredUnit)
assert su2a == su2
su3 = Unit('(km, km/s), yr')
assert isinstance(su3, StructuredUnit)
assert su3 == StructuredUnit((('km', 'km/s'), 'yr'))
su4 = Unit('km,')
assert isinstance(su4, StructuredUnit)
assert su4 == StructuredUnit((u.km,))
su5 = Unit('(m,s),')
assert isinstance(su5, StructuredUnit)
assert su5 == StructuredUnit(((u.m, u.s),))
ldbody_unit = Unit('Msun, 0.5rad^2, (au, au/day)')
assert ldbody_unit == StructuredUnit(
(u.Msun, Unit(u.rad**2 / 2), (u.AU, u.AU / u.day)))
def test_to_string(self):
su = StructuredUnit((u.km, u.km/u.s))
latex_str = r'$(\mathrm{km}, \mathrm{\frac{km}{s}})$'
assert su.to_string(format='latex') == latex_str
latex_str = r'$(\mathrm{km}, \mathrm{km\,s^{-1}})$'
assert su.to_string(format='latex_inline') == latex_str
def test_str(self):
su = StructuredUnit(((u.km, u.km/u.s), u.yr))
assert str(su) == '((km, km / s), yr)'
assert Unit(str(su)) == su
def test_repr(self):
su = StructuredUnit(((u.km, u.km/u.s), u.yr))
assert repr(su) == 'Unit("((km, km / s), yr)")'
assert eval(repr(su)) == su
class TestStructuredUnitsCopyPickle(StructuredTestBaseWithUnits):
def test_copy(self):
su_copy = copy.copy(self.pv_t_unit)
assert su_copy is not self.pv_t_unit
assert su_copy == self.pv_t_unit
assert su_copy._units is self.pv_t_unit._units
def test_deepcopy(self):
su_copy = copy.deepcopy(self.pv_t_unit)
assert su_copy is not self.pv_t_unit
assert su_copy == self.pv_t_unit
assert su_copy._units is not self.pv_t_unit._units
@pytest.mark.skipif(NUMPY_LT_1_21_1, reason="https://stackoverflow.com/q/69571643")
def test_pickle(self, pickle_protocol):
check_pickling_recovery(self.pv_t_unit, pickle_protocol)
class TestStructuredUnitAsMapping(StructuredTestBaseWithUnits):
def test_len(self):
assert len(self.pv_unit) == 2
assert len(self.pv_t_unit) == 2
def test_keys(self):
slv = list(self.pv_t_unit.keys())
assert slv == ['pv', 't']
def test_values(self):
values = self.pv_t_unit.values()
assert values == (self.pv_unit, self.t_unit)
def test_field_names(self):
field_names = self.pv_t_unit.field_names
assert isinstance(field_names, tuple)
assert field_names == (['pv', ('p', 'v')], 't')
@pytest.mark.parametrize('iterable', [list, set])
def test_as_iterable(self, iterable):
sl = iterable(self.pv_unit)
assert isinstance(sl, iterable)
assert sl == iterable(['p', 'v'])
def test_as_dict(self):
sd = dict(self.pv_t_unit)
assert sd == {'pv': self.pv_unit, 't': self.t_unit}
def test_contains(self):
assert 'p' in self.pv_unit
assert 'v' in self.pv_unit
assert 't' not in self.pv_unit
def test_setitem_fails(self):
with pytest.raises(TypeError, match='item assignment'):
self.pv_t_unit['t'] = u.Gyr
class TestStructuredUnitMethods(StructuredTestBaseWithUnits):
def test_physical_type_id(self):
pv_ptid = self.pv_unit._get_physical_type_id()
assert len(pv_ptid) == 2
assert pv_ptid.dtype.names == ('p', 'v')
p_ptid = self.pv_unit['p']._get_physical_type_id()
v_ptid = self.pv_unit['v']._get_physical_type_id()
# Expected should be (subclass of) void, with structured object dtype.
expected = np.array((p_ptid, v_ptid), [('p', 'O'), ('v', 'O')])[()]
assert pv_ptid == expected
# Names should be ignored in comparison.
assert pv_ptid == np.array((p_ptid, v_ptid), 'O,O')[()]
# Should be possible to address by field and by number.
assert pv_ptid['p'] == p_ptid
assert pv_ptid['v'] == v_ptid
assert pv_ptid[0] == p_ptid
assert pv_ptid[1] == v_ptid
# More complicated version.
pv_t_ptid = self.pv_t_unit._get_physical_type_id()
t_ptid = self.t_unit._get_physical_type_id()
assert pv_t_ptid == np.array((pv_ptid, t_ptid), 'O,O')[()]
assert pv_t_ptid['pv'] == pv_ptid
assert pv_t_ptid['t'] == t_ptid
assert pv_t_ptid['pv'][1] == v_ptid
def test_physical_type(self):
pv_pt = self.pv_unit.physical_type
assert pv_pt == np.array(('length', 'speed'), 'O,O')[()]
pv_t_pt = self.pv_t_unit.physical_type
assert pv_t_pt == np.array((pv_pt, 'time'), 'O,O')[()]
def test_si(self):
pv_t_si = self.pv_t_unit.si
assert pv_t_si == self.pv_t_unit
assert pv_t_si['pv']['v'].scale == 1000
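        # Equality still holds since km is exactly 1000 m; .si merely
        # re-expresses the unit on SI bases, so the speed field carries a
        # scale of 1000 relative to m/s.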
def test_cgs(self):
pv_t_cgs = self.pv_t_unit.cgs
assert pv_t_cgs == self.pv_t_unit
assert pv_t_cgs['pv']['v'].scale == 100000
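        # Likewise for cgs: km/s == 100000 cm/s.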
def test_decompose(self):
pv_t_decompose = self.pv_t_unit.decompose()
assert pv_t_decompose['pv']['v'].scale == 1000
def test_is_equivalent(self):
assert self.pv_unit.is_equivalent(('AU', 'AU/day'))
assert not self.pv_unit.is_equivalent('m')
assert not self.pv_unit.is_equivalent(('AU', 'AU'))
# Names should be ignored.
pv_alt = StructuredUnit('m,m/s', names=('q', 'w'))
assert pv_alt.field_names != self.pv_unit.field_names
assert self.pv_unit.is_equivalent(pv_alt)
# Regular units should work too.
assert not u.m.is_equivalent(self.pv_unit)
def test_conversion(self):
pv1 = self.pv_unit.to(('AU', 'AU/day'), self.pv)
assert isinstance(pv1, np.ndarray)
assert pv1.dtype == self.pv.dtype
assert np.all(pv1['p'] * u.AU == self.pv['p'] * self.p_unit)
assert np.all(pv1['v'] * u.AU / u.day == self.pv['v'] * self.v_unit)
        # Names should come from the value's dtype, not from the unit.
su2 = StructuredUnit((self.p_unit, self.v_unit),
('position', 'velocity'))
pv2 = su2.to(('Mm', 'mm/s'), self.pv)
assert pv2.dtype.names == ('p', 'v')
assert pv2.dtype == self.pv.dtype
# Check recursion.
pv_t1 = self.pv_t_unit.to((('AU', 'AU/day'), 'Myr'), self.pv_t)
assert isinstance(pv_t1, np.ndarray)
assert pv_t1.dtype == self.pv_t.dtype
assert np.all(pv_t1['pv']['p'] * u.AU ==
self.pv_t['pv']['p'] * self.p_unit)
assert np.all(pv_t1['pv']['v'] * u.AU / u.day ==
self.pv_t['pv']['v'] * self.v_unit)
assert np.all(pv_t1['t'] * u.Myr == self.pv_t['t'] * self.t_unit)
# Passing in tuples should work.
pv_t2 = self.pv_t_unit.to((('AU', 'AU/day'), 'Myr'),
((1., 0.1), 10.))
assert pv_t2['pv']['p'] == self.p_unit.to('AU', 1.)
assert pv_t2['pv']['v'] == self.v_unit.to('AU/day', 0.1)
assert pv_t2['t'] == self.t_unit.to('Myr', 10.)
pv_t3 = self.pv_t_unit.to((('AU', 'AU/day'), 'Myr'),
[((1., 0.1), 10.),
((2., 0.2), 20.)])
assert np.all(pv_t3['pv']['p'] == self.p_unit.to('AU', [1., 2.]))
assert np.all(pv_t3['pv']['v'] == self.v_unit.to('AU/day', [0.1, 0.2]))
assert np.all(pv_t3['t'] == self.t_unit.to('Myr', [10., 20.]))
class TestStructuredUnitArithmetic(StructuredTestBaseWithUnits):
def test_multiplication(self):
pv_times_au = self.pv_unit * u.au
assert isinstance(pv_times_au, StructuredUnit)
assert pv_times_au.field_names == ('p', 'v')
assert pv_times_au['p'] == self.p_unit * u.AU
assert pv_times_au['v'] == self.v_unit * u.AU
au_times_pv = u.au * self.pv_unit
assert au_times_pv == pv_times_au
pv_times_au2 = self.pv_unit * 'au'
assert pv_times_au2 == pv_times_au
au_times_pv2 = 'AU' * self.pv_unit
assert au_times_pv2 == pv_times_au
with pytest.raises(TypeError):
self.pv_unit * self.pv_unit
with pytest.raises(TypeError):
's,s' * self.pv_unit
def test_division(self):
pv_by_s = self.pv_unit / u.s
assert isinstance(pv_by_s, StructuredUnit)
assert pv_by_s.field_names == ('p', 'v')
assert pv_by_s['p'] == self.p_unit / u.s
assert pv_by_s['v'] == self.v_unit / u.s
pv_by_s2 = self.pv_unit / 's'
assert pv_by_s2 == pv_by_s
with pytest.raises(TypeError):
1. / self.pv_unit
with pytest.raises(TypeError):
u.s / self.pv_unit
class TestStructuredQuantity(StructuredTestBaseWithUnits):
def test_initialization_and_keying(self):
q_pv = Quantity(self.pv, self.pv_unit)
q_p = q_pv['p']
assert isinstance(q_p, Quantity)
assert isinstance(q_p.unit, UnitBase)
assert np.all(q_p == self.pv['p'] * self.pv_unit['p'])
q_v = q_pv['v']
assert isinstance(q_v, Quantity)
assert isinstance(q_v.unit, UnitBase)
assert np.all(q_v == self.pv['v'] * self.pv_unit['v'])
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_t = q_pv_t['t']
assert np.all(q_t == self.pv_t['t'] * self.pv_t_unit['t'])
q_pv2 = q_pv_t['pv']
assert isinstance(q_pv2, Quantity)
assert q_pv2.unit == self.pv_unit
with pytest.raises(ValueError):
Quantity(self.pv, self.pv_t_unit)
with pytest.raises(ValueError):
Quantity(self.pv_t, self.pv_unit)
def test_initialization_with_unit_tuples(self):
q_pv_t = Quantity(self.pv_t, (('km', 'km/s'), 's'))
assert isinstance(q_pv_t.unit, StructuredUnit)
assert q_pv_t.unit == self.pv_t_unit
def test_initialization_with_string(self):
q_pv_t = Quantity(self.pv_t, '(km, km/s), s')
assert isinstance(q_pv_t.unit, StructuredUnit)
assert q_pv_t.unit == self.pv_t_unit
def test_initialization_by_multiplication_with_unit(self):
q_pv_t = self.pv_t * self.pv_t_unit
assert q_pv_t.unit is self.pv_t_unit
assert np.all(q_pv_t.value == self.pv_t)
assert not np.may_share_memory(q_pv_t, self.pv_t)
q_pv_t2 = self.pv_t_unit * self.pv_t
        assert q_pv_t2.unit is self.pv_t_unit
# Not testing equality of structured Quantity here.
assert np.all(q_pv_t2.value == q_pv_t.value)
def test_initialization_by_shifting_to_unit(self):
q_pv_t = self.pv_t << self.pv_t_unit
assert q_pv_t.unit is self.pv_t_unit
assert np.all(q_pv_t.value == self.pv_t)
assert np.may_share_memory(q_pv_t, self.pv_t)
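        # Unlike initialization by multiplication above, << just attaches
        # the unit, returning a view of the input array rather than a copy.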
def test_getitem(self):
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_pv_t01 = q_pv_t[:2]
assert isinstance(q_pv_t01, Quantity)
assert q_pv_t01.unit == q_pv_t.unit
assert np.all(q_pv_t01['t'] == q_pv_t['t'][:2])
q_pv_t1 = q_pv_t[1]
assert isinstance(q_pv_t1, Quantity)
assert q_pv_t1.unit == q_pv_t.unit
assert q_pv_t1.shape == ()
assert q_pv_t1['t'] == q_pv_t['t'][1]
def test_value(self):
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
value = q_pv_t.value
assert type(value) is np.ndarray
assert np.all(value == self.pv_t)
value1 = q_pv_t[1].value
assert type(value1) is np.void
assert np.all(value1 == self.pv_t[1])
def test_conversion(self):
q_pv = Quantity(self.pv, self.pv_unit)
q1 = q_pv.to(('AU', 'AU/day'))
assert isinstance(q1, Quantity)
assert q1['p'].unit == u.AU
assert q1['v'].unit == u.AU / u.day
assert np.all(q1['p'] == q_pv['p'].to(u.AU))
assert np.all(q1['v'] == q_pv['v'].to(u.AU/u.day))
q2 = q_pv.to(self.pv_unit)
assert q2['p'].unit == self.p_unit
assert q2['v'].unit == self.v_unit
assert np.all(q2['p'].value == self.pv['p'])
assert np.all(q2['v'].value == self.pv['v'])
assert not np.may_share_memory(q2, q_pv)
pv1 = q_pv.to_value(('AU', 'AU/day'))
assert type(pv1) is np.ndarray
assert np.all(pv1['p'] == q_pv['p'].to_value(u.AU))
assert np.all(pv1['v'] == q_pv['v'].to_value(u.AU/u.day))
pv11 = q_pv[1].to_value(('AU', 'AU/day'))
assert type(pv11) is np.void
assert pv11 == pv1[1]
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q2 = q_pv_t.to((('kpc', 'kpc/Myr'), 'Myr'))
assert q2['pv']['p'].unit == u.kpc
assert q2['pv']['v'].unit == u.kpc / u.Myr
assert q2['t'].unit == u.Myr
assert np.all(q2['pv']['p'] == q_pv_t['pv']['p'].to(u.kpc))
assert np.all(q2['pv']['v'] == q_pv_t['pv']['v'].to(u.kpc/u.Myr))
assert np.all(q2['t'] == q_pv_t['t'].to(u.Myr))
def test_conversion_via_lshift(self):
q_pv = Quantity(self.pv, self.pv_unit)
q1 = q_pv << StructuredUnit(('AU', 'AU/day'))
assert isinstance(q1, Quantity)
assert q1['p'].unit == u.AU
assert q1['v'].unit == u.AU / u.day
assert np.all(q1['p'] == q_pv['p'].to(u.AU))
assert np.all(q1['v'] == q_pv['v'].to(u.AU/u.day))
q2 = q_pv << self.pv_unit
assert q2['p'].unit == self.p_unit
assert q2['v'].unit == self.v_unit
assert np.all(q2['p'].value == self.pv['p'])
assert np.all(q2['v'].value == self.pv['v'])
assert np.may_share_memory(q2, q_pv)
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q2 = q_pv_t << '(kpc,kpc/Myr),Myr'
assert q2['pv']['p'].unit == u.kpc
assert q2['pv']['v'].unit == u.kpc / u.Myr
assert q2['t'].unit == u.Myr
assert np.all(q2['pv']['p'] == q_pv_t['pv']['p'].to(u.kpc))
assert np.all(q2['pv']['v'] == q_pv_t['pv']['v'].to(u.kpc/u.Myr))
assert np.all(q2['t'] == q_pv_t['t'].to(u.Myr))
def test_inplace_conversion(self):
q_pv = Quantity(self.pv, self.pv_unit)
q1 = q_pv.copy()
q_link = q1
q1 <<= StructuredUnit(('AU', 'AU/day'))
assert q1 is q_link
assert q1['p'].unit == u.AU
assert q1['v'].unit == u.AU / u.day
assert np.all(q1['p'] == q_pv['p'].to(u.AU))
assert np.all(q1['v'] == q_pv['v'].to(u.AU/u.day))
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q2 = q_pv_t.copy()
q_link = q2
q2 <<= '(kpc,kpc/Myr),Myr'
assert q2 is q_link
assert q2['pv']['p'].unit == u.kpc
assert q2['pv']['v'].unit == u.kpc / u.Myr
assert q2['t'].unit == u.Myr
assert np.all(q2['pv']['p'] == q_pv_t['pv']['p'].to(u.kpc))
assert np.all(q2['pv']['v'] == q_pv_t['pv']['v'].to(u.kpc/u.Myr))
assert np.all(q2['t'] == q_pv_t['t'].to(u.Myr))
def test_si(self):
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_pv_t_si = q_pv_t.si
assert_array_equal(q_pv_t_si, q_pv_t.to('(m,m/s),s'))
def test_cgs(self):
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_pv_t_cgs = q_pv_t.cgs
assert_array_equal(q_pv_t_cgs, q_pv_t.to('(cm,cm/s),s'))
def test_equality(self):
q_pv = Quantity(self.pv, self.pv_unit)
equal = q_pv == q_pv
not_equal = q_pv != q_pv
assert np.all(equal)
assert not np.any(not_equal)
equal2 = q_pv == q_pv[1]
not_equal2 = q_pv != q_pv[1]
assert np.all(equal2 == [False, True, False])
assert np.all(not_equal2 != equal2)
q1 = q_pv.to(('AU', 'AU/day'))
# Ensure same conversion is done, by placing q1 first.
assert np.all(q1 == q_pv)
assert not np.any(q1 != q_pv)
# Check that comparison still works when the dtype field names differ.
assert np.all(q1.value * u.Unit('AU, AU/day') == q_pv)
assert not np.any(q1.value * u.Unit('AU, AU/day') != q_pv)
assert (q_pv == 'b') is False
assert ('b' != q_pv) is True
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
assert np.all((q_pv_t[2] == q_pv_t) == [False, False, True])
assert np.all((q_pv_t[2] != q_pv_t) != [False, False, True])
assert (q_pv == q_pv_t) is False
assert (q_pv_t != q_pv) is True
def test_setitem(self):
q_pv = Quantity(self.pv, self.pv_unit)
q_pv[1] = (2., 2.) * self.pv_unit
assert q_pv[1].value == np.array((2., 2.), self.pv_dtype)
q_pv[1:2] = (1., 0.5) * u.Unit('AU, AU/day')
assert q_pv['p'][1] == 1. * u.AU
assert q_pv['v'][1] == 0.5 * u.AU / u.day
q_pv['v'] = 1. * u.km / u.s
assert np.all(q_pv['v'] == 1. * u.km / u.s)
with pytest.raises(u.UnitsError):
q_pv[1] = (1., 1.) * u.Unit('AU, AU')
with pytest.raises(u.UnitsError):
q_pv['v'] = 1. * u.km
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_pv_t[1] = ((2., 2.), 3.) * self.pv_t_unit
assert q_pv_t[1].value == np.array(((2., 2.), 3.), self.pv_t_dtype)
q_pv_t[1:2] = ((1., 0.5), 5.) * u.Unit('(AU, AU/day), yr')
assert q_pv_t['pv'][1] == (1., 0.5) * u.Unit('AU, AU/day')
assert q_pv_t['t'][1] == 5. * u.yr
q_pv_t['pv'] = (1., 0.5) * self.pv_unit
assert np.all(q_pv_t['pv'] == (1., 0.5) * self.pv_unit)
class TestStructuredQuantityFunctions(StructuredTestBaseWithUnits):
@classmethod
def setup_class(self):
super().setup_class()
self.q_pv = self.pv << self.pv_unit
self.q_pv_t = self.pv_t << self.pv_t_unit
def test_empty_like(self):
z = np.empty_like(self.q_pv)
assert z.dtype == self.pv_dtype
assert z.unit == self.pv_unit
assert z.shape == self.pv.shape
@pytest.mark.parametrize('func', [np.zeros_like, np.ones_like])
def test_zeros_ones_like(self, func):
z = func(self.q_pv)
assert z.dtype == self.pv_dtype
assert z.unit == self.pv_unit
assert z.shape == self.pv.shape
assert_array_equal(z, func(self.pv) << self.pv_unit)
def test_structured_to_unstructured(self):
# can't unstructure something with incompatible units
with pytest.raises(u.UnitConversionError, match="'km / s'"):
rfn.structured_to_unstructured(self.q_pv)
# For the other tests of ``structured_to_unstructured``, see
# ``test_quantity_non_ufuncs.TestRecFunctions.test_structured_to_unstructured``
def test_unstructured_to_structured(self):
# can't structure something that's already structured
dtype = np.dtype([("f1", float), ("f2", float)])
with pytest.raises(ValueError, match="The length of the last dimension"):
rfn.unstructured_to_structured(self.q_pv, dtype=self.q_pv.dtype)
# For the other tests of ``unstructured_to_structured``, see
# ``test_quantity_non_ufuncs.TestRecFunctions.test_unstructured_to_structured``
class TestStructuredSpecificTypeQuantity(StructuredTestBaseWithUnits):
def setup_class(self):
super().setup_class()
class PositionVelocity(u.SpecificTypeQuantity):
_equivalent_unit = self.pv_unit
self.PositionVelocity = PositionVelocity
def test_init(self):
pv = self.PositionVelocity(self.pv, self.pv_unit)
assert isinstance(pv, self.PositionVelocity)
assert type(pv['p']) is u.Quantity
assert_array_equal(pv['p'], self.pv['p'] << self.pv_unit['p'])
pv2 = self.PositionVelocity(self.pv, 'AU,AU/day')
assert_array_equal(pv2['p'], self.pv['p'] << u.AU)
def test_error_on_non_equivalent_unit(self):
with pytest.raises(u.UnitsError):
self.PositionVelocity(self.pv, 'AU')
with pytest.raises(u.UnitsError):
self.PositionVelocity(self.pv, 'AU,yr')
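# The tests below exercise structured units that combine a function unit
# (ST magnitudes) with an ordinary unit, at both the unit and quantity level.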
class TestStructuredLogUnit:
def setup_class(self):
self.mag_time_dtype = np.dtype([('mag', 'f8'), ('t', 'f8')])
self.mag_time = np.array([(20., 10.), (25., 100.)], self.mag_time_dtype)
def test_unit_initialization(self):
mag_time_unit = StructuredUnit((u.STmag, u.s), self.mag_time_dtype)
assert mag_time_unit['mag'] == u.STmag
assert mag_time_unit['t'] == u.s
mag_time_unit2 = u.Unit('mag(ST),s')
assert mag_time_unit2 == mag_time_unit
def test_quantity_initialization(self):
su = u.Unit('mag(ST),s')
mag_time = self.mag_time << su
assert isinstance(mag_time['mag'], u.Magnitude)
assert isinstance(mag_time['t'], u.Quantity)
assert mag_time.unit == su
assert_array_equal(mag_time['mag'], self.mag_time['mag'] << u.STmag)
assert_array_equal(mag_time['t'], self.mag_time['t'] << u.s)
def test_quantity_si(self):
mag_time = self.mag_time << u.Unit('mag(ST),yr')
mag_time_si = mag_time.si
assert_array_equal(mag_time_si['mag'], mag_time['mag'].si)
assert_array_equal(mag_time_si['t'], mag_time['t'].si)
class TestStructuredMaskedQuantity(StructuredTestBaseWithUnits):
"""Somewhat minimal tests. Conversion is most stringent."""
def setup_class(self):
super().setup_class()
self.qpv = self.pv << self.pv_unit
self.pv_mask = np.array([(True, False),
(False, False),
(False, True)], [('p', bool), ('v', bool)])
self.mpv = Masked(self.qpv, mask=self.pv_mask)
def test_init(self):
assert isinstance(self.mpv, Masked)
assert isinstance(self.mpv, Quantity)
assert_array_equal(self.mpv.unmasked, self.qpv)
assert_array_equal(self.mpv.mask, self.pv_mask)
def test_slicing(self):
mp = self.mpv['p']
assert isinstance(mp, Masked)
assert isinstance(mp, Quantity)
assert_array_equal(mp.unmasked, self.qpv['p'])
assert_array_equal(mp.mask, self.pv_mask['p'])
def test_conversion(self):
mpv = self.mpv.to('AU,AU/day')
assert isinstance(mpv, Masked)
assert isinstance(mpv, Quantity)
assert_array_equal(mpv.unmasked, self.qpv.to('AU,AU/day'))
assert_array_equal(mpv.mask, self.pv_mask)
assert np.all(mpv == self.mpv)
def test_si(self):
mpv = self.mpv.si
assert isinstance(mpv, Masked)
assert isinstance(mpv, Quantity)
assert_array_equal(mpv.unmasked, self.qpv.si)
assert_array_equal(mpv.mask, self.pv_mask)
assert np.all(mpv == self.mpv)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test the Quantity class and related."""
import copy
import decimal
import numbers
import pickle
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal
from astropy import units as u
from astropy.units.quantity import _UNIT_NOT_INITIALISED
from astropy.utils import isiterable, minversion
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
""" The Quantity class will represent a number + unit + uncertainty """
class TestQuantityCreation:
def test_1(self):
# create objects through operations with Unit objects:
quantity = 11.42 * u.meter # returns a Quantity object
assert isinstance(quantity, u.Quantity)
quantity = u.meter * 11.42 # returns a Quantity object
assert isinstance(quantity, u.Quantity)
quantity = 11.42 / u.meter
assert isinstance(quantity, u.Quantity)
quantity = u.meter / 11.42
assert isinstance(quantity, u.Quantity)
quantity = 11.42 * u.meter / u.second
assert isinstance(quantity, u.Quantity)
with pytest.raises(TypeError):
quantity = 182.234 + u.meter
with pytest.raises(TypeError):
quantity = 182.234 - u.meter
with pytest.raises(TypeError):
quantity = 182.234 % u.meter
def test_2(self):
# create objects using the Quantity constructor:
_ = u.Quantity(11.412, unit=u.meter)
_ = u.Quantity(21.52, "cm")
q3 = u.Quantity(11.412)
# By default quantities that don't specify a unit are unscaled
# dimensionless
assert q3.unit == u.Unit(1)
with pytest.raises(TypeError):
u.Quantity(object(), unit=u.m)
def test_3(self):
# with pytest.raises(u.UnitsError):
with pytest.raises(ValueError): # Until @mdboom fixes the errors in units
u.Quantity(11.412, unit="testingggg")
def test_nan_inf(self):
# Not-a-number
q = u.Quantity('nan', unit='cm')
assert np.isnan(q.value)
q = u.Quantity('NaN', unit='cm')
assert np.isnan(q.value)
q = u.Quantity('-nan', unit='cm') # float() allows this
assert np.isnan(q.value)
q = u.Quantity('nan cm')
assert np.isnan(q.value)
assert q.unit == u.cm
# Infinity
q = u.Quantity('inf', unit='cm')
assert np.isinf(q.value)
q = u.Quantity('-inf', unit='cm')
assert np.isinf(q.value)
q = u.Quantity('inf cm')
assert np.isinf(q.value)
assert q.unit == u.cm
q = u.Quantity('Infinity', unit='cm') # float() allows this
assert np.isinf(q.value)
# make sure these strings don't parse...
with pytest.raises(TypeError):
q = u.Quantity('', unit='cm')
with pytest.raises(TypeError):
q = u.Quantity('spam', unit='cm')
def test_unit_property(self):
# test getting and setting 'unit' attribute
q1 = u.Quantity(11.4, unit=u.meter)
with pytest.raises(AttributeError):
q1.unit = u.cm
def test_preserve_dtype(self):
"""Test that if an explicit dtype is given, it is used, while if not,
numbers are converted to float (including decimal.Decimal, which
numpy converts to an object; closes #1419)
"""
# If dtype is specified, use it, but if not, convert int, bool to float
q1 = u.Quantity(12, unit=u.m / u.s, dtype=int)
assert q1.dtype == int
q2 = u.Quantity(q1)
assert q2.dtype == float
assert q2.value == float(q1.value)
assert q2.unit == q1.unit
# but we should preserve any float32 or even float16
a3_32 = np.array([1., 2.], dtype=np.float32)
q3_32 = u.Quantity(a3_32, u.yr)
assert q3_32.dtype == a3_32.dtype
a3_16 = np.array([1., 2.], dtype=np.float16)
q3_16 = u.Quantity(a3_16, u.yr)
assert q3_16.dtype == a3_16.dtype
# items stored as objects by numpy should be converted to float
# by default
q4 = u.Quantity(decimal.Decimal('10.25'), u.m)
assert q4.dtype == float
q5 = u.Quantity(decimal.Decimal('10.25'), u.m, dtype=object)
assert q5.dtype == object
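def test_preserve_dtype_bool_sketch(self):
    # Added sketch (hedged): the docstring above also promises that bool
    # input is converted to float by default; check that explicitly.
    q = u.Quantity(True, u.m)
    assert q.dtype == float
    assert q.value == 1.0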
def test_numpy_style_dtype_inspect(self):
"""Test that if ``dtype=None``, NumPy's dtype inspection is used."""
q2 = u.Quantity(12, dtype=None)
assert np.issubdtype(q2.dtype, np.integer)
def test_float_dtype_promotion(self):
"""Test that if ``dtype=numpy.inexact``, the minimum precision is float64."""
q1 = u.Quantity(12, dtype=np.inexact)
assert not np.issubdtype(q1.dtype, np.integer)
assert q1.dtype == np.float64
q2 = u.Quantity(np.float64(12), dtype=np.inexact)
assert q2.dtype == np.float64
q3 = u.Quantity(np.float32(12), dtype=np.inexact)
assert q3.dtype == np.float32
if hasattr(np, "float16"):
q3 = u.Quantity(np.float16(12), dtype=np.inexact)
assert q3.dtype == np.float16
if hasattr(np, "float128"):
q4 = u.Quantity(np.float128(12), dtype=np.inexact)
assert q4.dtype == np.float128
def test_copy(self):
# By default, a new quantity is constructed, but not if copy=False
a = np.arange(10.)
q0 = u.Quantity(a, unit=u.m / u.s)
assert q0.base is not a
q1 = u.Quantity(a, unit=u.m / u.s, copy=False)
assert q1.base is a
q2 = u.Quantity(q0)
assert q2 is not q0
assert q2.base is not q0.base
q2 = u.Quantity(q0, copy=False)
assert q2 is q0
assert q2.base is q0.base
q3 = u.Quantity(q0, q0.unit, copy=False)
assert q3 is q0
assert q3.base is q0.base
q4 = u.Quantity(q0, u.cm / u.s, copy=False)
assert q4 is not q0
assert q4.base is not q0.base
def test_subok(self):
"""Test subok can be used to keep class, or to insist on Quantity"""
class MyQuantitySubclass(u.Quantity):
pass
myq = MyQuantitySubclass(np.arange(10.), u.m)
# try both with and without changing the unit
assert type(u.Quantity(myq)) is u.Quantity
assert type(u.Quantity(myq, subok=True)) is MyQuantitySubclass
assert type(u.Quantity(myq, u.km)) is u.Quantity
assert type(u.Quantity(myq, u.km, subok=True)) is MyQuantitySubclass
def test_order(self):
"""Test that order is correctly propagated to np.array"""
ac = np.array(np.arange(10.), order='C')
qcc = u.Quantity(ac, u.m, order='C')
assert qcc.flags['C_CONTIGUOUS']
qcf = u.Quantity(ac, u.m, order='F')
assert qcf.flags['F_CONTIGUOUS']
qca = u.Quantity(ac, u.m, order='A')
assert qca.flags['C_CONTIGUOUS']
# check it works also when passing in a quantity
assert u.Quantity(qcc, order='C').flags['C_CONTIGUOUS']
assert u.Quantity(qcc, order='A').flags['C_CONTIGUOUS']
assert u.Quantity(qcc, order='F').flags['F_CONTIGUOUS']
af = np.array(np.arange(10.), order='F')
qfc = u.Quantity(af, u.m, order='C')
assert qfc.flags['C_CONTIGUOUS']
qff = u.Quantity(ac, u.m, order='F')
assert qff.flags['F_CONTIGUOUS']
qfa = u.Quantity(af, u.m, order='A')
assert qfa.flags['F_CONTIGUOUS']
assert u.Quantity(qff, order='C').flags['C_CONTIGUOUS']
assert u.Quantity(qff, order='A').flags['F_CONTIGUOUS']
assert u.Quantity(qff, order='F').flags['F_CONTIGUOUS']
def test_ndmin(self):
"""Test that ndmin is correctly propagated to np.array"""
a = np.arange(10.)
q1 = u.Quantity(a, u.m, ndmin=1)
assert q1.ndim == 1 and q1.shape == (10,)
q2 = u.Quantity(a, u.m, ndmin=2)
assert q2.ndim == 2 and q2.shape == (1, 10)
# check it works also when passing in a quantity
q3 = u.Quantity(q1, u.m, ndmin=3)
assert q3.ndim == 3 and q3.shape == (1, 1, 10)
# see github issue #10063
assert u.Quantity(u.Quantity(1, 'm'), 'm', ndmin=1).ndim == 1
assert u.Quantity(u.Quantity(1, 'cm'), 'm', ndmin=1).ndim == 1
def test_non_quantity_with_unit(self):
"""Test that unit attributes in objects get recognized."""
class MyQuantityLookalike(np.ndarray):
pass
a = np.arange(3.)
mylookalike = a.copy().view(MyQuantityLookalike)
mylookalike.unit = 'm'
q1 = u.Quantity(mylookalike)
assert isinstance(q1, u.Quantity)
assert q1.unit is u.m
assert np.all(q1.value == a)
q2 = u.Quantity(mylookalike, u.mm)
assert q2.unit is u.mm
assert np.all(q2.value == 1000.*a)
q3 = u.Quantity(mylookalike, copy=False)
assert np.all(q3.value == mylookalike)
q3[2] = 0
assert q3[2] == 0.
assert mylookalike[2] == 0.
mylookalike = a.copy().view(MyQuantityLookalike)
mylookalike.unit = u.m
q4 = u.Quantity(mylookalike, u.mm, copy=False)
q4[2] = 0
assert q4[2] == 0.
assert mylookalike[2] == 2.
mylookalike.unit = 'nonsense'
with pytest.raises(TypeError):
u.Quantity(mylookalike)
def test_creation_via_view(self):
# This works but is no better than 1. * u.m
q1 = 1. << u.m
assert isinstance(q1, u.Quantity)
assert q1.unit == u.m
assert q1.value == 1.
# With an array, we get an actual view.
a2 = np.arange(10.)
q2 = a2 << u.m / u.s
assert isinstance(q2, u.Quantity)
assert q2.unit == u.m / u.s
assert np.all(q2.value == a2)
a2[9] = 0.
assert np.all(q2.value == a2)
# But with a unit change we get a copy.
q3 = q2 << u.mm / u.s
assert isinstance(q3, u.Quantity)
assert q3.unit == u.mm / u.s
assert np.all(q3.value == a2 * 1000.)
a2[8] = 0.
assert q3[8].value == 8000.
# Without a unit change, we do get a view.
q4 = q2 << q2.unit
a2[7] = 0.
assert np.all(q4.value == a2)
with pytest.raises(u.UnitsError):
q2 << u.s
# But one can do an in-place unit change.
a2_copy = a2.copy()
q2 <<= u.mm / u.s
assert q2.unit == u.mm / u.s
# Of course, this changes a2 as well.
assert np.all(q2.value == a2)
# Sanity check on the values.
assert np.all(q2.value == a2_copy * 1000.)
a2[8] = -1.
# Using quantities, one can also work with strings.
q5 = q2 << 'km/hr'
assert q5.unit == u.km / u.hr
assert np.all(q5 == q2)
# Finally, we can use scalar quantities as units.
not_quite_a_foot = 30. * u.cm
a6 = np.arange(5.)
q6 = a6 << not_quite_a_foot
assert q6.unit == u.Unit(not_quite_a_foot)
assert np.all(q6.to_value(u.cm) == 30. * a6)
def test_rshift_warns(self):
with pytest.raises(TypeError), \
pytest.warns(AstropyWarning, match='is not implemented') as warning_lines:
1 >> u.m
assert len(warning_lines) == 1
q = 1. * u.km
with pytest.raises(TypeError), \
pytest.warns(AstropyWarning, match='is not implemented') as warning_lines:
q >> u.m
assert len(warning_lines) == 1
with pytest.raises(TypeError), \
pytest.warns(AstropyWarning, match='is not implemented') as warning_lines:
q >>= u.m
assert len(warning_lines) == 1
with pytest.raises(TypeError), \
pytest.warns(AstropyWarning, match='is not implemented') as warning_lines:
1. >> q
assert len(warning_lines) == 1
class TestQuantityOperations:
q1 = u.Quantity(11.42, u.meter)
q2 = u.Quantity(8.0, u.centimeter)
def test_addition(self):
# Take units from left object, q1
new_quantity = self.q1 + self.q2
assert new_quantity.value == 11.5
assert new_quantity.unit == u.meter
# Take units from left object, q2
new_quantity = self.q2 + self.q1
assert new_quantity.value == 1150.0
assert new_quantity.unit == u.centimeter
new_q = u.Quantity(1500.1, u.m) + u.Quantity(13.5, u.km)
assert new_q.unit == u.m
assert new_q.value == 15000.1
def test_subtraction(self):
# Take units from left object, q1
new_quantity = self.q1 - self.q2
assert new_quantity.value == 11.34
assert new_quantity.unit == u.meter
# Take units from left object, q2
new_quantity = self.q2 - self.q1
assert new_quantity.value == -1134.0
assert new_quantity.unit == u.centimeter
def test_multiplication(self):
# Take units from left object, q1
new_quantity = self.q1 * self.q2
assert new_quantity.value == 91.36
assert new_quantity.unit == (u.meter * u.centimeter)
# Take units from left object, q2
new_quantity = self.q2 * self.q1
assert new_quantity.value == 91.36
assert new_quantity.unit == (u.centimeter * u.meter)
# Multiply with a number
new_quantity = 15. * self.q1
assert new_quantity.value == 171.3
assert new_quantity.unit == u.meter
# Multiply with a number
new_quantity = self.q1 * 15.
assert new_quantity.value == 171.3
assert new_quantity.unit == u.meter
def test_division(self):
# Take units from left object, q1
new_quantity = self.q1 / self.q2
assert_array_almost_equal(new_quantity.value, 1.4275, decimal=5)
assert new_quantity.unit == (u.meter / u.centimeter)
# Take units from left object, q2
new_quantity = self.q2 / self.q1
assert_array_almost_equal(new_quantity.value, 0.70052539404553416,
decimal=16)
assert new_quantity.unit == (u.centimeter / u.meter)
q1 = u.Quantity(11.4, unit=u.meter)
q2 = u.Quantity(10.0, unit=u.second)
new_quantity = q1 / q2
assert_array_almost_equal(new_quantity.value, 1.14, decimal=10)
assert new_quantity.unit == (u.meter / u.second)
# divide with a number
new_quantity = self.q1 / 10.
assert new_quantity.value == 1.142
assert new_quantity.unit == u.meter
# divide with a number
new_quantity = 11.42 / self.q1
assert new_quantity.value == 1.
assert new_quantity.unit == u.Unit("1/m")
def test_commutativity(self):
"""Regression test for issue #587."""
new_q = u.Quantity(11.42, 'm*s')
assert self.q1 * u.s == u.s * self.q1 == new_q
assert self.q1 / u.s == u.Quantity(11.42, 'm/s')
assert u.s / self.q1 == u.Quantity(1 / 11.42, 's/m')
def test_power(self):
# raise quantity to a power
new_quantity = self.q1 ** 2
assert_array_almost_equal(new_quantity.value, 130.4164, decimal=5)
assert new_quantity.unit == u.Unit("m^2")
new_quantity = self.q1 ** 3
assert_array_almost_equal(new_quantity.value, 1489.355288, decimal=7)
assert new_quantity.unit == u.Unit("m^3")
def test_matrix_multiplication(self):
a = np.eye(3)
q = a * u.m
result1 = q @ a
assert np.all(result1 == q)
result2 = a @ q
assert np.all(result2 == q)
result3 = q @ q
assert np.all(result3 == a * u.m ** 2)
# less trivial case.
q2 = np.array([[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]],
[[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]]]) / u.s
result4 = q @ q2
assert np.all(result4 == np.matmul(a, q2.value) * q.unit * q2.unit)
def test_unary(self):
# Test the minus unary operator
new_quantity = -self.q1
assert new_quantity.value == -self.q1.value
assert new_quantity.unit == self.q1.unit
new_quantity = -(-self.q1)
assert new_quantity.value == self.q1.value
assert new_quantity.unit == self.q1.unit
# Test the plus unary operator
new_quantity = +self.q1
assert new_quantity.value == self.q1.value
assert new_quantity.unit == self.q1.unit
def test_abs(self):
q = 1. * u.m / u.s
new_quantity = abs(q)
assert new_quantity.value == q.value
assert new_quantity.unit == q.unit
q = -1. * u.m / u.s
new_quantity = abs(q)
assert new_quantity.value == -q.value
assert new_quantity.unit == q.unit
def test_incompatible_units(self):
""" When trying to add or subtract units that aren't compatible, throw an error """
q1 = u.Quantity(11.412, unit=u.meter)
q2 = u.Quantity(21.52, unit=u.second)
with pytest.raises(u.UnitsError):
q1 + q2
def test_non_number_type(self):
q1 = u.Quantity(11.412, unit=u.meter)
with pytest.raises(TypeError) as exc:
q1 + {'a': 1}
assert exc.value.args[0].startswith(
"Unsupported operand type(s) for ufunc add:")
with pytest.raises(TypeError):
q1 + u.meter
def test_dimensionless_operations(self):
# test conversion to dimensionless
dq = 3. * u.m / u.km
dq1 = dq + 1. * u.mm / u.km
assert dq1.value == 3.001
assert dq1.unit == dq.unit
dq2 = dq + 1.
assert dq2.value == 1.003
assert dq2.unit == u.dimensionless_unscaled
# this test checks that adding/subtracting a dimensionless Quantity
# to/from one with real units fails
with pytest.raises(u.UnitsError):
self.q1 + u.Quantity(0.1, unit=u.Unit(""))
with pytest.raises(u.UnitsError):
self.q1 - u.Quantity(0.1, unit=u.Unit(""))
# and test that scaling of integers works
q = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
q2 = q + np.array([4, 5, 6])
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value, np.array([4.001, 5.002, 6.003]))
# but not if doing it inplace
with pytest.raises(TypeError):
q += np.array([1, 2, 3])
# except if it is actually possible
q = np.array([1, 2, 3]) * u.km / u.m
q += np.array([4, 5, 6])
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == np.array([1004, 2005, 3006]))
def test_complicated_operation(self):
""" Perform a more complicated test """
from astropy.units import imperial
# Multiple units
distance = u.Quantity(15., u.meter)
time = u.Quantity(11., u.second)
velocity = (distance / time).to(imperial.mile / u.hour)
assert_array_almost_equal(
velocity.value, 3.05037, decimal=5)
G = u.Quantity(6.673E-11, u.m ** 3 / u.kg / u.s ** 2)
_ = ((1. / (4. * np.pi * G)).to(u.pc ** -3 / u.s ** -2 * u.kg))
# Area
side1 = u.Quantity(11., u.centimeter)
side2 = u.Quantity(7., u.centimeter)
area = side1 * side2
assert_array_almost_equal(area.value, 77., decimal=15)
assert area.unit == u.cm * u.cm
def test_comparison(self):
# equality/ non-equality is straightforward for quantity objects
assert (1 / (u.cm * u.cm)) == 1 * u.cm ** -2
assert 1 * u.m == 100 * u.cm
assert 1 * u.m != 1 * u.cm
# when the other operand is a unit, Quantity.__eq__ returns
# NotImplemented, but UnitBase.__eq__ handles it, so comparison still works
unit = u.cm**3
q = 1. * unit
assert q.__eq__(unit) is NotImplemented
assert unit.__eq__(q) is True
assert q == unit
q = 1000. * u.mm**3
assert q == unit
# mismatched types should never work
assert not 1. * u.cm == 1.
assert 1. * u.cm != 1.
# comparison with zero should raise a deprecation warning
for quantity in (1. * u.cm, 1. * u.dimensionless_unscaled):
with pytest.warns(AstropyDeprecationWarning, match='The truth value of '
'a Quantity is ambiguous. In the future this will '
'raise a ValueError.'):
bool(quantity)
def test_numeric_converters(self):
# float, int, long, and __index__ should only work for single
# quantities, of appropriate type, and only if they are dimensionless.
# for index, this should be unscaled as well
# (Check on __index__ is also a regression test for #1557)
# quantities with units should never convert, or be usable as an index
q1 = u.Quantity(1, u.m)
converter_err_msg = ("only dimensionless scalar quantities "
"can be converted to Python scalars")
index_err_msg = ("only integer dimensionless scalar quantities "
"can be converted to a Python index")
with pytest.raises(TypeError) as exc:
float(q1)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
int(q1)
assert exc.value.args[0] == converter_err_msg
# We used to test ``q1 * ['a', 'b', 'c']`` here, but that it worked
# at all was a really odd confluence of bugs. Since it doesn't work
# in numpy >=1.10 any more, just go directly for `__index__` (which
# makes the test more similar to the `int`, `long`, etc., tests).
with pytest.raises(TypeError) as exc:
q1.__index__()
assert exc.value.args[0] == index_err_msg
# dimensionless but scaled is OK, however
q2 = u.Quantity(1.23, u.m / u.km)
assert float(q2) == float(q2.to_value(u.dimensionless_unscaled))
assert int(q2) == int(q2.to_value(u.dimensionless_unscaled))
with pytest.raises(TypeError) as exc:
q2.__index__()
assert exc.value.args[0] == index_err_msg
# dimensionless unscaled is OK, though for index needs to be int
q3 = u.Quantity(1.23, u.dimensionless_unscaled)
assert float(q3) == 1.23
assert int(q3) == 1
with pytest.raises(TypeError) as exc:
q3.__index__()
assert exc.value.args[0] == index_err_msg
# integer dimensionless unscaled is good for all
q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
assert float(q4) == 2.
assert int(q4) == 2
assert q4.__index__() == 2
# but arrays are not OK
q5 = u.Quantity([1, 2], u.m)
with pytest.raises(TypeError) as exc:
float(q5)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
int(q5)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
q5.__index__()
assert exc.value.args[0] == index_err_msg
# See https://github.com/numpy/numpy/issues/5074
# It seems unlikely this will be resolved, so xfail'ing it.
@pytest.mark.xfail(reason="list multiplication only works for numpy <=1.10")
def test_numeric_converter_to_index_in_practice(self):
"""Test that use of __index__ actually works."""
q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
assert q4 * ['a', 'b', 'c'] == ['a', 'b', 'c', 'a', 'b', 'c']
def test_array_converters(self):
# Scalar quantity
q = u.Quantity(1.23, u.m)
assert np.all(np.array(q) == np.array([1.23]))
# Array quantity
q = u.Quantity([1., 2., 3.], u.m)
assert np.all(np.array(q) == np.array([1., 2., 3.]))
def test_quantity_conversion():
q1 = u.Quantity(0.1, unit=u.meter)
value = q1.value
assert value == 0.1
value_in_km = q1.to_value(u.kilometer)
assert value_in_km == 0.0001
new_quantity = q1.to(u.kilometer)
assert new_quantity.value == 0.0001
with pytest.raises(u.UnitsError):
q1.to(u.zettastokes)
with pytest.raises(u.UnitsError):
q1.to_value(u.zettastokes)
def test_quantity_value_views():
q1 = u.Quantity([1., 2.], unit=u.meter)
# views if the unit is the same.
v1 = q1.value
v1[0] = 0.
assert np.all(q1 == [0., 2.] * u.meter)
v2 = q1.to_value()
v2[1] = 3.
assert np.all(q1 == [0., 3.] * u.meter)
v3 = q1.to_value('m')
v3[0] = 1.
assert np.all(q1 == [1., 3.] * u.meter)
q2 = q1.to('m', copy=False)
q2[0] = 2 * u.meter
assert np.all(q1 == [2., 3.] * u.meter)
v4 = q1.to_value('cm')
v4[0] = 0.
# copy if different unit.
assert np.all(q1 == [2., 3.] * u.meter)
def test_quantity_conversion_with_equiv():
q1 = u.Quantity(0.1, unit=u.meter)
v2 = q1.to_value(u.Hz, equivalencies=u.spectral())
assert_allclose(v2, 2997924580.0)
q2 = q1.to(u.Hz, equivalencies=u.spectral())
assert_allclose(q2.value, v2)
q1 = u.Quantity(0.4, unit=u.arcsecond)
v2 = q1.to_value(u.au, equivalencies=u.parallax())
q2 = q1.to(u.au, equivalencies=u.parallax())
v3 = q2.to_value(u.arcminute, equivalencies=u.parallax())
q3 = q2.to(u.arcminute, equivalencies=u.parallax())
assert_allclose(v2, 515662.015)
assert_allclose(q2.value, v2)
assert q2.unit == u.au
assert_allclose(v3, 0.0066666667)
assert_allclose(q3.value, v3)
assert q3.unit == u.arcminute
def test_quantity_conversion_equivalency_passed_on():
class MySpectral(u.Quantity):
_equivalencies = u.spectral()
def __quantity_view__(self, obj, unit):
return obj.view(MySpectral)
def __quantity_instance__(self, *args, **kwargs):
return MySpectral(*args, **kwargs)
q1 = MySpectral([1000, 2000], unit=u.Hz)
q2 = q1.to(u.nm)
assert q2.unit == u.nm
q3 = q2.to(u.Hz)
assert q3.unit == u.Hz
assert_allclose(q3.value, q1.value)
q4 = MySpectral([1000, 2000], unit=u.nm)
q5 = q4.to(u.Hz).to(u.nm)
assert q5.unit == u.nm
assert_allclose(q4.value, q5.value)
# Regression test for issue #2315, divide-by-zero error when examining 0*unit
def test_self_equivalency():
assert u.deg.is_equivalent(0*u.radian)
assert u.deg.is_equivalent(1*u.radian)
def test_si():
q1 = 10. * u.m * u.s ** 2 / (200. * u.ms) ** 2 # 250 meters
assert q1.si.value == 250
assert q1.si.unit == u.m
q = 10. * u.m # 10 meters
assert q.si.value == 10
assert q.si.unit == u.m
q = 10. / u.m # 10 1 / meters
assert q.si.value == 10
assert q.si.unit == (1 / u.m)
def test_cgs():
q1 = 10. * u.cm * u.s ** 2 / (200. * u.ms) ** 2 # 250 centimeters
assert q1.cgs.value == 250
assert q1.cgs.unit == u.cm
q = 10. * u.m # 10 meters
assert q.cgs.value == 1000
assert q.cgs.unit == u.cm
q = 10. / u.cm # 10 1 / centimeters
assert q.cgs.value == 10
assert q.cgs.unit == (1 / u.cm)
q = 10. * u.Pa # 10 pascals
assert q.cgs.value == 100
assert q.cgs.unit == u.barye
class TestQuantityComparison:
def test_quantity_equality(self):
assert u.Quantity(1000, unit='m') == u.Quantity(1, unit='km')
assert not (u.Quantity(1, unit='m') == u.Quantity(1, unit='km'))
# for ==, !=, return False, True if units do not match
assert (u.Quantity(1100, unit=u.m) != u.Quantity(1, unit=u.s)) is True
assert (u.Quantity(1100, unit=u.m) == u.Quantity(1, unit=u.s)) is False
assert (u.Quantity(0, unit=u.m) == u.Quantity(0, unit=u.s)) is False
# But allow comparison with 0, +/-inf if the latter are unitless
assert u.Quantity(0, u.m) == 0.
assert u.Quantity(1, u.m) != 0.
assert u.Quantity(1, u.m) != np.inf
assert u.Quantity(np.inf, u.m) == np.inf
def test_quantity_equality_array(self):
a = u.Quantity([0., 1., 1000.], u.m)
b = u.Quantity(1., u.km)
eq = a == b
ne = a != b
assert np.all(eq == [False, False, True])
assert np.all(eq != ne)
# For mismatched units, we should just get plain False/True (not arrays)
c = u.Quantity(1., u.s)
eq = a == c
ne = a != c
assert eq is False
assert ne is True
# Constants are treated as dimensionless, so False too.
eq = a == 1.
ne = a != 1.
assert eq is False
assert ne is True
# But 0 can have any units, so we can compare.
eq = a == 0
ne = a != 0
assert np.all(eq == [True, False, False])
assert np.all(eq != ne)
# But we do not extend that to arrays; they should have the same unit.
d = np.array([0, 1., 1000.])
eq = a == d
ne = a != d
assert eq is False
assert ne is True
def test_quantity_comparison(self):
assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.kilometer)
assert u.Quantity(900, unit=u.meter) < u.Quantity(1, unit=u.kilometer)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.second)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) < u.Quantity(1, unit=u.second)
assert u.Quantity(1100, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(1000, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(900, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(1000, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)
with pytest.raises(u.UnitsError):
assert u.Quantity(
1100, unit=u.meter) >= u.Quantity(1, unit=u.second)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) <= u.Quantity(1, unit=u.second)
assert u.Quantity(1200, unit=u.meter) != u.Quantity(1, unit=u.kilometer)
class TestQuantityDisplay:
scalarintq = u.Quantity(1, unit='m', dtype=int)
scalarfloatq = u.Quantity(1.3, unit='m')
arrq = u.Quantity([1, 2.3, 8.9], unit='m')
scalar_complex_q = u.Quantity(complex(1.0, 2.0))
scalar_big_complex_q = u.Quantity(complex(1.0, 2.0e27) * 1e25)
scalar_big_neg_complex_q = u.Quantity(complex(-1.0, -2.0e27) * 1e36)
arr_complex_q = u.Quantity(np.arange(3) * (complex(-1.0, -2.0e27) * 1e36))
big_arr_complex_q = u.Quantity(np.arange(125) * (complex(-1.0, -2.0e27) * 1e36))
def test_dimensionless_quantity_repr(self):
q2 = u.Quantity(1., unit='m-1')
q3 = u.Quantity(1, unit='m-1', dtype=int)
assert repr(self.scalarintq * q2) == "<Quantity 1.>"
assert repr(self.arrq * q2) == "<Quantity [1. , 2.3, 8.9]>"
assert repr(self.scalarintq * q3) == "<Quantity 1>"
def test_dimensionless_quantity_str(self):
q2 = u.Quantity(1., unit='m-1')
q3 = u.Quantity(1, unit='m-1', dtype=int)
assert str(self.scalarintq * q2) == "1.0"
assert str(self.scalarintq * q3) == "1"
assert str(self.arrq * q2) == "[1. 2.3 8.9]"
def test_dimensionless_quantity_format(self):
q1 = u.Quantity(3.14)
assert format(q1, '.2f') == '3.14'
def test_scalar_quantity_str(self):
assert str(self.scalarintq) == "1 m"
assert str(self.scalarfloatq) == "1.3 m"
def test_scalar_quantity_repr(self):
assert repr(self.scalarintq) == "<Quantity 1 m>"
assert repr(self.scalarfloatq) == "<Quantity 1.3 m>"
def test_array_quantity_str(self):
assert str(self.arrq) == "[1. 2.3 8.9] m"
def test_array_quantity_repr(self):
assert repr(self.arrq) == "<Quantity [1. , 2.3, 8.9] m>"
def test_scalar_quantity_format(self):
assert format(self.scalarintq, '02d') == "01 m"
assert format(self.scalarfloatq, '.1f') == "1.3 m"
assert format(self.scalarfloatq, '.0f') == "1 m"
def test_uninitialized_unit_format(self):
bad_quantity = np.arange(10.).view(u.Quantity)
assert str(bad_quantity).endswith(_UNIT_NOT_INITIALISED)
assert repr(bad_quantity).endswith(_UNIT_NOT_INITIALISED + '>')
def test_to_string(self):
qscalar = u.Quantity(1.5e14, 'm/s')
# __str__ is the default `format`
assert str(qscalar) == qscalar.to_string()
res = 'Quantity as KMS: 150000000000.0 km / s'
assert f"Quantity as KMS: {qscalar.to_string(unit=u.km / u.s)}" == res
# With precision set
res = 'Quantity as KMS: 1.500e+11 km / s'
assert f"Quantity as KMS: {qscalar.to_string(precision=3, unit=u.km / u.s)}" == res
res = r'$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$'
assert qscalar.to_string(format="latex") == res
assert qscalar.to_string(format="latex", subfmt="inline") == res
res = r'$\displaystyle 1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$'
assert qscalar.to_string(format="latex", subfmt="display") == res
res = r'$1.5 \times 10^{14} \; \mathrm{m\,s^{-1}}$'
assert qscalar.to_string(format="latex_inline") == res
assert qscalar.to_string(format="latex_inline", subfmt="inline") == res
res = r'$\displaystyle 1.5 \times 10^{14} \; \mathrm{m\,s^{-1}}$'
assert qscalar.to_string(format="latex_inline", subfmt="display") == res
res = '[0 1 2] (Unit not initialised)'
assert np.arange(3).view(u.Quantity).to_string() == res
def test_repr_latex(self):
from astropy.units.quantity import conf
q2scalar = u.Quantity(1.5e14, 'm/s')
assert self.scalarintq._repr_latex_() == r'$1 \; \mathrm{m}$'
assert self.scalarfloatq._repr_latex_() == r'$1.3 \; \mathrm{m}$'
assert (q2scalar._repr_latex_() ==
r'$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$')
assert self.arrq._repr_latex_() == r'$[1,~2.3,~8.9] \; \mathrm{m}$'
# Complex quantities
assert self.scalar_complex_q._repr_latex_() == r'$(1+2i) \; \mathrm{}$'
assert (self.scalar_big_complex_q._repr_latex_() ==
r'$(1 \times 10^{25}+2 \times 10^{52}i) \; \mathrm{}$')
assert (self.scalar_big_neg_complex_q._repr_latex_() ==
r'$(-1 \times 10^{36}-2 \times 10^{63}i) \; \mathrm{}$')
assert (self.arr_complex_q._repr_latex_() ==
(r'$[(0-0i),~(-1 \times 10^{36}-2 \times 10^{63}i),'
r'~(-2 \times 10^{36}-4 \times 10^{63}i)] \; \mathrm{}$'))
assert r'\dots' in self.big_arr_complex_q._repr_latex_()
qmed = np.arange(100)*u.m
qbig = np.arange(1000)*u.m
qvbig = np.arange(10000)*1e9*u.m
pops = np.get_printoptions()
oldlat = conf.latex_array_threshold
try:
# check precision behavior
q = u.Quantity(987654321.123456789, 'm/s')
qa = np.array([7.89123, 123456789.987654321, 0]) * u.cm
np.set_printoptions(precision=8)
assert q._repr_latex_() == r'$9.8765432 \times 10^{8} \; \mathrm{\frac{m}{s}}$'
assert qa._repr_latex_() == r'$[7.89123,~1.2345679 \times 10^{8},~0] \; \mathrm{cm}$'
np.set_printoptions(precision=2)
assert q._repr_latex_() == r'$9.9 \times 10^{8} \; \mathrm{\frac{m}{s}}$'
assert qa._repr_latex_() == r'$[7.9,~1.2 \times 10^{8},~0] \; \mathrm{cm}$'
# check thresholding behavior
conf.latex_array_threshold = 100 # should be default
lsmed = qmed._repr_latex_()
assert r'\dots' not in lsmed
lsbig = qbig._repr_latex_()
assert r'\dots' in lsbig
lsvbig = qvbig._repr_latex_()
assert r'\dots' in lsvbig
conf.latex_array_threshold = 1001
lsmed = qmed._repr_latex_()
assert r'\dots' not in lsmed
lsbig = qbig._repr_latex_()
assert r'\dots' not in lsbig
lsvbig = qvbig._repr_latex_()
assert r'\dots' in lsvbig
conf.latex_array_threshold = -1 # means use the numpy threshold
np.set_printoptions(threshold=99)
lsmed = qmed._repr_latex_()
assert r'\dots' in lsmed
lsbig = qbig._repr_latex_()
assert r'\dots' in lsbig
lsvbig = qvbig._repr_latex_()
assert r'\dots' in lsvbig
assert lsvbig.endswith(',~1 \\times 10^{13}] \\; \\mathrm{m}$')
finally:
# prevent side-effects from influencing other tests
np.set_printoptions(**pops)
conf.latex_array_threshold = oldlat
qinfnan = [np.inf, -np.inf, np.nan] * u.m
assert qinfnan._repr_latex_() == r'$[\infty,~-\infty,~{\rm NaN}] \; \mathrm{m}$'
def test_decompose():
q1 = 5 * u.N
assert q1.decompose() == (5 * u.kg * u.m * u.s ** -2)
def test_decompose_regression():
"""
Regression test for bug #1163
If decompose was called multiple times on a Quantity with an array and a
scale != 1, the result changed every time. This was because the value was
referenced rather than copied, then modified in place, which altered the
original value.
"""
q = np.array([1, 2, 3]) * u.m / (2. * u.km)
assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
assert np.all(q == np.array([1, 2, 3]) * u.m / (2. * u.km))
assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
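def test_decompose_copies_value_sketch():
    # Added sketch (hedged): per the regression note above, ``decompose()``
    # must apply its scale to a *copy* of the data, leaving the original
    # values untouched.
    q = np.array([1., 2.]) * (u.m / u.km)
    d = q.decompose()
    assert not np.may_share_memory(d.value, q.value)
    assert np.all(q.value == [1., 2.])
    assert np.all(d.value == [0.001, 0.002])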
def test_arrays():
"""
Test using quantities with array values
"""
qsec = u.Quantity(np.arange(10), u.second)
assert isinstance(qsec.value, np.ndarray)
assert not qsec.isscalar
# len and indexing should work for arrays
assert len(qsec) == len(qsec.value)
qsecsub25 = qsec[2:5]
assert qsecsub25.unit == qsec.unit
assert isinstance(qsecsub25, u.Quantity)
assert len(qsecsub25) == 3
# make sure isscalar, len, and indexing behave correctly for non-arrays.
qsecnotarray = u.Quantity(10., u.second)
assert qsecnotarray.isscalar
with pytest.raises(TypeError):
len(qsecnotarray)
with pytest.raises(TypeError):
qsecnotarray[0]
qseclen0array = u.Quantity(np.array(10), u.second, dtype=int)
# 0d numpy array should act basically like a scalar
assert qseclen0array.isscalar
with pytest.raises(TypeError):
len(qseclen0array)
with pytest.raises(TypeError):
qseclen0array[0]
assert isinstance(qseclen0array.value, numbers.Integral)
a = np.array([(1., 2., 3.), (4., 5., 6.), (7., 8., 9.)],
dtype=[('x', float),
('y', float),
('z', float)])
qkpc = u.Quantity(a, u.kpc)
assert not qkpc.isscalar
qkpc0 = qkpc[0]
assert qkpc0.value == a[0]
assert qkpc0.unit == qkpc.unit
assert isinstance(qkpc0, u.Quantity)
assert qkpc0.isscalar
qkpcx = qkpc['x']
assert np.all(qkpcx.value == a['x'])
assert qkpcx.unit == qkpc.unit
assert isinstance(qkpcx, u.Quantity)
assert not qkpcx.isscalar
qkpcx1 = qkpc['x'][1]
assert qkpcx1.unit == qkpc.unit
assert isinstance(qkpcx1, u.Quantity)
assert qkpcx1.isscalar
qkpc1x = qkpc[1]['x']
assert qkpc1x.isscalar
assert qkpc1x == qkpcx1
# can also create from lists, will auto-convert to arrays
qsec = u.Quantity(list(range(10)), u.second)
assert isinstance(qsec.value, np.ndarray)
# quantity math should work with arrays
assert_array_equal((qsec * 2).value, (np.arange(10) * 2))
assert_array_equal((qsec / 2).value, (np.arange(10) / 2))
# quantity addition/subtraction should *not* work with bare arrays,
# because the unit of the plain numbers is ambiguous
with pytest.raises(u.UnitsError):
assert_array_equal((qsec + 2).value, (np.arange(10) + 2))
with pytest.raises(u.UnitsError):
assert_array_equal((qsec - 2).value, (np.arange(10) + 2))
# should create by unit multiplication, too
qsec2 = np.arange(10) * u.second
qsec3 = u.second * np.arange(10)
assert np.all(qsec == qsec2)
assert np.all(qsec2 == qsec3)
# make sure numerical-converters fail when arrays are present
with pytest.raises(TypeError):
float(qsec)
with pytest.raises(TypeError):
int(qsec)
def test_array_indexing_slicing():
q = np.array([1., 2., 3.]) * u.m
assert q[0] == 1. * u.m
assert np.all(q[0:2] == u.Quantity([1., 2.], u.m))
def test_array_setslice():
q = np.array([1., 2., 3.]) * u.m
q[1:2] = np.array([400.]) * u.cm
assert np.all(q == np.array([1., 4., 3.]) * u.m)
def test_inverse_quantity():
"""
Regression test from issue #679
"""
q = u.Quantity(4., u.meter / u.second)
qot = q / 2
toq = 2 / q
npqot = q / np.array(2)
assert npqot.value == 2.0
assert npqot.unit == (u.meter / u.second)
assert qot.value == 2.0
assert qot.unit == (u.meter / u.second)
assert toq.value == 0.5
assert toq.unit == (u.second / u.meter)
def test_quantity_mutability():
q = u.Quantity(9.8, u.meter / u.second / u.second)
with pytest.raises(AttributeError):
q.value = 3
with pytest.raises(AttributeError):
q.unit = u.kg
def test_quantity_initialized_with_quantity():
q1 = u.Quantity(60, u.second)
q2 = u.Quantity(q1, u.minute)
assert q2.value == 1
q3 = u.Quantity([q1, q2], u.second)
assert q3[0].value == 60
assert q3[1].value == 60
q4 = u.Quantity([q2, q1])
assert q4.unit == q2.unit
assert q4[0].value == 1
assert q4[1].value == 1
def test_quantity_string_unit():
q1 = 1. * u.m / 's'
assert q1.value == 1
assert q1.unit == (u.m / u.s)
q2 = q1 * "m"
assert q2.unit == ((u.m * u.m) / u.s)
def test_quantity_invalid_unit_string():
with pytest.raises(ValueError):
"foo" * u.m
def test_implicit_conversion():
q = u.Quantity(1.0, u.meter)
# Manually turn this on to simulate what might happen in a subclass
q._include_easy_conversion_members = True
assert_allclose(q.centimeter, 100)
assert_allclose(q.cm, 100)
assert_allclose(q.parsec, 3.240779289469756e-17)
def test_implicit_conversion_autocomplete():
q = u.Quantity(1.0, u.meter)
# Manually turn this on to simulate what might happen in a subclass
q._include_easy_conversion_members = True
q.foo = 42
attrs = dir(q)
assert 'centimeter' in attrs
assert 'cm' in attrs
assert 'parsec' in attrs
assert 'foo' in attrs
assert 'to' in attrs
assert 'value' in attrs
# Something from the base class, object
assert '__setattr__' in attrs
with pytest.raises(AttributeError):
q.l
def test_quantity_iterability():
"""Regressiont est for issue #878.
Scalar quantities should not be iterable and should raise a type error on
iteration.
"""
q1 = [15.0, 17.0] * u.m
assert isiterable(q1)
q2 = next(iter(q1))
assert q2 == 15.0 * u.m
assert not isiterable(q2)
pytest.raises(TypeError, iter, q2)
def test_copy():
q1 = u.Quantity(np.array([[1., 2., 3.], [4., 5., 6.]]), unit=u.m)
q2 = q1.copy()
assert np.all(q1.value == q2.value)
assert q1.unit == q2.unit
assert q1.dtype == q2.dtype
assert q1.value is not q2.value
q3 = q1.copy(order='F')
assert q3.flags['F_CONTIGUOUS']
assert np.all(q1.value == q3.value)
assert q1.unit == q3.unit
assert q1.dtype == q3.dtype
assert q1.value is not q3.value
q4 = q1.copy(order='C')
assert q4.flags['C_CONTIGUOUS']
assert np.all(q1.value == q4.value)
assert q1.unit == q4.unit
assert q1.dtype == q4.dtype
assert q1.value is not q4.value
def test_deepcopy():
q1 = u.Quantity(np.array([1., 2., 3.]), unit=u.m)
q2 = copy.deepcopy(q1)
assert isinstance(q2, u.Quantity)
assert np.all(q1.value == q2.value)
assert q1.unit == q2.unit
assert q1.dtype == q2.dtype
assert q1.value is not q2.value
def test_equality_numpy_scalar():
"""
A regression test to ensure that numpy scalars are correctly compared
(which originally failed due to the lack of ``__array_priority__``).
"""
assert 10 != 10. * u.m
assert np.int64(10) != 10 * u.m
assert 10 * u.m != np.int64(10)
def test_quantity_pickleability():
"""
Testing pickleability of quantity
"""
q1 = np.arange(10) * u.m
q2 = pickle.loads(pickle.dumps(q1))
assert np.all(q1.value == q2.value)
assert q1.unit.is_equivalent(q2.unit)
assert q1.unit == q2.unit
def test_quantity_initialisation_from_string():
q = u.Quantity('1')
assert q.unit == u.dimensionless_unscaled
assert q.value == 1.
q = u.Quantity('1.5 m/s')
assert q.unit == u.m/u.s
assert q.value == 1.5
assert u.Unit(q) == u.Unit('1.5 m/s')
q = u.Quantity('.5 m')
assert q == u.Quantity(0.5, u.m)
q = u.Quantity('-1e1km')
assert q == u.Quantity(-10, u.km)
q = u.Quantity('-1e+1km')
assert q == u.Quantity(-10, u.km)
q = u.Quantity('+.5km')
assert q == u.Quantity(.5, u.km)
q = u.Quantity('+5e-1km')
assert q == u.Quantity(.5, u.km)
q = u.Quantity('5', u.m)
assert q == u.Quantity(5., u.m)
q = u.Quantity('5 km', u.m)
assert q.value == 5000.
assert q.unit == u.m
q = u.Quantity('5Em')
assert q == u.Quantity(5., u.Em)
with pytest.raises(TypeError):
u.Quantity('')
with pytest.raises(TypeError):
u.Quantity('m')
with pytest.raises(TypeError):
u.Quantity('1.2.3 deg')
with pytest.raises(TypeError):
u.Quantity('1+deg')
with pytest.raises(TypeError):
u.Quantity('1-2deg')
with pytest.raises(TypeError):
u.Quantity('1.2e-13.3m')
with pytest.raises(TypeError):
u.Quantity(['5'])
with pytest.raises(TypeError):
u.Quantity(np.array(['5']))
with pytest.raises(ValueError):
u.Quantity('5E')
with pytest.raises(ValueError):
u.Quantity('5 foo')
def test_unsupported():
q1 = np.arange(10) * u.m
with pytest.raises(TypeError):
np.bitwise_and(q1, q1)
def test_unit_identity():
q = 1.0 * u.hour
assert q.unit is u.hour
def test_quantity_to_view():
q1 = np.array([1000, 2000]) * u.m
q2 = q1.to(u.km)
assert q1.value[0] == 1000
assert q2.value[0] == 1
def test_quantity_tuple_power():
with pytest.raises(ValueError):
(5.0 * u.m) ** (1, 2)
def test_quantity_fraction_power():
q = (25.0 * u.m**2) ** Fraction(1, 2)
assert q.value == 5.
assert q.unit == u.m
# Regression check to ensure we didn't create an object type by raising
# the value of the quantity to a Fraction. [#3922]
assert q.dtype.kind == 'f'
def test_quantity_from_table():
"""
Checks that units from tables are respected when converted to a Quantity.
This also generically checks the use of *anything* with a `unit` attribute
passed into Quantity
"""
from astropy.table import Table
t = Table(data=[np.arange(5), np.arange(5)], names=['a', 'b'])
t['a'].unit = u.kpc
qa = u.Quantity(t['a'])
assert qa.unit == u.kpc
assert_array_equal(qa.value, t['a'])
qb = u.Quantity(t['b'])
assert qb.unit == u.dimensionless_unscaled
assert_array_equal(qb.value, t['b'])
# Standard `Quantity` behavior applies when a different unit is passed in:
# the column's values are converted (here kpc -> pc), not merely relabeled.
qap = u.Quantity(t['a'], u.pc)
assert qap.unit == u.pc
assert_array_equal(qap.value, t['a'] * 1000)
qbp = u.Quantity(t['b'], u.pc)
assert qbp.unit == u.pc
assert_array_equal(qbp.value, t['b'])
# Also check with a function unit (regression test for gh-8430)
t['a'].unit = u.dex(u.cm/u.s**2)
fq = u.Dex(t['a'])
assert fq.unit == u.dex(u.cm/u.s**2)
assert_array_equal(fq.value, t['a'])
fq2 = u.Quantity(t['a'], subok=True)
assert isinstance(fq2, u.Dex)
assert fq2.unit == u.dex(u.cm/u.s**2)
assert_array_equal(fq2.value, t['a'])
with pytest.raises(u.UnitTypeError):
u.Quantity(t['a'])
def test_assign_slice_with_quantity_like():
# Regression tests for gh-5961
from astropy.table import Column, Table
# first check directly that we can use a Column to assign to a slice.
c = Column(np.arange(10.), unit=u.mm)
q = u.Quantity(c)
q[:2] = c[:2]
# next check that the original problem no longer occurs.
t = Table()
t['x'] = np.arange(10) * u.mm
t['y'] = np.ones(10) * u.mm
assert type(t['x']) is Column
xy = np.vstack([t['x'], t['y']]).T * u.mm
ii = [0, 2, 4]
assert xy[ii, 0].unit == t['x'][ii].unit
# should not raise anything
xy[ii, 0] = t['x'][ii]
def test_insert():
"""
Test Quantity.insert method. This does not test the full capabilities
of the underlying np.insert, but hits the key functionality for
Quantity.
"""
q = [1, 2] * u.m
# Insert a compatible float with different units
q2 = q.insert(0, 1 * u.km)
assert np.all(q2.value == [1000, 1, 2])
assert q2.unit is u.m
assert q2.dtype.kind == 'f'
if minversion(np, '1.8.0'):
q2 = q.insert(1, [1, 2] * u.km)
assert np.all(q2.value == [1, 1000, 2000, 2])
assert q2.unit is u.m
# Cannot convert 1.5 * u.s to m
with pytest.raises(u.UnitsError):
q.insert(1, 1.5 * u.s)
# Tests with multi-dim quantity
q = [[1, 2], [3, 4]] * u.m
q2 = q.insert(1, [10, 20] * u.m, axis=0)
assert np.all(q2.value == [[1, 2],
[10, 20],
[3, 4]])
q2 = q.insert(1, [10, 20] * u.m, axis=1)
assert np.all(q2.value == [[1, 10, 2],
[3, 20, 4]])
q2 = q.insert(1, 10 * u.m, axis=1)
assert np.all(q2.value == [[1, 10, 2],
[3, 10, 4]])
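def test_insert_bare_number_sketch():
    # Added sketch (hedged): a bare number carries no unit and is treated
    # as dimensionless, so it cannot be inserted into a quantity in meters.
    q = [1, 2] * u.m
    with pytest.raises(u.UnitsError):
        q.insert(1, 1.5)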
def test_repr_array_of_quantity():
"""
Test print/repr of object arrays of Quantity objects with different
units.
Regression test for the issue first reported in
https://github.com/astropy/astropy/issues/3777
"""
a = np.array([1 * u.m, 2 * u.s], dtype=object)
assert repr(a) == 'array([<Quantity 1. m>, <Quantity 2. s>], dtype=object)'
assert str(a) == '[<Quantity 1. m> <Quantity 2. s>]'
class TestSpecificTypeQuantity:
def setup(self):
class Length(u.SpecificTypeQuantity):
_equivalent_unit = u.m
class Length2(Length):
_default_unit = u.m
class Length3(Length):
_unit = u.m
self.Length = Length
self.Length2 = Length2
self.Length3 = Length3
def test_creation(self):
l = self.Length(np.arange(10.)*u.km)
assert type(l) is self.Length
with pytest.raises(u.UnitTypeError):
self.Length(np.arange(10.) * u.hour)
with pytest.raises(u.UnitTypeError):
self.Length(np.arange(10.))
l2 = self.Length2(np.arange(5.))
assert type(l2) is self.Length2
assert l2._default_unit is self.Length2._default_unit
with pytest.raises(u.UnitTypeError):
self.Length3(np.arange(10.))
def test_view(self):
l = (np.arange(5.) * u.km).view(self.Length)
assert type(l) is self.Length
with pytest.raises(u.UnitTypeError):
(np.arange(5.) * u.s).view(self.Length)
v = np.arange(5.).view(self.Length)
assert type(v) is self.Length
assert v._unit is None
l3 = np.ones((2, 2)).view(self.Length3)
assert type(l3) is self.Length3
assert l3.unit is self.Length3._unit
def test_operation_precedence_and_fallback(self):
l = self.Length(np.arange(5.)*u.cm)
sum1 = l + 1.*u.m
assert type(sum1) is self.Length
sum2 = 1.*u.km + l
assert type(sum2) is self.Length
sum3 = l + l
assert type(sum3) is self.Length
res1 = l * (1.*u.m)
assert type(res1) is u.Quantity
res2 = l * l
assert type(res2) is u.Quantity
def test_unit_class_override():
class MyQuantity(u.Quantity):
pass
my_unit = u.Unit("my_deg", u.deg)
my_unit._quantity_class = MyQuantity
q1 = u.Quantity(1., my_unit)
assert type(q1) is u.Quantity
q2 = u.Quantity(1., my_unit, subok=True)
assert type(q2) is MyQuantity
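# The two classes below model duck-typed quantity look-alikes:
# ``QuantityMimic`` exposes only ``value``, ``unit`` and ``__array__``,
# while ``QuantityMimic2`` adds ``to``/``to_value`` so that unit conversion
# can be delegated to the mimic itself.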
class QuantityMimic:
def __init__(self, value, unit):
self.value = value
self.unit = unit
def __array__(self):
return np.array(self.value)
class QuantityMimic2(QuantityMimic):
def to(self, unit):
return u.Quantity(self.value, self.unit).to(unit)
def to_value(self, unit):
return u.Quantity(self.value, self.unit).to_value(unit)
class TestQuantityMimics:
"""Test Quantity Mimics that are not ndarray subclasses."""
@pytest.mark.parametrize('Mimic', (QuantityMimic, QuantityMimic2))
def test_mimic_input(self, Mimic):
value = np.arange(10.)
mimic = Mimic(value, u.m)
q = u.Quantity(mimic)
assert q.unit == u.m
assert np.all(q.value == value)
q2 = u.Quantity(mimic, u.cm)
assert q2.unit == u.cm
assert np.all(q2.value == 100 * value)
@pytest.mark.parametrize('Mimic', (QuantityMimic, QuantityMimic2))
def test_mimic_setting(self, Mimic):
mimic = Mimic([1., 2.], u.m)
q = u.Quantity(np.arange(10.), u.cm)
q[8:] = mimic
assert np.all(q[:8].value == np.arange(8.))
assert np.all(q[8:].value == [100., 200.])
def test_mimic_function_unit(self):
mimic = QuantityMimic([1., 2.], u.dex(u.cm/u.s**2))
d = u.Dex(mimic)
assert isinstance(d, u.Dex)
assert d.unit == u.dex(u.cm/u.s**2)
assert np.all(d.value == [1., 2.])
q = u.Quantity(mimic, subok=True)
assert isinstance(q, u.Dex)
assert q.unit == u.dex(u.cm/u.s**2)
assert np.all(q.value == [1., 2.])
with pytest.raises(u.UnitTypeError):
u.Quantity(mimic)
def test_masked_quantity_str_repr():
"""Ensure we don't break masked Quantity representation."""
# Really, masked quantities do not work well, but at least let the
# basics work.
masked_quantity = np.ma.array([1, 2, 3, 4] * u.kg,
mask=[True, False, True, False])
str(masked_quantity)
repr(masked_quantity)
class TestQuantitySubclassAboveAndBelow:
@classmethod
def setup_class(self):
class MyArray(np.ndarray):
def __array_finalize__(self, obj):
super_array_finalize = super().__array_finalize__
if super_array_finalize is not None:
super_array_finalize(obj)
if hasattr(obj, 'my_attr'):
self.my_attr = obj.my_attr
self.MyArray = MyArray
self.MyQuantity1 = type('MyQuantity1', (u.Quantity, MyArray),
dict(my_attr='1'))
self.MyQuantity2 = type('MyQuantity2', (MyArray, u.Quantity),
dict(my_attr='2'))
def test_setup(self):
mq1 = self.MyQuantity1(10, u.m)
assert isinstance(mq1, self.MyQuantity1)
assert mq1.my_attr == '1'
assert mq1.unit is u.m
mq2 = self.MyQuantity2(10, u.m)
assert isinstance(mq2, self.MyQuantity2)
assert mq2.my_attr == '2'
assert mq2.unit is u.m
def test_attr_propagation(self):
mq1 = self.MyQuantity1(10, u.m)
mq12 = self.MyQuantity2(mq1)
assert isinstance(mq12, self.MyQuantity2)
assert not isinstance(mq12, self.MyQuantity1)
assert mq12.my_attr == '1'
assert mq12.unit is u.m
mq2 = self.MyQuantity2(10, u.m)
mq21 = self.MyQuantity1(mq2)
assert isinstance(mq21, self.MyQuantity1)
assert not isinstance(mq21, self.MyQuantity2)
assert mq21.my_attr == '2'
assert mq21.unit is u.m
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test Structured units and quantities specifically with the ERFA ufuncs.
"""
import erfa
import numpy as np
import pytest
from erfa import ufunc as erfa_ufunc
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.introspection import minversion
ERFA_LE_2_0_0 = not minversion(erfa, '2.0.0.1')
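# True for pyerfa 2.0.0 and earlier, i.e. for anything before the 2.0.0.1
# bug-fix release; used to mark version-dependent expectations.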
class TestPVUfuncs:
def setup_class(self):
self.pv_unit = u.Unit('AU,AU/day')
self.pv_value = np.array([([1., 0., 0.], [0., 0.0125, 0.]),
([0., 1., 0.], [-.0125, 0., 0.])],
dtype=erfa_ufunc.dt_pv)
self.pv = self.pv_value << self.pv_unit
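    # Orientation for the setup above (descriptive only): 'AU,AU/day'
    # parses into a StructuredUnit whose sub-units line up with the
    # ('p', 'v') fields of erfa_ufunc.dt_pv, and ``<<`` attaches the unit
    # without copying the data, so e.g.
    #
    #     self.pv['p'].unit   # -> AU
    #     self.pv['v'].unit   # -> AU / d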
def test_cpv(self):
pv_copy = erfa_ufunc.cpv(self.pv)
assert_array_equal(pv_copy, self.pv)
assert not np.may_share_memory(pv_copy, self.pv)
def test_p2pv(self):
p2pv = erfa_ufunc.p2pv(self.pv['p'])
assert_array_equal(p2pv['p'], self.pv['p'])
assert_array_equal(p2pv['v'], np.zeros(self.pv.shape+(3,), float) << u.m/u.s)
@pytest.mark.xfail(erfa.__version__ <= '2.0.0',
                       reason='erfa bug; https://github.com/liberfa/pyerfa/issues/70')
def test_p2pv_inplace(self):
# TODO: fix np.zeros_like.
out = np.zeros_like(self.pv_value) << self.pv_unit
p2pv = erfa_ufunc.p2pv(self.pv['p'], out=out)
assert out is p2pv
assert_array_equal(p2pv['p'], self.pv['p'])
assert_array_equal(p2pv['v'], np.zeros(self.pv.shape+(3,), float) << u.m/u.s)
def test_pv2p(self):
p = erfa_ufunc.pv2p(self.pv)
assert_array_equal(p, self.pv['p'])
out = np.zeros_like(p)
p2 = erfa_ufunc.pv2p(self.pv, out=out)
assert out is p2
assert_array_equal(p2, self.pv['p'])
def test_pv2s(self):
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(self.pv)
assert theta.unit == u.radian
assert_quantity_allclose(theta, [0, 90] * u.deg) # longitude
assert phi.unit == u.radian
assert_array_equal(phi.value, np.zeros(self.pv.shape)) # latitude
assert r.unit == u.AU
assert_array_equal(r.value, np.ones(self.pv.shape))
assert td.unit == u.radian/u.day
assert_array_equal(td.value, np.array([0.0125]*2))
assert pd.unit == u.radian/u.day
assert_array_equal(pd.value, np.zeros(self.pv.shape))
assert rd.unit == u.AU/u.day
assert_array_equal(rd.value, np.zeros(self.pv.shape))
def test_pv2s_non_standard_units(self):
pv = self.pv_value << u.Unit('Pa,Pa/m')
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(pv)
assert theta.unit == u.radian
assert_quantity_allclose(theta, [0, 90] * u.deg) # longitude
assert phi.unit == u.radian
assert_array_equal(phi.value, np.zeros(pv.shape)) # latitude
assert r.unit == u.Pa
assert_array_equal(r.value, np.ones(pv.shape))
assert td.unit == u.radian/u.m
assert_array_equal(td.value, np.array([0.0125]*2))
assert pd.unit == u.radian/u.m
assert_array_equal(pd.value, np.zeros(pv.shape))
assert rd.unit == u.Pa/u.m
assert_array_equal(rd.value, np.zeros(pv.shape))
@pytest.mark.xfail(reason=(
'erfa ufuncs cannot take different names; it is not yet clear whether '
'this is changeable; see https://github.com/liberfa/pyerfa/issues/77'))
def test_pv2s_non_standard_names_and_units(self):
        pv_value = np.array(self.pv_value, dtype=[('pos', 'f8', (3,)),
                                                  ('vel', 'f8', (3,))])
pv = pv_value << u.Unit('Pa,Pa/m')
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(pv)
assert theta.unit == u.radian
assert_quantity_allclose(theta, [0, 90] * u.deg) # longitude
assert phi.unit == u.radian
assert_array_equal(phi.value, np.zeros(pv.shape)) # latitude
assert r.unit == u.Pa
assert_array_equal(r.value, np.ones(pv.shape))
assert td.unit == u.radian/u.m
assert_array_equal(td.value, np.array([0.0125]*2))
assert pd.unit == u.radian/u.m
assert_array_equal(pd.value, np.zeros(pv.shape))
assert rd.unit == u.Pa/u.m
assert_array_equal(rd.value, np.zeros(pv.shape))
def test_s2pv(self):
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(self.pv)
        # On purpose, change some of the units away from those expected by s2pv.
pv = erfa_ufunc.s2pv(theta.to(u.deg), phi, r.to(u.m),
td.to(u.deg/u.day), pd, rd.to(u.m/u.s))
assert pv.unit == u.StructuredUnit('m, m/s', names=('p', 'v'))
assert_quantity_allclose(pv['p'], self.pv['p'], atol=1*u.m, rtol=0)
assert_quantity_allclose(pv['v'], self.pv['v'], atol=1*u.mm/u.s, rtol=0)
def test_pvstar(self):
ra, dec, pmr, pmd, px, rv, stat = erfa_ufunc.pvstar(self.pv)
assert_array_equal(stat, np.zeros(self.pv.shape, dtype='i4'))
assert ra.unit == u.radian
assert_quantity_allclose(ra, [0, 90] * u.deg)
assert dec.unit == u.radian
assert_array_equal(dec.value, np.zeros(self.pv.shape)) # latitude
assert pmr.unit == u.radian/u.year
assert_quantity_allclose(pmr, [0.0125, 0.0125]*u.radian/u.day)
assert pmd.unit == u.radian/u.year
assert_array_equal(pmd.value, np.zeros(self.pv.shape))
assert px.unit == u.arcsec
assert_quantity_allclose(px, 1*u.radian)
assert rv.unit == u.km / u.s
assert_array_equal(rv.value, np.zeros(self.pv.shape))
def test_starpv(self):
ra, dec, pmr, pmd, px, rv, stat = erfa_ufunc.pvstar(self.pv)
pv, stat = erfa_ufunc.starpv(ra.to(u.deg), dec.to(u.deg), pmr, pmd,
px, rv.to(u.m/u.s))
assert_array_equal(stat, np.zeros(self.pv.shape, dtype='i4'))
assert pv.unit == self.pv.unit
# Roundtrip is not as good as hoped on 32bit, not clear why.
# But proper motions are ridiculously high...
assert_quantity_allclose(pv['p'], self.pv['p'], atol=1*u.m, rtol=0)
assert_quantity_allclose(pv['v'], self.pv['v'], atol=1*u.m/u.s, rtol=0)
def test_pvtob(self):
pv = erfa_ufunc.pvtob([90, 0]*u.deg, 0.*u.deg, 100*u.km,
0*u.deg, 0*u.deg, 0*u.deg, 90*u.deg)
assert pv.unit == u.StructuredUnit('m, m/s', names=('p', 'v'))
assert pv.unit['v'] == u.m / u.s
assert_quantity_allclose(pv['p'], [[-6478, 0, 0], [0, 6478, 0]]*u.km,
atol=2*u.km)
assert_quantity_allclose(pv['v'], [[0, -0.5, 0], [-0.5, 0, 0]]*u.km/u.s,
atol=0.1*u.km/u.s)
def test_pvdpv(self):
pvdpv = erfa_ufunc.pvdpv(self.pv, self.pv)
assert pvdpv['pdp'].unit == self.pv.unit['p'] ** 2
assert pvdpv['pdv'].unit == self.pv.unit['p'] * self.pv.unit['v']
assert_array_equal(pvdpv['pdp'], np.einsum('...i,...i->...',
self.pv['p'], self.pv['p']))
assert_array_equal(pvdpv['pdv'], 2*np.einsum('...i,...i->...',
self.pv['p'], self.pv['v']))
z_axis = u.Quantity(
np.array(([0, 0, 1], [0, 0, 0]), erfa_ufunc.dt_pv),
'1,1/s')
pvdpv2 = erfa_ufunc.pvdpv(self.pv, z_axis)
assert pvdpv2['pdp'].unit == self.pv.unit['p']
assert pvdpv2['pdv'].unit == self.pv.unit['v']
assert_array_equal(pvdpv2['pdp'].value, np.zeros(self.pv.shape))
assert_array_equal(pvdpv2['pdv'].value, np.zeros(self.pv.shape))
def test_pvxpv(self):
pvxpv = erfa_ufunc.pvxpv(self.pv, self.pv)
assert pvxpv['p'].unit == self.pv.unit['p'] ** 2
assert pvxpv['v'].unit == self.pv.unit['p'] * self.pv.unit['v']
assert_array_equal(pvxpv['p'].value, np.zeros(self.pv['p'].shape))
assert_array_equal(pvxpv['v'].value, np.zeros(self.pv['v'].shape))
z_axis = u.Quantity(
np.array(([0, 0, 1], [0, 0, 0]), erfa_ufunc.dt_pv),
'1,1/s')
pvxpv2 = erfa_ufunc.pvxpv(self.pv, z_axis)
assert pvxpv2['p'].unit == self.pv.unit['p']
assert pvxpv2['v'].unit == self.pv.unit['v']
assert_array_equal(pvxpv2['p'], [[0., -1, 0.],
[1., 0., 0.]] * u.AU)
assert_array_equal(pvxpv2['v'], [[0.0125, 0., 0.],
[0., 0.0125, 0.]] * u.AU / u.day)
def test_pvm(self):
pm, vm = erfa_ufunc.pvm(self.pv)
assert pm.unit == self.pv.unit['p']
assert vm.unit == self.pv.unit['v']
assert_array_equal(pm, np.linalg.norm(self.pv['p'], axis=-1))
assert_array_equal(vm, np.linalg.norm(self.pv['v'], axis=-1))
def test_pvmpv(self):
pvmpv = erfa_ufunc.pvmpv(self.pv, self.pv)
assert pvmpv.unit == self.pv.unit
assert_array_equal(pvmpv['p'], 0*self.pv['p'])
assert_array_equal(pvmpv['v'], 0*self.pv['v'])
def test_pvppv(self):
pvppv = erfa_ufunc.pvppv(self.pv, self.pv)
assert pvppv.unit == self.pv.unit
assert_array_equal(pvppv['p'], 2*self.pv['p'])
assert_array_equal(pvppv['v'], 2*self.pv['v'])
def test_pvu(self):
pvu = erfa_ufunc.pvu(86400*u.s, self.pv)
assert pvu.unit == self.pv.unit
assert_array_equal(pvu['p'], self.pv['p'] + 1*u.day*self.pv['v'])
assert_array_equal(pvu['v'], self.pv['v'])
def test_pvup(self):
pvup = erfa_ufunc.pvup(86400*u.s, self.pv)
assert pvup.unit == self.pv.unit['p']
assert_array_equal(pvup, self.pv['p'] + 1*u.day*self.pv['v'])
def test_sxpv(self):
# Not a realistic example!!
sxpv = erfa_ufunc.sxpv(10., self.pv)
assert sxpv.unit == self.pv.unit
assert_array_equal(sxpv['p'], self.pv['p']*10)
assert_array_equal(sxpv['v'], self.pv['v']*10)
sxpv2 = erfa_ufunc.sxpv(30.*u.s, self.pv)
assert sxpv2.unit == u.StructuredUnit('AU s,AU s/d', names=('p', 'v'))
assert_array_equal(sxpv2['p'], self.pv['p']*30*u.s)
assert_array_equal(sxpv2['v'], self.pv['v']*30*u.s)
def test_s2xpv(self):
# Not a realistic example!!
s2xpv = erfa_ufunc.s2xpv(10., 1*u.s, self.pv)
assert s2xpv.unit == u.StructuredUnit('AU,AU s/d', names=('p', 'v'))
assert_array_equal(s2xpv['p'], self.pv['p']*10)
assert_array_equal(s2xpv['v'], self.pv['v']*u.s)
@pytest.mark.parametrize('r', [
np.eye(3),
np.array([[0., -1., 0.],
[1., 0., 0.],
[0., 0., 1.]]),
np.eye(3) / u.s])
def test_rxpv(self, r):
result = erfa_ufunc.rxpv(r, self.pv)
assert_array_equal(result['p'], np.einsum('...ij,...j->...i',
r, self.pv['p']))
assert_array_equal(result['v'], np.einsum('...ij,...j->...i',
r, self.pv['v']))
@pytest.mark.parametrize('r', [
np.eye(3),
np.array([[0., -1., 0.],
[1., 0., 0.],
[0., 0., 1.]]),
np.eye(3) / u.s])
def test_trxpv(self, r):
result = erfa_ufunc.trxpv(r, self.pv)
assert_array_equal(result['p'], np.einsum('...ij,...j->...i',
r.T, self.pv['p']))
assert_array_equal(result['v'], np.einsum('...ij,...j->...i',
r.T, self.pv['v']))
@pytest.mark.xfail(erfa.__version__ < '1.7.3.1',
reason='dt_eraLDBODY incorrectly defined', scope='class')
class TestEraStructUfuncs:
def setup_class(self):
# From t_ldn in t_erfa_c.c
ldbody = np.array(
[(0.00028574, 3e-10, ([-7.81014427, -5.60956681, -1.98079819],
[0.0030723249, -0.00406995477, -0.00181335842])),
(0.00095435, 3e-9, ([0.738098796, 4.63658692, 1.9693136],
[-0.00755816922, 0.00126913722, 0.000727999001])),
(1.0, 6e-6, ([-0.000712174377, -0.00230478303, -0.00105865966],
[6.29235213e-6, -3.30888387e-7, -2.96486623e-7]))],
dtype=erfa_ufunc.dt_eraLDBODY)
ldbody_unit = u.StructuredUnit('Msun,radian,(AU,AU/day)', ldbody.dtype)
self.ldbody = ldbody << ldbody_unit
self.ob = [-0.974170437, -0.2115201, -0.0917583114] << u.AU
self.sc = np.array([-0.763276255, -0.608633767, -0.216735543])
# From t_atciq in t_erfa_c.c
astrom, eo = erfa_ufunc.apci13(2456165.5, 0.401182685)
self.astrom_unit = u.StructuredUnit(
'yr,AU,1,AU,1,1,1,rad,rad,rad,rad,1,1,1,rad,rad,rad',
astrom.dtype)
self.astrom = astrom << self.astrom_unit
self.rc = 2.71 * u.rad
self.dc = 0.174 * u.rad
self.pr = 1e-5 * u.rad/u.year
self.pd = 5e-6 * u.rad/u.year
self.px = 0.1 * u.arcsec
self.rv = 55.0 * u.km/u.s
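    # Unit bookkeeping sketch (descriptive only): the StructuredUnit string
    # 'Msun,radian,(AU,AU/day)' pairs one sub-unit with each dt_eraLDBODY
    # field -- mass, deflection limiter, and the nested (p, v) state vector
    # -- so each field of self.ldbody converts independently, e.g.
    #
    #     self.ldbody['pv']['v'].unit   # -> AU / d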
def test_ldn_basic(self):
sn = erfa_ufunc.ldn(self.ldbody, self.ob, self.sc)
assert_quantity_allclose(sn, [-0.7632762579693333866,
-0.6086337636093002660,
-0.2167355420646328159] * u.one,
atol=1e-12, rtol=0)
def test_ldn_in_other_unit(self):
ldbody = self.ldbody.to('kg,rad,(m,m/s)')
ob = self.ob.to('m')
sn = erfa_ufunc.ldn(ldbody, ob, self.sc)
assert_quantity_allclose(sn, [-0.7632762579693333866,
-0.6086337636093002660,
-0.2167355420646328159] * u.one,
atol=1e-12, rtol=0)
def test_ldn_in_SI(self):
sn = erfa_ufunc.ldn(self.ldbody.si, self.ob.si, self.sc)
assert_quantity_allclose(sn, [-0.7632762579693333866,
-0.6086337636093002660,
-0.2167355420646328159] * u.one,
atol=1e-12, rtol=0)
def test_aper(self):
along = self.astrom['along']
astrom2 = erfa_ufunc.aper(10*u.deg, self.astrom)
assert astrom2['eral'].unit == u.radian
assert_quantity_allclose(astrom2['eral'], along+10*u.deg)
astrom3 = self.astrom.to('s,km,1,km,1,1,1,deg,deg,deg,deg,1,1,1,rad,rad,rad')
astrom4 = erfa_ufunc.aper(10*u.deg, astrom3)
assert astrom3['eral'].unit == u.rad
assert astrom4['eral'].unit == u.deg
assert astrom4.unit == 's,km,1,km,1,1,1,deg,deg,deg,deg,1,1,1,deg,rad,rad'
assert_quantity_allclose(astrom4['eral'], along+10*u.deg)
def test_atciq_basic(self):
ri, di = erfa_ufunc.atciq(self.rc, self.dc, self.pr, self.pd,
self.px, self.rv, self.astrom)
assert_quantity_allclose(ri, 2.710121572968696744*u.rad)
assert_quantity_allclose(di, 0.1729371367219539137*u.rad)
def test_atciq_in_other_unit(self):
astrom = self.astrom.to('s,km,1,km,1,1,1,deg,deg,deg,deg,1,1,1,deg,deg,deg')
ri, di = erfa_ufunc.atciq(self.rc.to(u.deg), self.dc.to(u.deg),
self.pr.to(u.mas/u.yr), self.pd.to(u.mas/u.yr),
self.px, self.rv.to(u.m/u.s), astrom)
assert_quantity_allclose(ri, 2.710121572968696744*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(di, 0.1729371367219539137*u.rad, atol=1e-12*u.rad)
def test_atciqn(self):
ri, di = erfa_ufunc.atciqn(self.rc.to(u.deg), self.dc.to(u.deg),
self.pr.to(u.mas/u.yr), self.pd.to(u.mas/u.yr),
self.px, self.rv.to(u.m/u.s), self.astrom.si,
self.ldbody.si)
assert_quantity_allclose(ri, 2.710122008104983335*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(di, 0.1729371916492767821*u.rad, atol=1e-12*u.rad)
def test_atciqz(self):
ri, di = erfa_ufunc.atciqz(self.rc.to(u.deg), self.dc.to(u.deg),
self.astrom.si)
assert_quantity_allclose(ri, 2.709994899247256984*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(di, 0.1728740720984931891*u.rad, atol=1e-12*u.rad)
def test_aticq(self):
ri = 2.710121572969038991 * u.rad
di = 0.1729371367218230438 * u.rad
rc, dc = erfa_ufunc.aticq(ri.to(u.deg), di.to(u.deg), self.astrom.si)
assert_quantity_allclose(rc, 2.710126504531716819*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(dc, 0.1740632537627034482*u.rad, atol=1e-12*u.rad)
def test_aticqn(self):
ri = 2.709994899247599271 * u.rad
di = 0.1728740720983623469 * u.rad
rc, dc = erfa_ufunc.aticqn(ri.to(u.deg), di.to(u.deg), self.astrom.si,
self.ldbody.si)
assert_quantity_allclose(rc, 2.709999575033027333*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(dc, 0.1739999656316469990*u.rad, atol=1e-12*u.rad)
def test_atioq_atoiq(self):
astrom, _ = erfa_ufunc.apio13(2456384.5, 0.969254051, 0.1550675,
-0.527800806, -1.2345856, 2738.0,
2.47230737e-7, 1.82640464e-6,
731.0, 12.8, 0.59, 0.55)
astrom = astrom << self.astrom_unit
ri = 2.710121572969038991 * u.rad
di = 0.1729371367218230438 * u.rad
aob, zob, hob, dob, rob = erfa_ufunc.atioq(ri.to(u.deg), di.to(u.deg),
astrom.si)
assert_quantity_allclose(aob, 0.9233952224895122499e-1*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(zob, 1.407758704513549991*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(hob, -0.9247619879881698140e-1*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(dob, 0.1717653435756234676*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(rob, 2.710085107988480746*u.rad, atol=1e-12*u.rad)
# Sadly does not just use the values from above.
ob1 = 2.710085107986886201 * u.rad
ob2 = 0.1717653435758265198 * u.rad
ri2, di2 = erfa_ufunc.atoiq("R", ob1.to(u.deg), ob2.to(u.deg), astrom.si)
assert_quantity_allclose(ri2, 2.710121574447540810*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(di2, 0.17293718391166087785*u.rad, atol=1e-12*u.rad)
@pytest.mark.xfail(erfa.__version__ < '2.0.0', reason='comparisons changed')
def test_apio(self):
sp = -3.01974337e-11 * u.rad
theta = 3.14540971 * u.rad
elong = -0.527800806 * u.rad
phi = -1.2345856 * u.rad
hm = 2738.0 * u.m
xp = 2.47230737e-7 * u.rad
yp = 1.82640464e-6 * u.rad
refa = 0.000201418779 * u.rad
refb = -2.36140831e-7 * u.rad
astrom = erfa_ufunc.apio(sp.to(u.deg), theta, elong, phi, hm.to(u.km),
xp, yp, refa, refb)
assert astrom.unit == self.astrom_unit
for name, value in [
('along', -0.5278008060295995734),
('xpl', 0.1133427418130752958e-5),
('ypl', 0.1453347595780646207e-5),
('sphi', -0.9440115679003211329),
('cphi', 0.3299123514971474711),
('diurab', 0.5135843661699913529e-6),
('eral', 2.617608903970400427),
('refa', 0.2014187790000000000e-3),
('refb', -0.2361408310000000000e-6)]:
assert_quantity_allclose(astrom[name], value * self.astrom_unit[name],
rtol=1e-12, atol=0*self.astrom_unit[name])
|
cf6ca8366679c07f63231a1ceac9af6aaa9ab7ae4f343af623065967e7a72697 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import itertools
import numpy as np
import numpy.lib.recfunctions as rfn
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.units.quantity_helper.function_helpers import (
ARRAY_FUNCTION_ENABLED, DISPATCHED_FUNCTIONS, FUNCTION_HELPERS, IGNORED_FUNCTIONS,
SUBCLASS_SAFE_FUNCTIONS, TBD_FUNCTIONS, UNSUPPORTED_FUNCTIONS)
from astropy.utils.compat import NUMPY_LT_1_20, NUMPY_LT_1_23
needs_array_function = pytest.mark.xfail(
not ARRAY_FUNCTION_ENABLED,
reason="Needs __array_function__ support")
# To get the functions that could be covered, we look for those that
# are wrapped. Of course, this does not give a full list pre-1.17.
def get_wrapped_functions(*modules):
wrapped_functions = {}
for mod in modules:
for name, f in mod.__dict__.items():
if f is np.printoptions or name.startswith('_'):
continue
if callable(f) and hasattr(f, '__wrapped__'):
wrapped_functions[name] = f
return wrapped_functions
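# Why ``__wrapped__`` is a usable detector (an assumption about numpy
# internals for the versions this file targets): overridable functions are
# wrapped by numpy's array_function_dispatch decorator, which applies
# functools.wraps and hence sets ``__wrapped__``.  For instance, one would
# expect
#
#     hasattr(np.concatenate, '__wrapped__')  # -> True  (dispatched)
#     hasattr(np.sin, '__wrapped__')          # -> False (plain ufunc)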
all_wrapped_functions = get_wrapped_functions(np, np.fft, np.linalg, np.lib.recfunctions)
all_wrapped = set(all_wrapped_functions.values())
class CoverageMeta(type):
"""Meta class that tracks which functions are covered by tests.
Assumes that a test is called 'test_<function_name>'.
"""
covered = set()
def __new__(mcls, name, bases, members):
for k, v in members.items():
if inspect.isfunction(v) and k.startswith('test'):
f = k.replace('test_', '')
if f in all_wrapped_functions:
mcls.covered.add(all_wrapped_functions[f])
return super().__new__(mcls, name, bases, members)
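# Usage sketch for CoverageMeta (descriptive only; mirrors the classes
# below): a method named test_<name> marks the wrapped numpy function
# <name> as covered, so untested wrapped functions can be reported later.
#
#     class TestFoo(metaclass=CoverageMeta):
#         def test_reshape(self):   # adds np.reshape to CoverageMeta.covered
#             ...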
class BasicTestSetup(metaclass=CoverageMeta):
"""Test setup for functions that should not change the unit.
Also provides a default Quantity with shape (3, 3) and units of m.
"""
def setup(self):
self.q = np.arange(9.).reshape(3, 3) / 4. * u.m
class InvariantUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
o = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs) * self.q.unit
assert o.shape == expected.shape
assert np.all(o == expected)
class NoUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
        expected = func(self.q.value, *args, **kwargs)
assert type(out) is type(expected)
if isinstance(expected, tuple):
assert all(np.all(o == x) for o, x in zip(out, expected))
else:
assert np.all(out == expected)
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.q) == (3, 3)
def test_size(self):
assert np.size(self.q) == 9
def test_ndim(self):
assert np.ndim(self.q) == 2
class TestShapeManipulation(InvariantUnitTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (9, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
q = 1. * u.m
o, so = np.atleast_1d(q, self.q)
assert o.shape == (1,)
assert o == q
expected = np.atleast_1d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_2d(self):
q = 1. * u.m
o, so = np.atleast_2d(q, self.q)
assert o.shape == (1, 1)
assert o == q
expected = np.atleast_2d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_3d(self):
q = 1. * u.m
o, so = np.atleast_3d(q, self.q)
assert o.shape == (1, 1, 1)
assert o == q
expected = np.atleast_3d(self.q.value) * u.m
assert np.all(so == expected)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.q[:, np.newaxis, :])
assert o.shape == (3, 3)
assert np.all(o == self.q)
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
self.check(np.broadcast_to, (3, 3, 3), subok=True)
out = np.broadcast_to(self.q, (3, 3, 3))
assert type(out) is np.ndarray # NOT Quantity
def test_broadcast_arrays(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
q2 = np.ones((3, 3, 3)) / u.s
o1, o2 = np.broadcast_arrays(self.q, q2, subok=True)
assert isinstance(o1, u.Quantity)
assert isinstance(o2, u.Quantity)
assert o1.shape == o2.shape == (3, 3, 3)
assert np.all(o1 == self.q)
assert np.all(o2 == q2)
a1, a2 = np.broadcast_arrays(self.q, q2)
assert type(a1) is np.ndarray
assert type(a2) is np.ndarray
class TestArgFunctions(NoUnitTestSetup):
def test_argmin(self):
self.check(np.argmin)
def test_argmax(self):
self.check(np.argmax)
def test_argsort(self):
self.check(np.argsort)
def test_lexsort(self):
self.check(np.lexsort)
def test_searchsorted(self):
q = self.q.ravel()
q2 = np.array([150., 350.]) * u.cm
out = np.searchsorted(q, q2)
expected = np.searchsorted(q.value, q2.to_value(q.unit))
assert np.all(out == expected)
def test_nonzero(self):
self.check(np.nonzero)
def test_argwhere(self):
self.check(np.argwhere)
@needs_array_function
def test_argpartition(self):
self.check(np.argpartition, 2)
def test_flatnonzero(self):
self.check(np.flatnonzero)
class TestAlongAxis(BasicTestSetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
out = np.take_along_axis(self.q, indices, axis=0)
expected = np.take_along_axis(self.q.value, indices,
axis=0) * self.q.unit
assert np.all(out == expected)
def test_put_along_axis(self):
q = self.q.copy()
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
np.put_along_axis(q, indices, axis=0, values=-100 * u.cm)
expected = q.value.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
expected = expected * q.unit
assert np.all(q == expected)
@pytest.mark.parametrize('axis', (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.q)
expected = np.apply_along_axis(np.square, axis,
self.q.value) * self.q.unit ** 2
assert_array_equal(out, expected)
@needs_array_function
@pytest.mark.parametrize('axes', ((1,), (0,), (0, 1)))
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.sum(np.square(x), axis)
out = np.apply_over_axes(function, self.q, axes)
expected = np.apply_over_axes(function, self.q.value, axes)
expected = expected * self.q.unit ** (2 * len(axes))
assert_array_equal(out, expected)
class TestIndicesFrom(NoUnitTestSetup):
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantUnitTestSetup):
def setup(self):
self.q = (np.arange(9.).reshape(3, 3) + 1j) * u.m
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantUnitTestSetup):
@needs_array_function
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.q)
assert_array_equal(copy, self.q)
@needs_array_function
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.q)
assert_array_equal(farray, self.q)
def test_empty_like(self):
o = np.empty_like(self.q)
assert o.shape == (3, 3)
assert isinstance(o, u.Quantity)
assert o.unit == self.q.unit
o2 = np.empty_like(prototype=self.q)
assert o2.shape == (3, 3)
assert isinstance(o2, u.Quantity)
assert o2.unit == self.q.unit
o3 = np.empty_like(self.q, subok=False)
assert type(o3) is np.ndarray
def test_zeros_like(self):
self.check(np.zeros_like)
o2 = np.zeros_like(a=self.q)
assert_array_equal(o2, self.q * 0.)
def test_ones_like(self):
self.check(np.ones_like)
@needs_array_function
def test_full_like(self):
o = np.full_like(self.q, 0.5 * u.km)
expected = np.empty_like(self.q.value) * u.m
expected[...] = 0.5 * u.km
assert np.all(o == expected)
with pytest.raises(u.UnitsError):
np.full_like(self.q, 0.5 * u.s)
class TestAccessingParts(InvariantUnitTestSetup):
def test_diag(self):
self.check(np.diag)
@needs_array_function
def test_diag_1d_input(self):
# Also check 1-D case; drops unit w/o __array_function__.
q = self.q.ravel()
o = np.diag(q)
expected = np.diag(q.value) << q.unit
assert o.unit == self.q.unit
assert o.shape == expected.shape
assert_array_equal(o, expected)
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False, True], self.q, axis=0)
expected = np.compress([True, False, True], self.q.value,
axis=0) * self.q.unit
assert np.all(o == expected)
def test_extract(self):
o = np.extract([True, False, True], self.q)
expected = np.extract([True, False, True],
self.q.value) * self.q.unit
assert np.all(o == expected)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_trim_zeros(self):
q = self.q.ravel()
out = np.trim_zeros(q)
expected = np.trim_zeros(q.value) * u.m
assert np.all(out == expected)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(metaclass=CoverageMeta):
def test_put(self):
q = np.arange(3.) * u.m
np.put(q, [0, 2], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
@needs_array_function
def test_putmask(self):
q = np.arange(3.) * u.m
mask = [True, False, True]
values = [50, 0, 150] * u.cm
np.putmask(q, mask, values)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
with pytest.raises(u.UnitsError):
np.putmask(q, mask, values.value)
with pytest.raises(u.UnitsError):
np.putmask(q.value, mask, values)
a = np.arange(3.)
values = [50, 0, 150] * u.percent
np.putmask(a, mask, values)
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
@needs_array_function
def test_place(self):
q = np.arange(3.) * u.m
np.place(q, [True, False, True], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.)
np.place(a, [True, False, True], [50, 150] * u.percent)
assert type(a) is np.ndarray
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
@needs_array_function
def test_copyto(self):
q = np.arange(3.) * u.m
np.copyto(q, [50, 0, 150] * u.cm, where=[True, False, True])
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.)
np.copyto(a, [50, 0, 150] * u.percent, where=[True, False, True])
assert type(a) is np.ndarray
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
def test_fill_diagonal(self):
q = np.arange(9.).reshape(3, 3) * u.m
expected = q.value.copy()
np.fill_diagonal(expected, 0.25)
expected = expected * u.m
np.fill_diagonal(q, 25. * u.cm)
assert q.unit == u.m
assert np.all(q == expected)
class TestRepeat(InvariantUnitTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
@needs_array_function
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(metaclass=CoverageMeta):
def setup(self):
self.q1 = np.arange(6.).reshape(2, 3) * u.m
self.q2 = self.q1.to(u.cm)
def check(self, func, *args, **kwargs):
q_list = kwargs.pop('q_list', [self.q1, self.q2])
q_ref = kwargs.pop('q_ref', q_list[0])
o = func(q_list, *args, **kwargs)
v_list = [q_ref._to_own_unit(q) for q in q_list]
expected = func(v_list, *args, **kwargs) * q_ref.unit
assert o.shape == expected.shape
assert np.all(o == expected)
@needs_array_function
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
if not NUMPY_LT_1_20:
# dtype argument only introduced in numpy 1.20
# regression test for gh-13322.
self.check(np.concatenate, dtype='f4')
self.check(np.concatenate, q_list=[np.zeros(self.q1.shape), self.q1, self.q2],
q_ref=self.q1)
out = np.empty((4, 3)) * u.dimensionless_unscaled
result = np.concatenate([self.q1, self.q2], out=out)
assert out is result
assert out.unit == self.q1.unit
expected = np.concatenate(
[self.q1.value, self.q2.to_value(self.q1.unit)]) * self.q1.unit
assert np.all(result == expected)
with pytest.raises(TypeError):
np.concatenate([self.q1, object()])
@needs_array_function
def test_stack(self):
self.check(np.stack)
@needs_array_function
def test_column_stack(self):
self.check(np.column_stack)
@needs_array_function
def test_hstack(self):
self.check(np.hstack)
@needs_array_function
def test_vstack(self):
self.check(np.vstack)
@needs_array_function
def test_dstack(self):
self.check(np.dstack)
@needs_array_function
def test_block(self):
self.check(np.block)
result = np.block([[0., 1.*u.m], [1.*u.cm, 2.*u.km]])
assert np.all(result == np.block([[0, 1.], [.01, 2000.]]) << u.m)
@needs_array_function
def test_append(self):
out = np.append(self.q1, self.q2, axis=0)
assert out.unit == self.q1.unit
expected = np.append(self.q1.value, self.q2.to_value(self.q1.unit),
axis=0) * self.q1.unit
assert np.all(out == expected)
a = np.arange(3.)
result = np.append(a, 50. * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.append(a, 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
@needs_array_function
def test_insert(self):
# Unit of inserted values is not ignored.
q = np.arange(12.).reshape(6, 2) * u.m
out = np.insert(q, (3, 5), [50., 25.] * u.cm)
assert isinstance(out, u.Quantity)
assert out.unit == q.unit
expected = np.insert(q.value, (3, 5), [0.5, 0.25]) << q.unit
assert np.all(out == expected)
# 0 can have any unit.
out2 = np.insert(q, (3, 5), 0)
expected2 = np.insert(q.value, (3, 5), 0) << q.unit
assert np.all(out2 == expected2)
a = np.arange(3.)
result = np.insert(a, (2,), 50. * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.insert(a, (2,), 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
with pytest.raises(TypeError):
np.insert(q, 3 * u.cm, 50. * u.cm)
with pytest.raises(u.UnitsError):
np.insert(q, (3, 5), 0. * u.s)
@needs_array_function
def test_pad(self):
q = np.arange(1., 6.) * u.m
out = np.pad(q, (2, 3), 'constant', constant_values=(0., 150.*u.cm))
assert out.unit == q.unit
expected = np.pad(q.value, (2, 3), 'constant',
constant_values=(0., 1.5)) * q.unit
assert np.all(out == expected)
out2 = np.pad(q, (2, 3), 'constant', constant_values=150.*u.cm)
assert out2.unit == q.unit
expected2 = np.pad(q.value, (2, 3), 'constant',
constant_values=1.5) * q.unit
assert np.all(out2 == expected2)
out3 = np.pad(q, (2, 3), 'linear_ramp', end_values=(25.*u.cm, 0.))
assert out3.unit == q.unit
expected3 = np.pad(q.value, (2, 3), 'linear_ramp',
end_values=(0.25, 0.)) * q.unit
assert np.all(out3 == expected3)
class TestSplit(metaclass=CoverageMeta):
def setup(self):
self.q = np.arange(54.).reshape(3, 3, 6) * u.m
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
expected = [x * self.q.unit for x in expected]
assert len(out) == len(expected)
assert all(o.shape == x.shape for o, x in zip(out, expected))
assert all(np.all(o == x) for o, x in zip(out, expected))
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestUfuncReductions(InvariantUnitTestSetup):
def test_amax(self):
self.check(np.amax)
def test_amin(self):
self.check(np.amin)
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
with pytest.raises(TypeError):
np.any(self.q)
def test_all(self):
with pytest.raises(TypeError):
np.all(self.q)
def test_sometrue(self):
with pytest.raises(TypeError):
np.sometrue(self.q)
def test_alltrue(self):
with pytest.raises(TypeError):
np.alltrue(self.q)
def test_prod(self):
with pytest.raises(u.UnitsError):
np.prod(self.q)
def test_product(self):
with pytest.raises(u.UnitsError):
np.product(self.q)
def test_cumprod(self):
with pytest.raises(u.UnitsError):
np.cumprod(self.q)
def test_cumproduct(self):
with pytest.raises(u.UnitsError):
np.cumproduct(self.q)
class TestUfuncLike(InvariantUnitTestSetup):
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round_(self):
self.check(np.round_)
def test_around(self):
self.check(np.around)
def test_fix(self):
self.check(np.fix)
def test_angle(self):
q = np.array([1+0j, 0+1j, 1+1j, 0+0j]) * u.m
out = np.angle(q)
expected = np.angle(q.value) * u.radian
assert np.all(out == expected)
def test_i0(self):
q = np.array([0., 10., 20.]) * u.percent
out = np.i0(q)
expected = np.i0(q.to_value(u.one)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.i0(self.q)
def test_clip(self):
qmin = 200 * u.cm
qmax = [270, 280, 290] * u.cm
out = np.clip(self.q, qmin, qmax)
expected = np.clip(self.q.value, qmin.to_value(self.q.unit),
qmax.to_value(self.q.unit)) * self.q.unit
assert np.all(out == expected)
@needs_array_function
def test_sinc(self):
q = [0., 3690., -270., 690.] * u.deg
out = np.sinc(q)
expected = np.sinc(q.to_value(u.radian)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.sinc(1.*u.one)
@needs_array_function
def test_where(self):
out = np.where([True, False, True], self.q, 1. * u.km)
expected = np.where([True, False, True], self.q.value,
1000.) * self.q.unit
assert np.all(out == expected)
@needs_array_function
def test_choose(self):
# from np.choose docstring
a = np.array([0, 1]).reshape((2, 1, 1))
q1 = np.array([1, 2, 3]).reshape((1, 3, 1)) * u.cm
q2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5)) * u.m
out = np.choose(a, (q1, q2))
        # Result is 2x3x5; out[0, :, :] comes from q1, out[1, :, :] from q2.
expected = np.choose(a, (q1.value, q2.to_value(q1.unit))) * u.cm
assert np.all(out == expected)
@needs_array_function
def test_select(self):
q = self.q
out = np.select([q < 0.55 * u.m, q > 1. * u.m],
[q, q.to(u.cm)], default=-1. * u.km)
expected = np.select([q.value < 0.55, q.value > 1],
[q.value, q.value], default=-1000) * u.m
assert np.all(out == expected)
@needs_array_function
def test_real_if_close(self):
q = np.array([1+0j, 0+1j, 1+1j, 0+0j]) * u.m
out = np.real_if_close(q)
expected = np.real_if_close(q.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_tril(self):
self.check(np.tril)
@needs_array_function
def test_triu(self):
self.check(np.triu)
@needs_array_function
def test_unwrap(self):
q = [0., 3690., -270., 690.] * u.deg
out = np.unwrap(q)
expected = (np.unwrap(q.to_value(u.rad)) * u.rad).to(q.unit)
assert out.unit == expected.unit
assert np.allclose(out, expected, atol=1*u.urad, rtol=0)
with pytest.raises(u.UnitsError):
np.unwrap([1., 2.]*u.m)
with pytest.raises(u.UnitsError):
np.unwrap(q, discont=1.*u.m)
def test_nan_to_num(self):
q = np.array([-np.inf, +np.inf, np.nan, 3., 4.]) * u.m
out = np.nan_to_num(q)
expected = np.nan_to_num(q.value) * q.unit
assert np.all(out == expected)
@needs_array_function
def test_nan_to_num_complex(self):
q = np.array([-np.inf, +np.inf, np.nan, 3., 4.]) * u.m
out = np.nan_to_num(q, nan=1.*u.km, posinf=2.*u.km, neginf=-2*u.km)
expected = [-2000., 2000., 1000., 3., 4.] * u.m
assert np.all(out == expected)
class TestUfuncLikeTests(metaclass=CoverageMeta):
def setup(self):
self.q = np.array([-np.inf, +np.inf, np.nan, 3., 4.]) * u.m
def check(self, func):
out = func(self.q)
expected = func(self.q.value)
assert type(out) is np.ndarray
assert out.dtype.kind == 'b'
assert np.all(out == expected)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
assert not np.isreal([1. + 1j]*u.m)
def test_iscomplex(self):
self.check(np.iscomplex)
assert np.iscomplex([1. + 1j]*u.m)
def test_isclose(self):
q1 = np.arange(3.) * u.m
q2 = np.array([0., 102., 199.]) * u.cm
atol = 1.5 * u.cm
rtol = 1. * u.percent
out = np.isclose(q1, q2, atol=atol)
expected = np.isclose(q1.value, q2.to_value(q1.unit),
atol=atol.to_value(q1.unit))
assert type(out) is np.ndarray
assert out.dtype.kind == 'b'
assert np.all(out == expected)
out = np.isclose(q1, q2, atol=0, rtol=rtol)
expected = np.isclose(q1.value, q2.to_value(q1.unit),
atol=0, rtol=0.01)
assert type(out) is np.ndarray
assert out.dtype.kind == 'b'
assert np.all(out == expected)
@needs_array_function
    def test_isclose_atol_default_unit(self):
q_cm = self.q.to(u.cm)
out = np.isclose(self.q, q_cm)
expected = np.isclose(self.q.value, q_cm.to_value(u.m))
assert np.all(out == expected)
q1 = np.arange(3.) * u.m
q2 = np.array([0., 101., 198.]) * u.cm
out = np.isclose(q1, q2, atol=0.011, rtol=0)
expected = np.isclose(q1.value, q2.to_value(q1.unit),
atol=0.011, rtol=0)
assert np.all(out == expected)
out2 = np.isclose(q2, q1, atol=0.011, rtol=0)
expected2 = np.isclose(q2.value, q1.to_value(q2.unit),
atol=0.011, rtol=0)
assert np.all(out2 == expected2)
class TestReductionLikeFunctions(InvariantUnitTestSetup):
def test_average(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
q2 = np.eye(3) / u.s
o = np.average(q1, weights=q2)
expected = np.average(q1.value, weights=q2.value) * u.m
assert np.all(o == expected)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
o = np.var(self.q)
expected = np.var(self.q.value) * self.q.unit ** 2
assert np.all(o == expected)
def test_median(self):
self.check(np.median)
@needs_array_function
def test_quantile(self):
self.check(np.quantile, 0.5)
o = np.quantile(self.q, 50 * u.percent)
expected = np.quantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
# For ndarray input, we return a Quantity.
o2 = np.quantile(self.q.value, 50 * u.percent)
assert o2.unit == u.dimensionless_unscaled
assert np.all(o2 == expected.value)
o3 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, out=o3)
assert result is o3
assert np.all(o3 == expected)
o4 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, None, o4)
assert result is o4
assert np.all(o4 == expected)
@needs_array_function
def test_percentile(self):
self.check(np.percentile, 0.5)
o = np.percentile(self.q, 0.5 * u.one)
expected = np.percentile(self.q.value, 50) * u.m
assert np.all(o == expected)
def test_trace(self):
self.check(np.trace)
@needs_array_function
def test_count_nonzero(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
o = np.count_nonzero(q1)
assert type(o) is not u.Quantity
assert o == 8
o = np.count_nonzero(q1, axis=1)
        # With an axis argument, a plain ndarray of counts is returned.
assert type(o) is np.ndarray
assert np.all(o == np.array([2, 3, 3]))
def test_allclose(self):
q1 = np.arange(3.) * u.m
q2 = np.array([0., 101., 199.]) * u.cm
atol = 2 * u.cm
rtol = 1. * u.percent
assert np.allclose(q1, q2, atol=atol)
assert np.allclose(q1, q2, atol=0., rtol=rtol)
@needs_array_function
def test_allclose_atol_default_unit(self):
q1 = np.arange(3.) * u.m
q2 = np.array([0., 101., 199.]) * u.cm
assert np.allclose(q1, q2, atol=0.011, rtol=0)
assert not np.allclose(q2, q1, atol=0.011, rtol=0)
def test_allclose_failures(self):
q1 = np.arange(3.) * u.m
q2 = np.array([0., 101., 199.]) * u.cm
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=2*u.s, rtol=0)
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=0, rtol=1.*u.s)
@needs_array_function
def test_array_equal(self):
q1 = np.arange(3.) * u.m
q2 = q1.to(u.cm)
assert np.array_equal(q1, q2)
q3 = q1.value * u.cm
assert not np.array_equal(q1, q3)
@needs_array_function
def test_array_equiv(self):
q1 = np.array([[0., 1., 2.]]*3) * u.m
q2 = q1[0].to(u.cm)
assert np.array_equiv(q1, q2)
q3 = q1[0].value * u.cm
assert not np.array_equiv(q1, q3)
class TestNanFunctions(InvariantUnitTestSetup):
def setup(self):
super().setup()
self.q[1, 1] = np.nan
def test_nanmax(self):
self.check(np.nanmax)
def test_nanmin(self):
self.check(np.nanmin)
def test_nanargmin(self):
out = np.nanargmin(self.q)
expected = np.nanargmin(self.q.value)
assert out == expected
def test_nanargmax(self):
out = np.nanargmax(self.q)
expected = np.nanargmax(self.q.value)
assert out == expected
def test_nanmean(self):
self.check(np.nanmean)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nansum(self):
self.check(np.nansum)
def test_nancumsum(self):
self.check(np.nancumsum)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanvar(self):
out = np.nanvar(self.q)
expected = np.nanvar(self.q.value) * self.q.unit ** 2
assert np.all(out == expected)
def test_nanprod(self):
with pytest.raises(u.UnitsError):
np.nanprod(self.q)
def test_nancumprod(self):
with pytest.raises(u.UnitsError):
np.nancumprod(self.q)
@needs_array_function
def test_nanquantile(self):
self.check(np.nanquantile, 0.5)
o = np.nanquantile(self.q, 50 * u.percent)
expected = np.nanquantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
@needs_array_function
def test_nanpercentile(self):
self.check(np.nanpercentile, 0.5)
o = np.nanpercentile(self.q, 0.5 * u.one)
expected = np.nanpercentile(self.q.value, 50) * u.m
assert np.all(o == expected)
class TestVariousProductFunctions(metaclass=CoverageMeta):
"""
Test functions that are similar to gufuncs
"""
@needs_array_function
def test_cross(self):
q1 = np.arange(6.).reshape(2, 3) * u.m
q2 = np.array([4., 5., 6.]) / u.s
o = np.cross(q1, q2)
expected = np.cross(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_outer(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([1, 2]) / u.s
o = np.outer(q1, q2)
assert np.all(o == np.array([[1, 2], [2, 4], [3, 6]]) * u.m / u.s)
o2 = 0 * o
result = np.outer(q1, q2, out=o2)
assert result is o2
assert np.all(o2 == o)
with pytest.raises(TypeError):
np.outer(q1, q2, out=object())
@needs_array_function
def test_inner(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([4, 5, 6]) / u.s
o = np.inner(q1, q2)
assert o == 32 * u.m / u.s
@needs_array_function
def test_dot(self):
q1 = np.array([1., 2., 3.]) * u.m
q2 = np.array([4., 5., 6.]) / u.s
o = np.dot(q1, q2)
assert o == 32. * u.m / u.s
@needs_array_function
def test_vdot(self):
q1 = np.array([1j, 2j, 3j]) * u.m
q2 = np.array([4j, 5j, 6j]) / u.s
o = np.vdot(q1, q2)
assert o == (32. + 0j) * u.m / u.s
@needs_array_function
def test_tensordot(self):
# From the docstring example
a = np.arange(60.).reshape(3, 4, 5) * u.m
b = np.arange(24.).reshape(4, 3, 2) / u.s
c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
expected = np.tensordot(a.value, b.value,
axes=([1, 0], [0, 1])) * u.m / u.s
assert np.all(c == expected)
@needs_array_function
def test_kron(self):
q1 = np.eye(2) * u.m
q2 = np.ones(2) / u.s
o = np.kron(q1, q2)
expected = np.kron(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_einsum(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
o = np.einsum('...i', q1)
assert np.all(o == q1)
o = np.einsum('ii', q1)
expected = np.einsum('ii', q1.value) * u.m
assert np.all(o == expected)
q2 = np.eye(3) / u.s
o2 = np.einsum('ij,jk', q1, q2)
assert np.all(o2 == q1 / u.s)
o3 = 0 * o2
result = np.einsum('ij,jk', q1, q2, out=o3)
assert result is o3
assert np.all(o3 == o2)
def test_einsum_path(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
o = np.einsum_path('...i', q1)
assert o[0] == ['einsum_path', (0,)]
o = np.einsum_path('ii', q1)
assert o[0] == ['einsum_path', (0,)]
q2 = np.eye(3) / u.s
o = np.einsum_path('ij,jk', q1, q2)
assert o[0] == ['einsum_path', (0, 1)]
class TestIntDiffFunctions(metaclass=CoverageMeta):
def test_trapz(self):
y = np.arange(9.) * u.m / u.s
out = np.trapz(y)
expected = np.trapz(y.value) * y.unit
assert np.all(out == expected)
dx = 10. * u.s
out = np.trapz(y, dx=dx)
expected = np.trapz(y.value, dx=dx.value) * y.unit * dx.unit
assert np.all(out == expected)
x = np.arange(9.) * u.s
out = np.trapz(y, x)
expected = np.trapz(y.value, x.value) * y.unit * x.unit
assert np.all(out == expected)
def test_diff(self):
# Simple diff works out of the box.
x = np.arange(10.) * u.m
out = np.diff(x)
expected = np.diff(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_diff_prepend_append(self):
x = np.arange(10.) * u.m
out = np.diff(x, prepend=-12.5*u.cm, append=1*u.km)
expected = np.diff(x.value, prepend=-0.125, append=1000.) * x.unit
assert np.all(out == expected)
x = np.arange(10.) * u.m
out = np.diff(x, prepend=-12.5*u.cm, append=1*u.km, n=2)
expected = np.diff(x.value, prepend=-0.125, append=1000.,
n=2) * x.unit
assert np.all(out == expected)
with pytest.raises(TypeError):
np.diff(x, prepend=object())
def test_gradient(self):
# Simple gradient works out of the box.
x = np.arange(10.) * u.m
out = np.gradient(x)
expected = np.gradient(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_gradient_spacing(self):
        # Gradient with a Quantity spacing requires __array_function__ support.
x = np.arange(10.) * u.m
spacing = 10. * u.s
out = np.gradient(x, spacing)
expected = np.gradient(x.value, spacing.value) * (x.unit /
spacing.unit)
assert np.all(out == expected)
f = np.array([[1, 2, 6], [3, 4, 5]]) * u.m
dx = 2. * u.s
y = [1., 1.5, 3.5] * u.GHz
dfdx, dfdy = np.gradient(f, dx, y)
exp_dfdx, exp_dfdy = np.gradient(f.value, dx.value, y.value)
exp_dfdx = exp_dfdx * f.unit / dx.unit
exp_dfdy = exp_dfdy * f.unit / y.unit
assert np.all(dfdx == exp_dfdx)
assert np.all(dfdy == exp_dfdy)
dfdx2 = np.gradient(f, dx, axis=0)
assert np.all(dfdx2 == exp_dfdx)
dfdy2 = np.gradient(f, y, axis=(1,))
assert np.all(dfdy2 == exp_dfdy)
class TestSpaceFunctions(metaclass=CoverageMeta):
def test_linspace(self):
        # Note: linspace gets the unit of the end point, which is not super logical.
out = np.linspace(1000.*u.m, 10.*u.km, 5)
expected = np.linspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(6.).reshape(2, 3) * u.m
q2 = 10000. * u.cm
out = np.linspace(q1, q2, 5)
expected = np.linspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
@needs_array_function
def test_logspace(self):
unit = u.m / u.s**2
out = np.logspace(10.*u.dex(unit), 20*u.dex(unit), 10)
expected = np.logspace(10., 20., 10) * unit
assert np.all(out == expected)
out = np.logspace(10.*u.STmag, 20*u.STmag, 10)
expected = np.logspace(10., 20., 10, base=10.**(-0.4)) * u.ST
assert u.allclose(out, expected)
@needs_array_function
def test_geomspace(self):
out = np.geomspace(1000.*u.m, 10.*u.km, 5)
expected = np.geomspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(1., 7.).reshape(2, 3) * u.m
q2 = 10000. * u.cm
out = np.geomspace(q1, q2, 5)
expected = np.geomspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
class TestInterpolationFunctions(metaclass=CoverageMeta):
@needs_array_function
def test_interp(self):
x = np.array([1250., 2750.]) * u.m
xp = np.arange(5.) * u.km
yp = np.arange(5.) * u.day
out = np.interp(x, xp, yp)
expected = np.interp(x.to_value(xp.unit), xp.value, yp.value) * yp.unit
assert np.all(out == expected)
out = np.interp(x, xp, yp.value)
assert type(out) is np.ndarray
assert np.all(out == expected.value)
@needs_array_function
def test_piecewise(self):
x = np.linspace(-2.5, 2.5, 6) * u.m
out = np.piecewise(x, [x < 0, x >= 0], [-1*u.s, 1*u.day])
expected = np.piecewise(x.value, [x.value < 0, x.value >= 0],
[-1, 24*3600]) * u.s
assert out.unit == expected.unit
assert np.all(out == expected)
out2 = np.piecewise(x, [x < 1 * u.m, x >= 0],
[-1*u.s, 1*u.day, lambda x: 1*u.hour])
expected2 = np.piecewise(x.value, [x.value < 1, x.value >= 0],
[-1, 24*3600, 3600]) * u.s
assert out2.unit == expected2.unit
assert np.all(out2 == expected2)
out3 = np.piecewise(x, [x < 1 * u.m, x >= 0],
[0, 1*u.percent, lambda x: 1*u.one])
expected3 = np.piecewise(x.value, [x.value < 1, x.value >= 0],
[0, 0.01, 1]) * u.one
assert out3.unit == expected3.unit
assert np.all(out3 == expected3)
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x, [x], [0.])
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x.value, [x], [0.])
class TestBincountDigitize(metaclass=CoverageMeta):
@needs_array_function
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
weights = np.arange(len(i)) * u.Jy
out = np.bincount(i, weights)
expected = np.bincount(i, weights.value) * weights.unit
assert_array_equal(out, expected)
with pytest.raises(TypeError):
np.bincount(weights)
@needs_array_function
def test_digitize(self):
x = np.array([1500., 2500., 4500.]) * u.m
bins = np.arange(10.) * u.km
out = np.digitize(x, bins)
expected = np.digitize(x.to_value(bins.unit), bins.value)
assert_array_equal(out, expected)
class TestHistogramFunctions(metaclass=CoverageMeta):
def setup(self):
self.x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
self.y = np.array([1.2, 2.2, 2.4, 3.0, 4.0]) * u.cm
self.weights = np.arange(len(self.x)) / u.s
def check(self, function, *args, value_args=None, value_kwargs=None,
expected_units=None, **kwargs):
"""Check quanties are treated correctly in the histogram function.
Test is done by applying ``function(*args, **kwargs)``, where
the argument can be quantities, and comparing the result to
``function(*value_args, **value_kwargs)``, with the outputs
converted to quantities using the ``expected_units`` (where `None`
indicates the output is expected to be a regular array).
For ``**value_kwargs``, any regular ``kwargs`` are treated as
defaults, i.e., non-quantity arguments do not have to be repeated.
"""
if value_kwargs is None:
value_kwargs = kwargs
else:
for k, v in kwargs.items():
value_kwargs.setdefault(k, v)
# Get the result, using the Quantity override.
out = function(*args, **kwargs)
# Get the comparison, with non-Quantity arguments.
expected = function(*value_args, **value_kwargs)
# All histogram functions return a tuple of the actual histogram
# and the bin edges. First, check the actual histogram.
out_h = out[0]
expected_h = expected[0]
if expected_units[0] is not None:
expected_h = expected_h * expected_units[0]
assert_array_equal(out_h, expected_h)
        # Check bin edges. Here, histogramdd returns an iterable of the
# bin edges as the second return argument, while histogram and
# histogram2d return the bin edges directly.
if function is np.histogramdd:
bin_slice = 1
else:
bin_slice = slice(1, None)
for o_bin, e_bin, e_unit in zip(out[bin_slice],
expected[bin_slice],
expected_units[bin_slice]):
if e_unit is not None:
e_bin = e_bin * e_unit
assert_array_equal(o_bin, e_bin)
@needs_array_function
def test_histogram(self):
x = self.x
weights = self.weights
# Plain histogram.
self.check(np.histogram, x,
value_args=(x.value,),
expected_units=(None, x.unit))
# With bins.
self.check(np.histogram, x, [125, 200] * u.cm,
value_args=(x.value, [1.25, 2.]),
expected_units=(None, x.unit))
# With density.
self.check(np.histogram, x, [125, 200] * u.cm, density=True,
value_args=(x.value, [1.25, 2.]),
expected_units=(1/x.unit, x.unit))
# With weights.
self.check(np.histogram, x, [125, 200] * u.cm, weights=weights,
value_args=(x.value, [1.25, 2.]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit))
# With weights and density.
self.check(np.histogram, x, [125, 200] * u.cm,
weights=weights, density=True,
value_args=(x.value, [1.25, 2.]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit/x.unit, x.unit))
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram(x.value, [125, 200] * u.s)
@needs_array_function
def test_histogram_bin_edges(self):
x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
out_b = np.histogram_bin_edges(x)
expected_b = np.histogram_bin_edges(x.value) * x.unit
assert np.all(out_b == expected_b)
# With bins
out2_b = np.histogram_bin_edges(x, [125, 200] * u.cm)
expected2_b = np.histogram_bin_edges(x.value, [1.25, 2.]) * x.unit
assert np.all(out2_b == expected2_b)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x.value, [125, 200] * u.s)
@needs_array_function
def test_histogram2d(self):
x, y = self.x, self.y
weights = self.weights
# Basic tests with X, Y.
self.check(np.histogram2d, x, y,
value_args=(x.value, y.value),
expected_units=(None, x.unit, y.unit))
# Check units with density.
self.check(np.histogram2d, x, y, density=True,
value_args=(x.value, y.value),
expected_units=(1/(x.unit*y.unit), x.unit, y.unit))
# Check units with weights.
self.check(np.histogram2d, x, y, weights=weights,
value_args=(x.value, y.value),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit, y.unit))
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.] * u.m
self.check(np.histogram2d, x, y, [5, inb_y],
value_args=(x.value, y.value,
[5, np.array([0, 2.5, 100.])]),
expected_units=(None, x.unit, y.unit))
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.] * u.percent
self.check(np.histogram2d, x.value, y.value, bins=[5, inb2_y],
value_args=(x.value, y.value),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.])]),
expected_units=(None, u.one, u.one))
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogram2d(x, y, 125 * u.s)
with pytest.raises(TypeError):
np.histogram2d(x.value, y.value, 125 * u.s)
# Bin units need to match units of x, y.
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogram2d(x.value, y.value, [125, 200] * u.s)
@needs_array_function
def test_histogramdd(self):
        # First replicate the histogram2d tests, but using the
        # histogramdd override, which normally takes the sample as a tuple
# with a given number of dimensions, and returns the histogram
# as well as a tuple of bin edges.
sample = self.x, self.y
sample_units = self.x.unit, self.y.unit
sample_values = (self.x.value, self.y.value)
weights = self.weights
# Basic tests with X, Y
self.check(np.histogramdd, sample,
value_args=(sample_values,),
expected_units=(None, sample_units))
# Check units with density.
self.check(np.histogramdd, sample, density=True,
value_args=(sample_values,),
expected_units=(1/(self.x.unit*self.y.unit),
sample_units))
# Check units with weights.
self.check(np.histogramdd, sample, weights=weights,
value_args=(sample_values,),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, sample_units))
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.] * u.m
self.check(np.histogramdd, sample, [5, inb_y],
value_args=(sample_values, [5, np.array([0, 2.5, 100.])]),
expected_units=(None, sample_units))
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.] * u.percent
self.check(np.histogramdd, sample_values, bins=[5, inb2_y],
value_args=(sample_values,),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.])]),
expected_units=(None, (u.one, u.one)))
# For quantities, it is probably not that likely one would pass
# in the sample as an array, but check that it works anyway.
# This also gives a 3-D check.
xyz = np.random.normal(size=(10, 3)) * u.m
self.check(np.histogramdd, xyz,
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,)*3))
# Passing it in as a tuple should work just as well; note the
# *last* axis contains the sample dimension.
self.check(np.histogramdd, (xyz[:, 0], xyz[:, 1], xyz[:, 2]),
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,)*3))
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogramdd(sample, 125 * u.s)
# Sequence of single items should be integer.
with pytest.raises(TypeError):
np.histogramdd(sample, [125, 200] * u.s)
with pytest.raises(TypeError):
np.histogramdd(sample_values, [125, 200] * u.s)
# Units of bins should match.
with pytest.raises(u.UnitsError):
np.histogramdd(sample, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogramdd(sample_values, ([125, 200] * u.s, [125, 200]))
@needs_array_function
def test_correlate(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.correlate(x1, x2)
expected = np.correlate(x1.value, x2.value) * u.m ** 2
assert np.all(out == expected)
@needs_array_function
def test_convolve(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.convolve(x1, x2)
expected = np.convolve(x1.value, x2.value) * u.m ** 2
assert np.all(out == expected)
@needs_array_function
def test_cov(self):
# Do not see how we can use cov with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.cov(x)
@needs_array_function
def test_corrcoef(self):
        # Do not see how we can use corrcoef with Quantity either.
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.corrcoef(x)
class TestSortFunctions(InvariantUnitTestSetup):
def test_sort(self):
self.check(np.sort)
@needs_array_function
def test_sort_complex(self):
self.check(np.sort_complex)
def test_msort(self):
self.check(np.msort)
def test_partition(self):
self.check(np.partition, 2)
class TestStringFunctions(metaclass=CoverageMeta):
# For these, making the behaviour work means deviating only slightly from
# the docstring; by default they fail miserably, so we might as well.
def setup(self):
self.q = np.arange(3.) * u.Jy
@needs_array_function
def test_array2string(self):
# The default formatters cannot handle units, so if we do not pass
# a relevant formatter, we are better off just treating it as an
# array (which happens for all subtypes).
out0 = np.array2string(self.q)
expected0 = str(self.q.value)
assert out0 == expected0
# Arguments are interpreted as usual.
out1 = np.array2string(self.q, separator=', ')
expected1 = '[0., 1., 2.]'
assert out1 == expected1
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.q, separator=', ', formatter={'all': str})
expected2 = '[0.0 Jy, 1.0 Jy, 2.0 Jy]'
assert out2 == expected2
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(self.q, None, None, None, ', ', '',
np._NoValue, {'float': str})
assert out3 == expected2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.q, separator=', ', formatter={'int': str})
assert out4 == expected1
@needs_array_function
def test_array_repr(self):
out = np.array_repr(self.q)
assert out == "Quantity([0., 1., 2.], unit='Jy')"
q2 = self.q.astype('f4')
out2 = np.array_repr(q2)
assert out2 == "Quantity([0., 1., 2.], unit='Jy', dtype=float32)"
@needs_array_function
def test_array_str(self):
out = np.array_str(self.q)
expected = str(self.q)
assert out == expected
class TestBitAndIndexFunctions(metaclass=CoverageMeta):
# Index/bit functions generally fail for floats, so the usual
# float quantities are safe, but the integer ones are not.
def setup(self):
self.q = np.arange(3) * u.m
self.uint_q = u.Quantity(np.arange(3), 'm', dtype='u1')
@needs_array_function
def test_packbits(self):
with pytest.raises(TypeError):
np.packbits(self.q)
with pytest.raises(TypeError):
np.packbits(self.uint_q)
@needs_array_function
def test_unpackbits(self):
with pytest.raises(TypeError):
np.unpackbits(self.q)
with pytest.raises(TypeError):
np.unpackbits(self.uint_q)
@needs_array_function
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.q, 3)
with pytest.raises(TypeError):
np.unravel_index(self.uint_q, 3)
@needs_array_function
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.q,), 3)
with pytest.raises(TypeError):
np.ravel_multi_index((self.uint_q,), 3)
@needs_array_function
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.q)
with pytest.raises(TypeError):
np.ix_(self.uint_q)
class TestDtypeFunctions(NoUnitTestSetup):
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.q.dtype)
self.check(np.can_cast, 'f4')
def test_min_scalar_type(self):
out = np.min_scalar_type(self.q[0])
expected = np.min_scalar_type(self.q.value[0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(metaclass=CoverageMeta):
def test_meshgrid(self):
q1 = np.arange(3.) * u.m
q2 = np.arange(5.) * u.s
o1, o2 = np.meshgrid(q1, q2)
e1, e2 = np.meshgrid(q1.value, q2.value)
assert np.all(o1 == e1 * q1.unit)
assert np.all(o2 == e2 * q2.unit)
class TestMemoryFunctions(NoUnitTestSetup):
def test_shares_memory(self):
self.check(np.shares_memory, self.q.value)
def test_may_share_memory(self):
self.check(np.may_share_memory, self.q.value)
class TestSetOpsFunctions(metaclass=CoverageMeta):
def setup(self):
self.q = np.array([[0., 1., -1.],
[3., 5., 3.],
[0., 1., -1]]) * u.m
self.q2 = np.array([0., 100., 150., 200.]) * u.cm
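# Helper: run ``function`` both on the quantities and on their plain values
# (converted to self.q.unit) and compare the results; ``unit=None`` signals
# that the result should be unitless (e.g., the boolean masks from in1d).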
def check(self, function, qs, *args, **kwargs):
unit = kwargs.pop('unit', self.q.unit)
out = function(*qs, *args, **kwargs)
qv = tuple(q.to_value(self.q.unit) for q in qs)
expected = function(*qv, *args, **kwargs)
if isinstance(expected, tuple):
if unit:
expected = (expected[0] * unit,) + expected[1:]
for o, e in zip(out, expected):
assert_array_equal(o, e)
else:
if unit:
expected = expected * unit
assert_array_equal(out, expected)
def check1(self, function, *args, **kwargs):
self.check(function, (self.q,), *args, **kwargs)
def check2(self, function, *args, **kwargs):
self.check(function, (self.q, self.q2), *args, **kwargs)
@pytest.mark.parametrize('kwargs', (
dict(return_index=True, return_inverse=True),
dict(return_counts=True),
dict(return_index=True, return_inverse=True, return_counts=True)))
def test_unique(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize('kwargs', (
dict(axis=0),
dict(axis=1),
dict(return_counts=True, return_inverse=False, axis=1)))
def test_unique_more_complex(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize('kwargs', (
dict(),
dict(return_indices=True)))
def test_intersect1d(self, kwargs):
self.check2(np.intersect1d, **kwargs)
@needs_array_function
def test_setxor1d(self):
self.check2(np.setxor1d)
@needs_array_function
def test_union1d(self):
self.check2(np.union1d)
result = np.union1d(np.array([0., np.nan]), np.arange(3) << u.m)
assert result.unit is u.m
assert_array_equal(result.value, np.array([0., 1., 2., np.nan]))
@needs_array_function
def test_setdiff1d(self):
self.check2(np.setdiff1d)
@needs_array_function
def test_in1d(self):
self.check2(np.in1d, unit=None)
# Check zero is treated as having any unit.
assert np.in1d(np.zeros(1), self.q2)
with pytest.raises(u.UnitsError):
np.in1d(np.ones(1), self.q2)
@needs_array_function
def test_isin(self):
self.check2(np.isin, unit=None)
def test_ediff1d(self):
# ediff1d always works, as it calls the Quantity method.
self.check1(np.ediff1d)
x = np.arange(10.) * u.m
out = np.ediff1d(x, to_begin=-12.5*u.cm, to_end=1*u.km)
expected = np.ediff1d(x.value, to_begin=-0.125, to_end=1000.) * x.unit
assert_array_equal(out, expected)
class TestDatetimeFunctions(BasicTestSetup):
def test_busday_count(self):
with pytest.raises(TypeError):
np.busday_count(self.q, self.q)
def test_busday_offset(self):
with pytest.raises(TypeError):
np.busday_offset(self.q, self.q)
def test_datetime_as_string(self):
with pytest.raises(TypeError):
np.datetime_as_string(self.q)
def test_is_busday(self):
with pytest.raises(TypeError):
np.is_busday(self.q)
# These functions always worked; ensure they do not regress.
# Note that they are *not* wrapped so no need to check coverage.
@pytest.mark.parametrize('function', [np.fft.fftfreq, np.fft.rfftfreq])
def test_fft_frequencies(function):
out = function(128, d=0.1*u.s)
expected = function(128, d=0.1) / u.s
assert_array_equal(out, expected)
@needs_array_function
class TestFFT(InvariantUnitTestSetup):
# These are all trivial, just preserve the unit.
def setup(self):
# Use real input; gets turned into complex as needed.
self.q = np.arange(128.).reshape(8, -1) * u.s
def test_fft(self):
self.check(np.fft.fft)
def test_ifft(self):
self.check(np.fft.ifft)
def test_rfft(self):
self.check(np.fft.rfft)
def test_irfft(self):
self.check(np.fft.irfft)
def test_fft2(self):
self.check(np.fft.fft2)
def test_ifft2(self):
self.check(np.fft.ifft2)
def test_rfft2(self):
self.check(np.fft.rfft2)
def test_irfft2(self):
self.check(np.fft.irfft2)
def test_fftn(self):
self.check(np.fft.fftn)
def test_ifftn(self):
self.check(np.fft.ifftn)
def test_rfftn(self):
self.check(np.fft.rfftn)
def test_irfftn(self):
self.check(np.fft.irfftn)
def test_hfft(self):
self.check(np.fft.hfft)
def test_ihfft(self):
self.check(np.fft.ihfft)
def test_fftshift(self):
self.check(np.fft.fftshift)
def test_ifftshift(self):
self.check(np.fft.ifftshift)
class TestLinAlg(metaclass=CoverageMeta):
def setup(self):
# Use a matrix safe for inversion, etc.
self.q = np.array([[1., -1., 2.],
[0., 3., -1.],
[-1., -1., 1.]]) << u.m
def test_cond(self):
c = np.linalg.cond(self.q)
expected = np.linalg.cond(self.q.value)
assert c == expected
def test_matrix_rank(self):
r = np.linalg.matrix_rank(self.q)
x = np.linalg.matrix_rank(self.q.value)
assert r == x
@needs_array_function
def test_matrix_rank_with_tol(self):
# Use a matrix that is not so good, so tol=1 and tol=0.01 differ.
q = np.arange(9.).reshape(3, 3) / 4 * u.m
tol = 1. * u.cm
r2 = np.linalg.matrix_rank(q, tol)
x2 = np.linalg.matrix_rank(q.value, tol.to_value(q.unit))
assert r2 == x2
def test_matrix_power(self):
q1 = np.linalg.matrix_power(self.q, 1)
assert_array_equal(q1, self.q)
q2 = np.linalg.matrix_power(self.q, 2)
assert_array_equal(q2, self.q @ self.q)
q2 = np.linalg.matrix_power(self.q, 4)
assert_array_equal(q2, self.q @ self.q @ self.q @ self.q)
@needs_array_function
def test_matrix_inv_power(self):
qinv = np.linalg.inv(self.q.value) / self.q.unit
qm1 = np.linalg.matrix_power(self.q, -1)
assert_array_equal(qm1, qinv)
qm3 = np.linalg.matrix_power(self.q, -3)
assert_array_equal(qm3, qinv @ qinv @ qinv)
@needs_array_function
def test_multi_dot(self):
q2 = np.linalg.multi_dot([self.q, self.q])
q2x = self.q @ self.q
assert_array_equal(q2, q2x)
q3 = np.linalg.multi_dot([self.q, self.q, self.q])
q3x = self.q @ self.q @ self.q
assert_array_equal(q3, q3x)
@needs_array_function
def test_svd(self):
m = np.arange(10.) * np.arange(5.)[:, np.newaxis] * u.m
svd_u, svd_s, svd_vt = np.linalg.svd(m, full_matrices=False)
svd_ux, svd_sx, svd_vtx = np.linalg.svd(m.value, full_matrices=False)
svd_sx <<= m.unit
assert_array_equal(svd_u, svd_ux)
assert_array_equal(svd_vt, svd_vtx)
assert_array_equal(svd_s, svd_sx)
assert u.allclose(svd_u @ np.diag(svd_s) @ svd_vt, m)
s2 = np.linalg.svd(m, compute_uv=False)
svd_s2x = np.linalg.svd(m.value, compute_uv=False) << m.unit
assert_array_equal(s2, svd_s2x)
@needs_array_function
def test_inv(self):
inv = np.linalg.inv(self.q)
expected = np.linalg.inv(self.q.value) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_pinv(self):
pinv = np.linalg.pinv(self.q)
expected = np.linalg.pinv(self.q.value) / self.q.unit
assert_array_equal(pinv, expected)
rcond = 0.01 * u.cm
pinv2 = np.linalg.pinv(self.q, rcond)
expected2 = np.linalg.pinv(self.q.value,
rcond.to_value(self.q.unit)) / self.q.unit
assert_array_equal(pinv2, expected2)
@needs_array_function
def test_tensorinv(self):
inv = np.linalg.tensorinv(self.q, ind=1)
expected = np.linalg.tensorinv(self.q.value, ind=1) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_det(self):
det = np.linalg.det(self.q)
expected = np.linalg.det(self.q.value)
expected <<= self.q.unit ** self.q.shape[-1]
assert_array_equal(det, expected)
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[0]) # Not 2-D
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[:-1]) # Not square.
@needs_array_function
def test_slogdet(self):
# TODO: Could be supported if we had a natural logarithm unit.
with pytest.raises(TypeError):
logdet = np.linalg.slogdet(self.q)
assert hasattr(logdet, 'unit')
@needs_array_function
def test_solve(self):
b = np.array([1., 2., 4.]) * u.m / u.s
x = np.linalg.solve(self.q, b)
xx = np.linalg.solve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_tensorsolve(self):
b = np.array([1., 2., 4.]) * u.m / u.s
x = np.linalg.tensorsolve(self.q, b)
xx = np.linalg.tensorsolve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_lstsq(self):
b = np.array([1., 2., 4.]) * u.m / u.s
x, residuals, rank, s = np.linalg.lstsq(self.q, b, rcond=None)
xx, residualsx, rankx, sx = np.linalg.lstsq(self.q.value, b.value,
rcond=None)
xx <<= b.unit / self.q.unit
residualsx <<= b.unit ** 2
sx <<= self.q.unit
assert_array_equal(x, xx)
assert_array_equal(residuals, residualsx)
assert_array_equal(s, sx)
assert rank == rankx
assert u.allclose(self.q @ x, b)
# Also do one where we can check the answer...
m = np.eye(3)
b = np.arange(3) * u.m
x, residuals, rank, s = np.linalg.lstsq(m, b, rcond=1.*u.percent)
assert_array_equal(x, b)
assert np.all(residuals == 0 * u.m**2)
assert rank == 3
assert_array_equal(s, np.array([1., 1., 1.]) << u.one)
with pytest.raises(u.UnitsError):
np.linalg.lstsq(m, b, rcond=1.*u.s)
@needs_array_function
def test_norm(self):
n = np.linalg.norm(self.q)
expected = np.linalg.norm(self.q.value) << self.q.unit
assert_array_equal(n, expected)
# Special case: 1-D, ord=0.
n1 = np.linalg.norm(self.q[0], ord=0)
expected1 = np.linalg.norm(self.q[0].value, ord=0) << u.one
assert_array_equal(n1, expected1)
@needs_array_function
def test_cholesky(self):
# Numbers from np.linalg.cholesky docstring.
q = np.array([[1, -2j], [2j, 5]]) * u.m
cd = np.linalg.cholesky(q)
cdx = np.linalg.cholesky(q.value) << q.unit ** 0.5
assert_array_equal(cd, cdx)
assert u.allclose(cd @ cd.T.conj(), q)
@needs_array_function
def test_qr(self):
# This is not exhaustive...
a = np.array([[1, -2j], [2j, 5]]) * u.m
q, r = np.linalg.qr(a)
qx, rx = np.linalg.qr(a.value)
qx <<= u.one
rx <<= a.unit
assert_array_equal(q, qx)
assert_array_equal(r, rx)
assert u.allclose(q @ r, a)
@needs_array_function
def test_eig(self):
w, v = np.linalg.eig(self.q)
wx, vx = np.linalg.eig(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w, v = np.linalg.eig(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
assert_array_equal(v, np.eye(3))
@needs_array_function
def test_eigvals(self):
w = np.linalg.eigvals(self.q)
wx = np.linalg.eigvals(self.q.value) << self.q.unit
assert_array_equal(w, wx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w = np.linalg.eigvals(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
@needs_array_function
def test_eigh(self):
w, v = np.linalg.eigh(self.q)
wx, vx = np.linalg.eigh(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
@needs_array_function
def test_eigvalsh(self):
w = np.linalg.eigvalsh(self.q)
wx = np.linalg.eigvalsh(self.q.value) << self.q.unit
assert_array_equal(w, wx)
class TestRecFunctions(metaclass=CoverageMeta):
def test_structured_to_unstructured(self):
# can't unstructure something with incompatible units
with pytest.raises(u.UnitConversionError, match="'m'"):
rfn.structured_to_unstructured(u.Quantity((0, 0.6), u.Unit("(eV, m)")))
# it works if all the units are equal
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, eV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 0.6] * u.eV)
# also if the units are convertible
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, keV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 600] * u.eV)
struct = u.Quantity((0, 0, 1.7827e-33), u.Unit("(eV, eV, g)"))
with u.add_enabled_equivalencies(u.mass_energy()):
unstruct = rfn.structured_to_unstructured(struct)
assert u.allclose(unstruct, [0, 0, 1.0000214] * u.eV)
# and if the dtype is nested
struct = [(5, (400.0, 3e6))] * u.Unit('m, (cm, um)')
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [[5, 4, 3]] * u.m)
# For the other tests of ``structured_to_unstructured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_structured_to_unstructured``
def test_unstructured_to_structured(self):
unstruct = [1, 2, 3] * u.m
dtype = np.dtype([("f1", float), ("f2", float), ("f3", float)])
# it works
struct = rfn.unstructured_to_structured(unstruct, dtype=dtype)
assert struct.unit == u.Unit("(m, m, m)")
assert_array_equal(rfn.structured_to_unstructured(struct), unstruct)
# can't structure something that's already structured
with pytest.raises(ValueError, match="arr must have at least one dimension"):
rfn.unstructured_to_structured(struct, dtype=dtype)
# For the other tests of ``structured_to_unstructured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_unstructured_to_structured``
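# Collect the functions that are deliberately left untested (financial, io,
# polynomial, record-array, and deprecated functions), so the completeness
# checks below can assert that everything else is covered.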
untested_functions = set()
if NUMPY_LT_1_20:
financial_functions = {f for f in all_wrapped_functions.values()
if f in np.lib.financial.__dict__.values()}
untested_functions |= financial_functions
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar, np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander
}
untested_functions |= poly_functions
rec_functions = {
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.repack_fields, rfn.apply_along_fields, rfn.assign_fields_by_name,
rfn.merge_arrays, rfn.stack_arrays, rfn.find_duplicates,
rfn.recursive_fill_fields, rfn.require_fields,
}
untested_functions |= rec_functions
@needs_array_function
def test_testing_completeness():
assert not CoverageMeta.covered.intersection(untested_functions)
assert all_wrapped == (CoverageMeta.covered | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize('one, two', itertools.combinations(
(SUBCLASS_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(FUNCTION_HELPERS.keys()),
set(DISPATCHED_FUNCTIONS.keys())), 2))
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
@needs_array_function
def test_all_included(self):
included_in_helpers = (SUBCLASS_SAFE_FUNCTIONS |
UNSUPPORTED_FUNCTIONS |
set(FUNCTION_HELPERS.keys()) |
set(DISPATCHED_FUNCTIONS.keys()))
assert all_wrapped == included_in_helpers
# untested_functions is created using all_wrapped_functions
@needs_array_function
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS | TBD_FUNCTIONS == untested_functions
|
dd537ad3478e3f3c34a87b4984ec25746e8ab68b03cedb39992d8314d6442fbd | # The purpose of these tests is to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.
import concurrent.futures
import warnings
from collections import namedtuple
import numpy as np
import pytest
from erfa import ufunc as erfa_ufunc
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.units import quantity_helper as qh
from astropy.units.quantity_helper.converters import UfuncHelpers
from astropy.units.quantity_helper.helpers import helper_sqrt
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
testcase = namedtuple('testcase', ['f', 'q_in', 'q_out'])
testexc = namedtuple('testexc', ['f', 'q_in', 'exc', 'msg'])
testwarn = namedtuple('testwarn', ['f', 'q_in', 'wfilter'])
@pytest.mark.skip
def test_testcase(tc):
results = tc.f(*tc.q_in)
# Careful with the following line: it would break on a function returning
# a single tuple (as opposed to a tuple of return values).
results = (results, ) if type(results) != tuple else results
for result, expected in zip(results, tc.q_out):
assert result.unit == expected.unit
assert_allclose(result.value, expected.value, atol=1.E-15)
@pytest.mark.skip
def test_testexc(te):
with pytest.raises(te.exc) as exc:
te.f(*te.q_in)
if te.msg is not None:
assert te.msg in exc.value.args[0]
@pytest.mark.skip
def test_testwarn(tw):
with warnings.catch_warnings():
warnings.filterwarnings(tw.wfilter)
tw.f(*tw.q_in)
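# Illustrative usage sketch (not part of the suite): a ``testcase`` pairs a
# ufunc with input quantities and the expected output quantities, e.g.
# test_testcase(testcase(f=np.sin, q_in=(30. * u.degree,),
#                        q_out=(0.5 * u.dimensionless_unscaled,)))
# asserts that np.sin of 30 deg returns 0.5, dimensionless.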
class TestUfuncHelpers:
# Note that this test should work even if scipy is present, since
# the scipy.special ufuncs are only loaded on demand.
# The test passes independently of whether erfa is already loaded
# (which will be the case for a full test, since coordinates uses it).
def test_coverage(self):
"""Test that we cover all ufunc's"""
all_np_ufuncs = {ufunc for ufunc in np.core.umath.__dict__.values()
if isinstance(ufunc, np.ufunc)}
all_q_ufuncs = (qh.UNSUPPORTED_UFUNCS |
set(qh.UFUNC_HELPERS.keys()))
# Check that every numpy ufunc is covered.
assert all_np_ufuncs - all_q_ufuncs == set()
# Check that all ufuncs we cover come from numpy or erfa.
# (Since coverage for erfa is incomplete, we do not check
# this the other way).
all_erfa_ufuncs = {ufunc for ufunc in erfa_ufunc.__dict__.values()
if isinstance(ufunc, np.ufunc)}
assert (all_q_ufuncs - all_np_ufuncs - all_erfa_ufuncs == set())
def test_scipy_registered(self):
# Should be registered as existing even if scipy is not available.
assert 'scipy.special' in qh.UFUNC_HELPERS.modules
def test_removal_addition(self):
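# UFUNC_HELPERS behaves like a registry: assigning None moves a ufunc to
# UNSUPPORTED_UFUNCS, and assigning a helper moves it back.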
assert np.add in qh.UFUNC_HELPERS
assert np.add not in qh.UNSUPPORTED_UFUNCS
qh.UFUNC_HELPERS[np.add] = None
assert np.add not in qh.UFUNC_HELPERS
assert np.add in qh.UNSUPPORTED_UFUNCS
qh.UFUNC_HELPERS[np.add] = qh.UFUNC_HELPERS[np.subtract]
assert np.add in qh.UFUNC_HELPERS
assert np.add not in qh.UNSUPPORTED_UFUNCS
@pytest.mark.slow
def test_thread_safety(self, fast_thread_switching):
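# Lazily register a module many times over and resolve the helper from
# several threads at once; every thread should get the same helper back,
# guarding against races in UfuncHelpers' lazy registration.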
def dummy_ufunc(*args, **kwargs):
return np.sqrt(*args, **kwargs)
def register():
return {dummy_ufunc: helper_sqrt}
workers = 8
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
for p in range(10000):
helpers = UfuncHelpers()
helpers.register_module(
'astropy.units.tests.test_quantity_ufuncs',
['dummy_ufunc'],
register
)
futures = [executor.submit(lambda: helpers[dummy_ufunc]) for _ in range(workers)]
values = [future.result() for future in futures]
assert values == [helper_sqrt] * workers
class TestQuantityTrigonometricFuncs:
"""
Test trigonometric functions
"""
@pytest.mark.parametrize('tc', (
testcase(
f=np.sin,
q_in=(30. * u.degree, ),
q_out=(0.5*u.dimensionless_unscaled, )
),
testcase(
f=np.sin,
q_in=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
q_out=(np.array([0., 1. / np.sqrt(2.), 1.]) * u.one, )
),
testcase(
f=np.arcsin,
q_in=(np.sin(30. * u.degree), ),
q_out=(np.radians(30.) * u.radian, )
),
testcase(
f=np.arcsin,
q_in=(np.sin(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian), ),
q_out=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, )
),
testcase(
f=np.cos,
q_in=(np.pi / 3. * u.radian, ),
q_out=(0.5 * u.dimensionless_unscaled, )
),
testcase(
f=np.cos,
q_in=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
q_out=(np.array([1., 1. / np.sqrt(2.), 0.]) * u.one, )
),
testcase(
f=np.arccos,
q_in=(np.cos(np.pi / 3. * u.radian), ),
q_out=(np.pi / 3. * u.radian, )
),
testcase(
f=np.arccos,
q_in=(np.cos(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian), ),
q_out=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
),
testcase(
f=np.tan,
q_in=(np.pi / 3. * u.radian, ),
q_out=(np.sqrt(3.) * u.dimensionless_unscaled, )
),
testcase(
f=np.tan,
q_in=(np.array([0., 45., 135., 180.]) * u.degree, ),
q_out=(np.array([0., 1., -1., 0.]) * u.dimensionless_unscaled, )
),
testcase(
f=np.arctan,
q_in=(np.tan(np.pi / 3. * u.radian), ),
q_out=(np.pi / 3. * u.radian, )
),
testcase(
f=np.arctan,
q_in=(np.tan(np.array([10., 30., 70., 80.]) * u.degree), ),
q_out=(np.radians(np.array([10., 30., 70., 80.]) * u.degree), )
),
testcase(
f=np.arctan2,
q_in=(np.array([10., 30., 70., 80.]) * u.m, 2.0 * u.km),
q_out=(np.arctan2(np.array([10., 30., 70., 80.]),
2000.) * u.radian, )
),
testcase(
f=np.arctan2,
q_in=((np.array([10., 80.]) * u.m / (2.0 * u.km)).to(u.one), 1.),
q_out=(np.arctan2(np.array([10., 80.]) / 2000., 1.) * u.radian, )
),
testcase(
f=np.deg2rad,
q_in=(180. * u.degree, ),
q_out=(np.pi * u.radian, )
),
testcase(
f=np.radians,
q_in=(180. * u.degree, ),
q_out=(np.pi * u.radian, )
),
testcase(
f=np.deg2rad,
q_in=(3. * u.radian, ),
q_out=(3. * u.radian, )
),
testcase(
f=np.radians,
q_in=(3. * u.radian, ),
q_out=(3. * u.radian, )
),
testcase(
f=np.rad2deg,
q_in=(60. * u.degree, ),
q_out=(60. * u.degree, )
),
testcase(
f=np.degrees,
q_in=(60. * u.degree, ),
q_out=(60. * u.degree, )
),
testcase(
f=np.rad2deg,
q_in=(np.pi * u.radian, ),
q_out=(180. * u.degree, )
),
testcase(
f=np.degrees,
q_in=(np.pi * u.radian, ),
q_out=(180. * u.degree, )
)
))
def test_testcases(self, tc):
return test_testcase(tc)
@pytest.mark.parametrize('te', (
testexc(
f=np.deg2rad,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.radians,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.rad2deg,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.degrees,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.sin,
q_in=(3. * u.m, ),
exc=TypeError,
msg="Can only apply 'sin' function to quantities with angle units"
),
testexc(
f=np.arcsin,
q_in=(3. * u.m, ),
exc=TypeError,
msg="Can only apply 'arcsin' function to dimensionless quantities"
),
testexc(
f=np.cos,
q_in=(3. * u.s, ),
exc=TypeError,
msg="Can only apply 'cos' function to quantities with angle units"
),
testexc(
f=np.arccos,
q_in=(3. * u.s, ),
exc=TypeError,
msg="Can only apply 'arccos' function to dimensionless quantities"
),
testexc(
f=np.tan,
q_in=(np.array([1, 2, 3]) * u.N, ),
exc=TypeError,
msg="Can only apply 'tan' function to quantities with angle units"
),
testexc(
f=np.arctan,
q_in=(np.array([1, 2, 3]) * u.N, ),
exc=TypeError,
msg="Can only apply 'arctan' function to dimensionless quantities"
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1. * u.s),
exc=u.UnitsError,
msg="compatible dimensions"
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1.),
exc=u.UnitsError,
msg="dimensionless quantities when other arg"
)
))
def test_testexcs(self, te):
return test_testexc(te)
@pytest.mark.parametrize('tw', (
testwarn(
f=np.arcsin,
q_in=(27. * u.pc / (15 * u.kpc), ),
wfilter='error'
),
))
def test_testwarns(self, tw):
return test_testwarn(tw)
class TestQuantityMathFuncs:
"""
Test other mathematical functions
"""
def test_multiply_scalar(self):
assert np.multiply(4. * u.m, 2. / u.s) == 8. * u.m / u.s
assert np.multiply(4. * u.m, 2.) == 8. * u.m
assert np.multiply(4., 2. / u.s) == 8. / u.s
def test_multiply_array(self):
assert np.all(np.multiply(np.arange(3.) * u.m, 2. / u.s) ==
np.arange(0, 6., 2.) * u.m / u.s)
@pytest.mark.skipif(not isinstance(getattr(np, 'matmul', None), np.ufunc),
reason="np.matmul is not yet a gufunc")
def test_matmul(self):
q = np.arange(3.) * u.m
r = np.matmul(q, q)
assert r == 5. * u.m ** 2
# less trivial case.
q1 = np.eye(3) * u.m
q2 = np.array([[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]],
[[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]]]) / u.s
r2 = np.matmul(q1, q2)
assert np.all(r2 == np.matmul(q1.value, q2.value) * q1.unit * q2.unit)
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_scalar(self, function):
assert function(4. * u.m, 2. * u.s) == function(4., 2.) * u.m / u.s
assert function(4. * u.m, 2.) == function(4., 2.) * u.m
assert function(4., 2. * u.s) == function(4., 2.) / u.s
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_array(self, function):
assert np.all(function(np.arange(3.) * u.m, 2. * u.s) ==
function(np.arange(3.), 2.) * u.m / u.s)
def test_floor_divide_remainder_and_divmod(self):
inch = u.Unit(0.0254 * u.m)
dividend = np.array([1., 2., 3.]) * u.m
divisor = np.array([3., 4., 5.]) * inch
quotient = dividend // divisor
remainder = dividend % divisor
assert_allclose(quotient.value, [13., 19., 23.])
assert quotient.unit == u.dimensionless_unscaled
assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
assert remainder.unit == dividend.unit
quotient2 = np.floor_divide(dividend, divisor)
remainder2 = np.remainder(dividend, divisor)
assert np.all(quotient2 == quotient)
assert np.all(remainder2 == remainder)
quotient3, remainder3 = divmod(dividend, divisor)
assert np.all(quotient3 == quotient)
assert np.all(remainder3 == remainder)
with pytest.raises(TypeError):
divmod(dividend, u.km)
with pytest.raises(TypeError):
dividend // u.km
with pytest.raises(TypeError):
dividend % u.km
quotient4, remainder4 = np.divmod(dividend, divisor)
assert np.all(quotient4 == quotient)
assert np.all(remainder4 == remainder)
with pytest.raises(TypeError):
np.divmod(dividend, u.km)
def test_sqrt_scalar(self):
assert np.sqrt(4. * u.m) == 2. * u.m ** 0.5
def test_sqrt_array(self):
assert np.all(np.sqrt(np.array([1., 4., 9.]) * u.m)
== np.array([1., 2., 3.]) * u.m ** 0.5)
def test_square_scalar(self):
assert np.square(4. * u.m) == 16. * u.m ** 2
def test_square_array(self):
assert np.all(np.square(np.array([1., 2., 3.]) * u.m)
== np.array([1., 4., 9.]) * u.m ** 2)
def test_reciprocal_scalar(self):
assert np.reciprocal(4. * u.m) == 0.25 / u.m
def test_reciprocal_array(self):
assert np.all(np.reciprocal(np.array([1., 2., 4.]) * u.m)
== np.array([1., 0.5, 0.25]) / u.m)
def test_heaviside_scalar(self):
assert np.heaviside(0. * u.m, 0.5) == 0.5 * u.dimensionless_unscaled
assert np.heaviside(0. * u.s,
25 * u.percent) == 0.25 * u.dimensionless_unscaled
assert np.heaviside(2. * u.J, 0.25) == 1. * u.dimensionless_unscaled
def test_heaviside_array(self):
values = np.array([-1., 0., 0., +1.])
halfway = np.array([0.75, 0.25, 0.75, 0.25]) * u.dimensionless_unscaled
assert np.all(np.heaviside(values * u.m,
halfway * u.dimensionless_unscaled) ==
[0, 0.25, 0.75, +1.] * u.dimensionless_unscaled)
@pytest.mark.parametrize('function', (np.cbrt, ))
def test_cbrt_scalar(self, function):
assert function(8. * u.m**3) == 2. * u.m
@pytest.mark.parametrize('function', (np.cbrt, ))
def test_cbrt_array(self, function):
# Calculate cbrt on both sides since on Windows the cube root of 64
# does not exactly equal 4. See #4388.
values = np.array([1., 8., 64.])
assert np.all(function(values * u.m**3) ==
function(values) * u.m)
def test_power_scalar(self):
assert np.power(4. * u.m, 2.) == 16. * u.m ** 2
assert np.power(4., 200. * u.cm / u.m) == \
u.Quantity(16., u.dimensionless_unscaled)
# regression check on #1696
assert np.power(4. * u.m, 0.) == 1. * u.dimensionless_unscaled
def test_power_array(self):
assert np.all(np.power(np.array([1., 2., 3.]) * u.m, 3.)
== np.array([1., 8., 27.]) * u.m ** 3)
# regression check on #1696
assert np.all(np.power(np.arange(4.) * u.m, 0.) ==
1. * u.dimensionless_unscaled)
def test_float_power_array(self):
assert np.all(np.float_power(np.array([1., 2., 3.]) * u.m, 3.)
== np.array([1., 8., 27.]) * u.m ** 3)
# regression check on #1696
assert np.all(np.float_power(np.arange(4.) * u.m, 0.) ==
1. * u.dimensionless_unscaled)
def test_power_array_array(self):
with pytest.raises(ValueError):
np.power(4. * u.m, [2., 4.])
def test_power_array_array2(self):
with pytest.raises(ValueError):
np.power([2., 4.] * u.m, [2., 4.])
def test_power_array_array3(self):
# Identical unit fractions are converted automatically to dimensionless
# and should be allowed as base for np.power: #4764
q = [2., 4.] * u.m / u.m
powers = [2., 4.]
res = np.power(q, powers)
assert np.all(res.value == q.value ** powers)
assert res.unit == u.dimensionless_unscaled
# The same holds for unit fractions that are scaled dimensionless.
q2 = [2., 4.] * u.m / u.cm
# Test also against different types of exponent
for cls in (list, tuple, np.array, np.ma.array, u.Quantity):
res2 = np.power(q2, cls(powers))
assert np.all(res2.value == q2.to_value(1) ** powers)
assert res2.unit == u.dimensionless_unscaled
# Though for single powers, we keep the composite unit.
res3 = q2 ** 2
assert np.all(res3.value == q2.value ** 2)
assert res3.unit == q2.unit ** 2
assert np.all(res3 == q2 ** [2, 2])
def test_power_invalid(self):
with pytest.raises(TypeError) as exc:
np.power(3., 4. * u.m)
assert "raise something to a dimensionless" in exc.value.args[0]
def test_copysign_scalar(self):
assert np.copysign(3 * u.m, 1.) == 3. * u.m
assert np.copysign(3 * u.m, 1. * u.s) == 3. * u.m
assert np.copysign(3 * u.m, -1.) == -3. * u.m
assert np.copysign(3 * u.m, -1. * u.s) == -3. * u.m
def test_copysign_array(self):
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1.) ==
-np.array([1., 2., 3.]) * u.s)
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1. * u.m) ==
-np.array([1., 2., 3.]) * u.s)
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s,
np.array([-2., 2., -4.]) * u.m) ==
np.array([-1., 2., -3.]) * u.s)
q = np.copysign(np.array([1., 2., 3.]), -3 * u.m)
assert np.all(q == np.array([-1., -2., -3.]))
assert not isinstance(q, u.Quantity)
def test_ldexp_scalar(self):
assert np.ldexp(4. * u.m, 2) == 16. * u.m
def test_ldexp_array(self):
assert np.all(np.ldexp(np.array([1., 2., 3.]) * u.m, [3, 2, 1])
== np.array([8., 8., 6.]) * u.m)
def test_ldexp_invalid(self):
with pytest.raises(TypeError):
np.ldexp(3. * u.m, 4.)
with pytest.raises(TypeError):
np.ldexp(3., u.Quantity(4, u.m, dtype=int))
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_scalar(self, function):
q = function(3. * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(0.5)
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_array(self, function):
q = function(np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value
== function(np.array([1. / 3., 1. / 2., 1.])))
# should also work on quantities that can be made dimensionless
q2 = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value,
function(np.array([100. / 3., 100. / 2., 100.])))
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_invalid_units(self, function):
# Can't use exp() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
function(3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{}' function to "
"dimensionless quantities"
.format(function.__name__))
def test_modf_scalar(self):
q = np.modf(9. * u.m / (600. * u.cm))
assert q == (0.5 * u.dimensionless_unscaled,
1. * u.dimensionless_unscaled)
def test_modf_array(self):
v = np.arange(10.) * u.m / (500. * u.cm)
q = np.modf(v)
n = np.modf(v.to_value(u.dimensionless_unscaled))
assert q[0].unit == u.dimensionless_unscaled
assert q[1].unit == u.dimensionless_unscaled
assert all(q[0].value == n[0])
assert all(q[1].value == n[1])
def test_frexp_scalar(self):
q = np.frexp(3. * u.m / (6. * u.m))
assert q == (np.array(0.5), np.array(0.0))
def test_frexp_array(self):
q = np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert all((_q0, _q1) == np.frexp(_d) for _q0, _q1, _d
in zip(q[0], q[1], [1. / 3., 1. / 2., 1.]))
def test_frexp_invalid_units(self):
# Can't use frexp() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
np.frexp(3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply 'frexp' function to "
"unscaled dimensionless quantities")
# also does not work on quantities that can be made dimensionless
with pytest.raises(TypeError) as exc:
np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert exc.value.args[0] == ("Can only apply 'frexp' function to "
"unscaled dimensionless quantities")
@pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_array(self, function):
q = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm), 1.)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value,
function(np.array([100. / 3., 100. / 2., 100.]), 1.))
@pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_invalid_units(self, function):
with pytest.raises(TypeError) as exc:
function(1. * u.km / u.s, 3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{}' function to "
"dimensionless quantities"
.format(function.__name__))
class TestInvariantUfuncs:
@pytest.mark.parametrize(('ufunc'), [np.absolute, np.fabs,
np.conj, np.conjugate,
np.negative, np.spacing, np.rint,
np.floor, np.ceil, np.positive])
def test_invariant_scalar(self, ufunc):
q_i = 4.7 * u.m
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert q_o.value == ufunc(q_i.value)
@pytest.mark.parametrize(('ufunc'), [np.absolute, np.conjugate,
np.negative, np.rint,
np.floor, np.ceil])
def test_invariant_array(self, ufunc):
q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert np.all(q_o.value == ufunc(q_i.value))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_scalar(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.km
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_array(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.us
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(('ufunc', 'arbitrary'), [
(np.add, 0.), (np.subtract, 0.), (np.hypot, 0.),
(np.maximum, 0.), (np.minimum, 0.), (np.nextafter, 0.),
(np.remainder, np.inf), (np.mod, np.inf), (np.fmod, np.inf)])
def test_invariant_twoarg_one_arbitrary(self, ufunc, arbitrary):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i1, arbitrary)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError) as exc:
ufunc(q_i1, q_i2)
assert "compatible dimensions" in exc.value.args[0]
class TestComparisonUfuncs:
@pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
np.less, np.less_equal,
np.not_equal, np.equal])
def test_comparison_valid_units(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.Ms
q_o = ufunc(q_i1, q_i2)
assert not isinstance(q_o, u.Quantity)
assert q_o.dtype == bool
assert np.all(q_o == ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
q_o2 = ufunc(q_i1 / q_i2, 2.)
assert not isinstance(q_o2, u.Quantity)
assert q_o2.dtype == bool
assert np.all(q_o2 == ufunc((q_i1 / q_i2)
.to_value(u.dimensionless_unscaled), 2.))
# comparison with 0., inf, nan is OK even for dimensional quantities
# (though ignore numpy runtime warnings for comparisons with nan).
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
for arbitrary_unit_value in (0., np.inf, np.nan):
ufunc(q_i1, arbitrary_unit_value)
ufunc(q_i1, arbitrary_unit_value*np.ones(len(q_i1)))
# and just for completeness
ufunc(q_i1, np.array([0., np.inf, np.nan]))
@pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
np.less, np.less_equal,
np.not_equal, np.equal])
def test_comparison_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError) as exc:
ufunc(q_i1, q_i2)
assert "compatible dimensions" in exc.value.args[0]
@pytest.mark.parametrize('ufunc', (np.isfinite, np.isinf, np.isnan,
np.signbit))
def test_onearg_test_ufuncs(self, ufunc):
q = [1., np.inf, -np.inf, np.nan, -1., 0.] * u.m
out = ufunc(q)
assert not isinstance(out, u.Quantity)
assert out.dtype == bool
assert np.all(out == ufunc(q.value))
# Ignore RuntimeWarning raised on Windows and s390.
@pytest.mark.filterwarnings('ignore:.*invalid value encountered in sign')
def test_sign(self):
q = [1., np.inf, -np.inf, np.nan, -1., 0.] * u.m
out = np.sign(q)
assert not isinstance(out, u.Quantity)
assert out.dtype == q.dtype
assert np.all((out == np.sign(q.value)) |
(np.isnan(out) & np.isnan(q.value)))
class TestInplaceUfuncs:
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_ufunc_inplace(self, value):
# without scaling
s = value * u.rad
check = s
np.sin(s, out=s)
assert check is s
assert check.unit == u.dimensionless_unscaled
# with scaling
s2 = (value * u.rad).to(u.deg)
check2 = s2
np.sin(s2, out=s2)
assert check2 is s2
assert check2.unit == u.dimensionless_unscaled
assert_allclose(s.value, s2.value)
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_ufunc_inplace_2(self, value):
"""Check inplace works with non-quantity input and quantity output"""
s = value * u.m
check = s
np.absolute(value, out=s)
assert check is s
assert np.all(check.value == np.absolute(value))
assert check.unit is u.dimensionless_unscaled
np.sqrt(value, out=s)
assert check is s
assert np.all(check.value == np.sqrt(value))
assert check.unit is u.dimensionless_unscaled
np.exp(value, out=s)
assert check is s
assert np.all(check.value == np.exp(value))
assert check.unit is u.dimensionless_unscaled
np.arcsin(value/10., out=s)
assert check is s
assert np.all(check.value == np.arcsin(value/10.))
assert check.unit is u.radian
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_two_output_ufunc_inplace(self, value):
v = 100. * value * u.cm / u.m
v_copy = v.copy()
tmp = v.copy()
check = v
np.modf(v, tmp, v)
assert check is v
assert check.unit == u.dimensionless_unscaled
v2 = v_copy.to(u.dimensionless_unscaled)
check2 = v2
np.modf(v2, tmp, v2)
assert check2 is v2
assert check2.unit == u.dimensionless_unscaled
# can also replace in last position if no scaling is needed
v3 = v_copy.to(u.dimensionless_unscaled)
check3 = v3
np.modf(v3, v3, tmp)
assert check3 is v3
assert check3.unit == u.dimensionless_unscaled
# can also replace input with first output when scaling
v4 = v_copy.copy()
check4 = v4
np.modf(v4, v4, tmp)
assert check4 is v4
assert check4.unit == u.dimensionless_unscaled
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_ufunc_inplace_1(self, value):
s = value * u.cycle
check = s
s /= 2.
assert check is s
assert np.all(check.value == value / 2.)
s /= u.s
assert check is s
assert check.unit == u.cycle / u.s
s *= 2. * u.s
assert check is s
assert np.all(check == value * u.cycle)
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_ufunc_inplace_2(self, value):
s = value * u.cycle
check = s
np.arctan2(s, s, out=s)
assert check is s
assert check.unit == u.radian
with pytest.raises(u.UnitsError):
s += 1. * u.m
assert check is s
assert check.unit == u.radian
np.arctan2(1. * u.deg, s, out=s)
assert check is s
assert check.unit == u.radian
np.add(1. * u.deg, s, out=s)
assert check is s
assert check.unit == u.deg
np.multiply(2. / u.s, s, out=s)
assert check is s
assert check.unit == u.deg / u.s
def test_two_argument_ufunc_inplace_3(self):
s = np.array([1., 2., 3.]) * u.dimensionless_unscaled
np.add(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s)
assert np.all(s.value == np.array([3., 6., 9.]))
assert s.unit is u.dimensionless_unscaled
np.arctan2(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s)
assert_allclose(s.value, np.arctan2(1., 2.))
assert s.unit is u.radian
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_two_output_ufunc_inplace(self, value):
v = value * u.m
divisor = 70.*u.cm
v1 = v.copy()
tmp = v.copy()
check = np.divmod(v1, divisor, out=(tmp, v1))
assert check[0] is tmp and check[1] is v1
assert tmp.unit == u.dimensionless_unscaled
assert v1.unit == v.unit
v2 = v.copy()
check2 = np.divmod(v2, divisor, out=(v2, tmp))
assert check2[0] is v2 and check2[1] is tmp
assert v2.unit == u.dimensionless_unscaled
assert tmp.unit == v.unit
v3a = v.copy()
v3b = v.copy()
check3 = np.divmod(v3a, divisor, out=(v3a, v3b))
assert check3[0] is v3a and check3[1] is v3b
assert v3a.unit == u.dimensionless_unscaled
assert v3b.unit == v.unit
def test_ufunc_inplace_non_contiguous_data(self):
# ensure inplace works also for non-contiguous data (closes #1834)
s = np.arange(10.) * u.m
s_copy = s.copy()
s2 = s[::2]
s2 += 1. * u.cm
assert np.all(s[::2] > s_copy[::2])
assert np.all(s[1::2] == s_copy[1::2])
def test_ufunc_inplace_non_standard_dtype(self):
"""Check that inplace operations check properly for casting.
The first two tests check that float32 is kept (closes #3976).
"""
a1 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a1 *= np.float32(10)
assert a1.unit is u.m
assert a1.dtype == np.float32
a2 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a2 += (20.*u.km)
assert a2.unit is u.m
assert a2.dtype == np.float32
# For integer, in-place only works if no conversion is done.
a3 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
a3 += u.Quantity(10, u.m, dtype=np.int64)
assert a3.unit is u.m
assert a3.dtype == np.int32
a4 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
with pytest.raises(TypeError):
a4 += u.Quantity(10, u.mm, dtype=np.int64)
@pytest.mark.parametrize('ufunc', (np.equal, np.greater))
def test_comparison_ufuncs_inplace(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.Ms
check = np.empty(q_i1.shape, bool)
ufunc(q_i1.value, q_i2.to_value(q_i1.unit), out=check)
result = np.empty(q_i1.shape, bool)
q_o = ufunc(q_i1, q_i2, out=result)
assert q_o is result
assert type(q_o) is np.ndarray
assert q_o.dtype == bool
assert np.all(q_o == check)
@pytest.mark.parametrize('ufunc', (np.isfinite, np.signbit))
def test_onearg_test_ufuncs_inplace(self, ufunc):
q = [1., np.inf, -np.inf, np.nan, -1., 0.] * u.m
check = np.empty(q.shape, bool)
ufunc(q.value, out=check)
result = np.empty(q.shape, bool)
out = ufunc(q, out=result)
assert out is result
assert type(out) is np.ndarray
assert out.dtype == bool
assert np.all(out == ufunc(q.value))
# Ignore RuntimeWarning raised on Windows and s390.
@pytest.mark.filterwarnings('ignore:.*invalid value encountered in sign')
def test_sign_inplace(self):
q = [1., np.inf, -np.inf, np.nan, -1., 0.] * u.m
check = np.empty(q.shape, q.dtype)
np.sign(q.value, out=check)
result = np.empty(q.shape, q.dtype)
out = np.sign(q, out=result)
assert out is result
assert type(out) is np.ndarray
assert out.dtype == q.dtype
assert np.all((out == np.sign(q.value)) |
(np.isnan(out) & np.isnan(q.value)))
@pytest.mark.skipif(not hasattr(np.core.umath, 'clip'),
reason='no clip ufunc available')
class TestClip:
"""Test the clip ufunc.
In numpy, this is hidden behind a function that does not do backwards
compatibility checks. We explicitly test the ufunc here.
"""
def setup(self):
self.clip = np.core.umath.clip
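# np.core.umath.clip is the raw ufunc that the np.clip function wraps.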
def test_clip_simple(self):
q = np.arange(-1., 10.) * u.m
q_min = 125 * u.cm
q_max = 0.0055 * u.km
result = self.clip(q, q_min, q_max)
assert result.unit == q.unit
expected = self.clip(q.value, q_min.to_value(q.unit),
q_max.to_value(q.unit)) * q.unit
assert np.all(result == expected)
def test_clip_unitless_parts(self):
q = np.arange(-1., 10.) * u.m
qlim = 0.0055 * u.km
# one-sided
result1 = self.clip(q, -np.inf, qlim)
expected1 = self.clip(q.value, -np.inf, qlim.to_value(q.unit)) * q.unit
assert np.all(result1 == expected1)
result2 = self.clip(q, qlim, np.inf)
expected2 = self.clip(q.value, qlim.to_value(q.unit), np.inf) * q.unit
assert np.all(result2 == expected2)
# Zero
result3 = self.clip(q, np.zeros(q.shape), qlim)
expected3 = self.clip(q.value, 0, qlim.to_value(q.unit)) * q.unit
assert np.all(result3 == expected3)
# Two unitless parts, array-shaped.
result4 = self.clip(q, np.zeros(q.shape), np.full(q.shape, np.inf))
expected4 = self.clip(q.value, 0, np.inf) * q.unit
assert np.all(result4 == expected4)
def test_clip_dimensionless(self):
q = np.arange(-1., 10.) * u.dimensionless_unscaled
result = self.clip(q, 200 * u.percent, 5.)
expected = self.clip(q, 2., 5.)
assert result.unit == u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_ndarray(self):
a = np.arange(-1., 10.)
result = self.clip(a, 200 * u.percent, 5. * u.dimensionless_unscaled)
assert isinstance(result, u.Quantity)
expected = self.clip(a, 2., 5.) * u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_quantity_inplace(self):
q = np.arange(-1., 10.) * u.m
q_min = 125 * u.cm
q_max = 0.0055 * u.km
expected = self.clip(q.value, q_min.to_value(q.unit),
q_max.to_value(q.unit)) * q.unit
result = self.clip(q, q_min, q_max, out=q)
assert result is q
assert np.all(result == expected)
def test_clip_ndarray_dimensionless_output(self):
a = np.arange(-1., 10.)
q = np.zeros_like(a) * u.m
expected = self.clip(a, 2., 5.) * u.dimensionless_unscaled
result = self.clip(a, 200 * u.percent, 5. * u.dimensionless_unscaled,
out=q)
assert result is q
assert result.unit == u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_errors(self):
q = np.arange(-1., 10.) * u.m
with pytest.raises(u.UnitsError):
self.clip(q, 0, 1*u.s)
with pytest.raises(u.UnitsError):
self.clip(q.value, 0, 1*u.s)
with pytest.raises(u.UnitsError):
self.clip(q, -1, 0.)
with pytest.raises(u.UnitsError):
self.clip(q, 0., 1.)
class TestUfuncAt:
"""Test that 'at' method for ufuncs (calculates in-place at given indices)
For Quantities, since calculations are in-place, it makes sense only
if the result is still a quantity, and if the unit does not have to change
"""
def test_one_argument_ufunc_at(self):
q = np.arange(10.) * u.m
i = np.array([1, 2])
qv = q.value.copy()
np.negative.at(q, i)
np.negative.at(qv, i)
assert np.all(q.value == qv)
assert q.unit is u.m
# cannot change from quantity to bool array
with pytest.raises(TypeError):
np.isfinite.at(q, i)
# for selective in-place, cannot change the unit
with pytest.raises(u.UnitsError):
np.square.at(q, i)
# except if the unit does not change (i.e., dimensionless)
d = np.arange(10.) * u.dimensionless_unscaled
dv = d.value.copy()
np.square.at(d, i)
np.square.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
d = np.arange(10.) * u.dimensionless_unscaled
dv = d.value.copy()
np.log.at(d, i)
np.log.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
# also for sine it doesn't work, even if given an angle
a = np.arange(10.) * u.radian
with pytest.raises(u.UnitsError):
np.sin.at(a, i)
# except, for consistency, if we have made radian equivalent to
# dimensionless (though hopefully it will never be needed)
av = a.value.copy()
with u.add_enabled_equivalencies(u.dimensionless_angles()):
np.sin.at(a, i)
np.sin.at(av, i)
assert_allclose(a.value, av)
# but we won't do double conversion
ad = np.arange(10.) * u.degree
with pytest.raises(u.UnitsError):
np.sin.at(ad, i)
def test_two_argument_ufunc_at(self):
s = np.arange(10.) * u.m
i = np.array([1, 2])
check = s.value.copy()
np.add.at(s, i, 1.*u.km)
np.add.at(check, i, 1000.)
assert np.all(s.value == check)
assert s.unit is u.m
with pytest.raises(u.UnitsError):
np.add.at(s, i, 1.*u.s)
# also raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.at(s, i, 1*u.s)
# but be fine if it does not
s = np.arange(10.) * u.m
check = s.value.copy()
np.multiply.at(s, i, 2.*u.dimensionless_unscaled)
np.multiply.at(check, i, 2)
assert np.all(s.value == check)
s = np.arange(10.) * u.m
np.multiply.at(s, i, 2.)
assert np.all(s.value == check)
# of course cannot change class of data either
with pytest.raises(TypeError):
np.greater.at(s, i, 1.*u.km)
class TestUfuncReduceReduceatAccumulate:
"""Test 'reduce', 'reduceat' and 'accumulate' methods for ufuncs
For Quantities, it makes sense only if the unit does not have to change
"""
def test_one_argument_ufunc_reduce_accumulate(self):
# one argument cannot be used
s = np.arange(10.) * u.radian
i = np.array([0, 5, 1, 6])
with pytest.raises(ValueError):
np.sin.reduce(s)
with pytest.raises(ValueError):
np.sin.accumulate(s)
with pytest.raises(ValueError):
np.sin.reduceat(s, i)
def test_two_argument_ufunc_reduce_accumulate(self):
s = np.arange(10.) * u.m
i = np.array([0, 5, 1, 6])
check = s.value.copy()
s_add_reduce = np.add.reduce(s)
check_add_reduce = np.add.reduce(check)
assert s_add_reduce.value == check_add_reduce
assert s_add_reduce.unit is u.m
s_add_accumulate = np.add.accumulate(s)
check_add_accumulate = np.add.accumulate(check)
assert np.all(s_add_accumulate.value == check_add_accumulate)
assert s_add_accumulate.unit is u.m
s_add_reduceat = np.add.reduceat(s, i)
check_add_reduceat = np.add.reduceat(check, i)
assert np.all(s_add_reduceat.value == check_add_reduceat)
assert s_add_reduceat.unit is u.m
# reduce(at) or accumulate on comparisons makes no sense, as the
# intermediate result is not even a Quantity.
with pytest.raises(TypeError):
np.greater.reduce(s)
with pytest.raises(TypeError):
np.greater.accumulate(s)
with pytest.raises(TypeError):
np.greater.reduceat(s, i)
# raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.reduce(s)
with pytest.raises(u.UnitsError):
np.multiply.accumulate(s)
with pytest.raises(u.UnitsError):
np.multiply.reduceat(s, i)
# but be fine if it does not
s = np.arange(10.) * u.dimensionless_unscaled
check = s.value.copy()
s_multiply_reduce = np.multiply.reduce(s)
check_multiply_reduce = np.multiply.reduce(check)
assert s_multiply_reduce.value == check_multiply_reduce
assert s_multiply_reduce.unit is u.dimensionless_unscaled
s_multiply_accumulate = np.multiply.accumulate(s)
check_multiply_accumulate = np.multiply.accumulate(check)
assert np.all(s_multiply_accumulate.value == check_multiply_accumulate)
assert s_multiply_accumulate.unit is u.dimensionless_unscaled
s_multiply_reduceat = np.multiply.reduceat(s, i)
check_multiply_reduceat = np.multiply.reduceat(check, i)
assert np.all(s_multiply_reduceat.value == check_multiply_reduceat)
assert s_multiply_reduceat.unit is u.dimensionless_unscaled
class TestUfuncOuter:
"""Test 'outer' methods for ufuncs
Just a few spot checks, since it uses the same code as the regular
ufunc call
"""
def test_one_argument_ufunc_outer(self):
# one argument cannot be used
s = np.arange(10.) * u.radian
with pytest.raises(ValueError):
np.sin.outer(s)
def test_two_argument_ufunc_outer(self):
s1 = np.arange(10.) * u.m
s2 = np.arange(2.) * u.s
check1 = s1.value
check2 = s2.value
s12_multiply_outer = np.multiply.outer(s1, s2)
check12_multiply_outer = np.multiply.outer(check1, check2)
assert np.all(s12_multiply_outer.value == check12_multiply_outer)
assert s12_multiply_outer.unit == s1.unit * s2.unit
# raise UnitsError if appropriate
with pytest.raises(u.UnitsError):
np.add.outer(s1, s2)
# but be fine if it does not
s3 = np.arange(2.) * s1.unit
check3 = s3.value
s13_add_outer = np.add.outer(s1, s3)
check13_add_outer = np.add.outer(check1, check3)
assert np.all(s13_add_outer.value == check13_add_outer)
assert s13_add_outer.unit is s1.unit
s13_greater_outer = np.greater.outer(s1, s3)
check13_greater_outer = np.greater.outer(check1, check3)
assert type(s13_greater_outer) is np.ndarray
assert np.all(s13_greater_outer == check13_greater_outer)
if HAS_SCIPY:
from scipy import special as sps
erf_like_ufuncs = (
sps.erf, sps.erfc, sps.erfcx, sps.erfi,
sps.gamma, sps.gammaln, sps.loggamma, sps.gammasgn, sps.psi,
sps.rgamma, sps.digamma, sps.wofz, sps.dawsn,
sps.entr, sps.exprel, sps.expm1, sps.log1p, sps.exp2, sps.exp10)
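# In older scipy versions erfinv/erfcinv are plain Python functions rather
# than ufuncs; only include them here when they are real ufuncs.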
if isinstance(sps.erfinv, np.ufunc):
erf_like_ufuncs += (sps.erfinv, sps.erfcinv)
def test_scipy_registration():
"""Check that scipy gets loaded upon first use."""
assert sps.erf not in qh.UFUNC_HELPERS
sps.erf(1. * u.percent)
assert sps.erf in qh.UFUNC_HELPERS
if isinstance(sps.erfinv, np.ufunc):
assert sps.erfinv in qh.UFUNC_HELPERS
else:
assert sps.erfinv not in qh.UFUNC_HELPERS
class TestScipySpecialUfuncs:
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_scalar(self, function):
TestQuantityMathFuncs.test_exp_scalar(None, function)
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_array(self, function):
TestQuantityMathFuncs.test_exp_array(None, function)
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_invalid_units(self, function):
TestQuantityMathFuncs.test_exp_invalid_units(None, function)
@pytest.mark.parametrize('function', (sps.cbrt, ))
def test_cbrt_scalar(self, function):
TestQuantityMathFuncs.test_cbrt_scalar(None, function)
@pytest.mark.parametrize('function', (sps.cbrt, ))
def test_cbrt_array(self, function):
TestQuantityMathFuncs.test_cbrt_array(None, function)
@pytest.mark.parametrize('function', (sps.radian, ))
def test_radian(self, function):
q1 = function(180. * u.degree, 0. * u.arcmin, 0. * u.arcsec)
assert_allclose(q1.value, np.pi)
assert q1.unit == u.radian
q2 = function(0. * u.degree, 30. * u.arcmin, 0. * u.arcsec)
assert_allclose(q2.value, (30. * u.arcmin).to(u.radian).value)
assert q2.unit == u.radian
q3 = function(0. * u.degree, 0. * u.arcmin, 30. * u.arcsec)
assert_allclose(q3.value, (30. * u.arcsec).to(u.radian).value)
# the following doesn't make much sense in terms of the name of the
# routine, but we check it gives the correct result.
q4 = function(3. * u.radian, 0. * u.arcmin, 0. * u.arcsec)
assert_allclose(q4.value, 3.)
assert q4.unit == u.radian
with pytest.raises(TypeError):
function(3. * u.m, 2. * u.s, 1. * u.kg)
jv_like_ufuncs = (
sps.jv, sps.jn, sps.jve, sps.yn, sps.yv, sps.yve, sps.kn, sps.kv,
sps.kve, sps.iv, sps.ive, sps.hankel1, sps.hankel1e, sps.hankel2,
sps.hankel2e)
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_scalar(self, function):
q = function(2. * u.m / (2. * u.m), 3. * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(1.0, 0.5)
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_array(self, function):
q = function(np.ones(3) * u.m / (1. * u.m),
np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == function(
np.ones(3),
np.array([1. / 3., 1. / 2., 1.]))
)
# should also work on quantities that can be made dimensionless
q2 = function(np.ones(3) * u.m / (1. * u.m),
np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value,
function(np.ones(3),
np.array([100. / 3., 100. / 2., 100.])))
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_invalid_units(self, function):
# Can't use jv() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
function(1. * u.kg, 3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{}' function to "
"dimensionless quantities"
.format(function.__name__))
|
e8b405d6612670fa30d1c63208c786309865eb84234dd19bbf77783641b992ff | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import sys
import typing
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
from astropy import units as u
from astropy.units._typing import HAS_ANNOTATED
# list of pairs (target unit/physical type, input unit)
x_inputs = [(u.arcsec, u.deg), ('angle', u.deg),
(u.kpc/u.Myr, u.km/u.s), ('speed', u.km/u.s),
([u.arcsec, u.km], u.deg), ([u.arcsec, u.km], u.km), # multiple allowed
(['angle', 'length'], u.deg), (['angle', 'length'], u.km)]
y_inputs = [(u.m, u.km), (u.km, u.m),
(u.arcsec, u.deg), ('angle', u.deg),
(u.kpc/u.Myr, u.km/u.s), ('speed', u.km/u.s)]
@pytest.fixture(scope="module",
params=list(range(len(x_inputs))))
def x_input(request):
return x_inputs[request.param]
@pytest.fixture(scope="module",
params=list(range(len(y_inputs))))
def y_input(request):
return y_inputs[request.param]
# ---- Tests that use the fixtures defined above ----
def test_args(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1*x_unit, 1*y_unit)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_args_nonquantity(x_input):
x_target, x_unit = x_input
@u.quantity_input(x=x_target)
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1*x_unit, 100)
assert isinstance(x, u.Quantity)
assert isinstance(y, int)
assert x.unit == x_unit
def test_wrong_unit(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
with pytest.raises(u.UnitsError) as e:
x, y = myfunc_args(1*x_unit, 100*u.Joule) # has to be an unspecified unit
str_to = str(y_target)
assert str(e.value) == f"Argument 'y' to function 'myfunc_args' must be in units convertible to '{str_to}'."
def test_wrong_unit_annotated(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input
def myfunc_args(x: x_target, y: y_target):
return x, y
with pytest.raises(u.UnitsError, match="Argument 'y' to function 'myfunc_args'"):
x, y = myfunc_args(1*x_unit, 100*u.Joule) # has to be an unspecified unit
def test_not_quantity(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
with pytest.raises(TypeError) as e:
x, y = myfunc_args(1*x_unit, 100)
assert str(e.value) == "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. You should pass in an astropy Quantity instead."
def test_not_quantity_annotated(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input
def myfunc_args(x: x_target, y: y_target):
return x, y
with pytest.raises(TypeError) as e:
x, y = myfunc_args(1*x_unit, 100)
assert str(e.value) == "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. You should pass in an astropy Quantity instead."
def test_kwargs(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, my_arg, y=1*y_unit):
return x, my_arg, y
x, my_arg, y = myfunc_args(1*x_unit, 100, y=100*y_unit)
assert isinstance(x, u.Quantity)
assert isinstance(my_arg, int)
assert isinstance(y, u.Quantity)
assert y.unit == y_unit
def test_unused_kwargs(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, my_arg1, y=y_unit, my_arg2=1000):
return x, my_arg1, y, my_arg2
x, my_arg1, y, my_arg2 = myfunc_args(1*x_unit, 100,
y=100*y_unit, my_arg2=10)
assert isinstance(x, u.Quantity)
assert isinstance(my_arg1, int)
assert isinstance(y, u.Quantity)
assert isinstance(my_arg2, int)
assert y.unit == y_unit
assert my_arg2 == 10
def test_kwarg_wrong_unit(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10*y_unit):
return x, y
with pytest.raises(u.UnitsError) as e:
x, y = myfunc_args(1*x_unit, y=100*u.Joule)
str_to = str(y_target)
assert str(e.value) == f"Argument 'y' to function 'myfunc_args' must be in units convertible to '{str_to}'."
def test_kwarg_not_quantity(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10*y_unit):
return x, y
with pytest.raises(TypeError) as e:
x, y = myfunc_args(1*x_unit, y=100)
assert str(e.value) == "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. You should pass in an astropy Quantity instead."
def test_kwarg_default(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10*y_unit):
return x, y
x, y = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_kwargs_input(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x=1*x_unit, y=1*y_unit):
return x, y
kwargs = {'x': 10*x_unit, 'y': 10*y_unit}
x, y = myfunc_args(**kwargs)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_kwargs_extra(x_input):
x_target, x_unit = x_input
@u.quantity_input(x=x_target)
def myfunc_args(x, **kwargs):
return x
x = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
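# A minimal sketch of the decorator pattern exercised by these tests
# (illustrative; `travel_time` is a hypothetical example function):
#     >>> import astropy.units as u
#     >>> @u.quantity_input(distance=u.km, speed=u.km / u.s)
#     ... def travel_time(distance, speed):
#     ...     return distance / speed
#     >>> travel_time(100 * u.km, 50 * u.km / u.s)   # -> <Quantity 2. s>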
# ---- Tests that don't use the fixtures ----
@pytest.mark.parametrize("x_unit,y_unit", [
(u.arcsec, u.eV),
('angle', 'energy')])
def test_arg_equivalencies(x_unit, y_unit):
@u.quantity_input(x=x_unit, y=y_unit,
equivalencies=u.mass_energy())
def myfunc_args(x, y):
return x, y+(10*u.J) # Add an energy to check equiv is working
x, y = myfunc_args(1*u.arcsec, 100*u.gram)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == u.arcsec
assert y.unit == u.gram
@pytest.mark.parametrize("x_unit,energy_unit", [
(u.arcsec, u.eV),
('angle', 'energy')])
def test_kwarg_equivalencies(x_unit, energy_unit):
@u.quantity_input(x=x_unit, energy=energy_unit, equivalencies=u.mass_energy())
def myfunc_args(x, energy=10*u.eV):
return x, energy+(10*u.J) # Add an energy to check equiv is working
x, energy = myfunc_args(1*u.arcsec, 100*u.gram)
assert isinstance(x, u.Quantity)
assert isinstance(energy, u.Quantity)
assert x.unit == u.arcsec
assert energy.unit == u.gram
def test_no_equivalent():
class test_unit:
pass
class test_quantity:
unit = test_unit()
@u.quantity_input(x=u.arcsec)
def myfunc_args(x):
return x
with pytest.raises(TypeError) as e:
x, y = myfunc_args(test_quantity())
assert str(e.value) == "Argument 'x' to function 'myfunc_args' has a 'unit' attribute without an 'is_equivalent' method. You should pass in an astropy Quantity instead."
def test_kwarg_invalid_physical_type():
@u.quantity_input(x='angle', y='africanswallow')
def myfunc_args(x, y=10*u.deg):
return x, y
with pytest.raises(ValueError) as e:
x, y = myfunc_args(1*u.arcsec, y=100*u.deg)
assert str(e.value) == "Invalid unit or physical type 'africanswallow'."
def test_default_value_check():
x_target = u.deg
x_unit = u.arcsec
with pytest.raises(TypeError):
@u.quantity_input(x=x_target)
def myfunc_args(x=1.):
return x
x = myfunc_args()
x = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
def test_str_unit_typo():
@u.quantity_input
def myfunc_args(x: "kilograam"):
return x
with pytest.raises(ValueError):
result = myfunc_args(u.kg)
@pytest.mark.skipif(not HAS_ANNOTATED, reason="need `Annotated`")
class TestTypeAnnotations:
@pytest.mark.parametrize("annot",
[u.m, u.Quantity[u.m], u.Quantity[u.m, "more"]]
if HAS_ANNOTATED else [None]) # Note: parametrization is done even if test class is skipped
def test_single_annotation_unit(self, annot):
"""Try a variety of valid annotations."""
@u.quantity_input
def myfunc_args(x: annot, y: str):
return x, y
i_q, i_str = 2 * u.m, "cool string"
o_q, o_str = myfunc_args(i_q, i_str)
assert o_q == i_q
assert o_str == i_str
def test_args_None():
x_target = u.deg
x_unit = u.arcsec
y_target = u.km
y_unit = u.kpc
@u.quantity_input(x=[x_target, None], y=[None, y_target])
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1*x_unit, None)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
x, y = myfunc_args(None, 1*y_unit)
assert isinstance(y, u.Quantity)
assert y.unit == y_unit
assert x is None
def test_args_None_kwarg():
x_target = u.deg
x_unit = u.arcsec
y_target = u.km
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=None):
return x, y
x, y = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
x, y = myfunc_args(1*x_unit, None)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
with pytest.raises(TypeError):
x, y = myfunc_args(None, None)
@pytest.mark.parametrize('val', [1., 1, np.arange(10), np.arange(10.)])
def test_allow_dimensionless_numeric(val):
"""
When dimensionless_unscaled is an allowed unit, numbers and numeric numpy
arrays are allowed through
"""
@u.quantity_input(velocity=[u.km/u.s, u.dimensionless_unscaled])
def myfunc(velocity):
return velocity
assert np.all(myfunc(val) == val)
@pytest.mark.parametrize('val', [1., 1, np.arange(10), np.arange(10.)])
def test_allow_dimensionless_numeric_strict(val):
"""
When dimensionless_unscaled is an allowed unit, but we are being strict,
don't allow numbers and numeric numpy arrays through
"""
@u.quantity_input(velocity=[u.km/u.s, u.dimensionless_unscaled],
strict_dimensionless=True)
def myfunc(velocity):
return velocity
with pytest.raises(TypeError):
assert myfunc(val)
@pytest.mark.parametrize('val', [1*u.deg, [1, 2, 3]*u.m])
def test_dimensionless_with_nondimensionless_input(val):
"""
When dimensionless_unscaled is the only allowed unit, don't let input with
non-dimensionless units through
"""
@u.quantity_input(x=u.dimensionless_unscaled)
def myfunc(x):
return x
with pytest.raises(u.UnitsError):
myfunc(val)
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires py3.9+")
def test_annotated_not_quantity():
"""Test when annotation looks like a Quantity[X], but isn't."""
@u.quantity_input()
def myfunc(x: typing.Annotated[object, u.m]):
return x
# nothing happens when wrong unit is passed
assert myfunc(1) == 1
assert myfunc(1 * u.m) == 1 * u.m
assert myfunc(1 * u.s) == 1 * u.s
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires py3.9+")
def test_annotated_not_unit():
"""Test when annotation looks like a Quantity[X], but the unit's wrong."""
@u.quantity_input()
def myfunc(x: typing.Annotated[u.Quantity, object()]):
return x
# nothing happens when wrong unit is passed
assert myfunc(1) == 1
assert myfunc(1 * u.m) == 1 * u.m
assert myfunc(1 * u.s) == 1 * u.s
|
3009dad4836dfea7f76839fd15927ed99f44ce2c13d4dc8525fcbf091d6d3e21 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numbers
import numpy as np
from astropy.units import (
CompositeUnit, Unit, UnitConversionError, UnitsError, UnitTypeError, dimensionless_unscaled,
photometric)
from .core import FunctionQuantity, FunctionUnitBase
from .units import dB, dex, mag
__all__ = ['LogUnit', 'MagUnit', 'DexUnit', 'DecibelUnit',
'LogQuantity', 'Magnitude', 'Decibel', 'Dex',
'STmag', 'ABmag', 'M_bol', 'm_bol']
class LogUnit(FunctionUnitBase):
"""Logarithmic unit containing a physical one
Usually, logarithmic units are instantiated via specific subclasses
such `MagUnit`, `DecibelUnit`, and `DexUnit`.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the logarithmic function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, the same as the logarithmic unit set by the subclass.
"""
# the four essential overrides of FunctionUnitBase
@property
def _default_function_unit(self):
return dex
@property
def _quantity_class(self):
return LogQuantity
def from_physical(self, x):
"""Transformation from value in physical to value in logarithmic units.
Used in equivalency."""
return dex.to(self._function_unit, np.log10(x))
def to_physical(self, x):
"""Transformation from value in logarithmic to value in physical units.
Used in equivalency."""
return 10 ** self._function_unit.to(dex, x)
# ^^^^ the four essential overrides of FunctionUnitBase
# add addition and subtraction, which imply multiplication/division of
# the underlying physical units
def _add_and_adjust_physical_unit(self, other, sign_self, sign_other):
"""Add/subtract LogUnit to/from another unit, and adjust physical unit.
self and other are multiplied by sign_self and sign_other, resp.
We wish to do: ±lu_1 + ±lu_2 -> lu_f (lu=logarithmic unit)
and pu_1^(±1) * pu_2^(±1) -> pu_f (pu=physical unit)
Raises
------
UnitsError
If function units are not equivalent.
"""
# First, insist on compatible logarithmic type. Here, plain u.mag,
# u.dex, and u.dB are OK, i.e., other does not have to be LogUnit
# (this will indirectly test whether other is a unit at all).
try:
getattr(other, 'function_unit', other)._to(self._function_unit)
except AttributeError:
# if other is not a unit (i.e., does not have _to).
return NotImplemented
except UnitsError:
raise UnitsError("Can only add/subtract logarithmic units of"
"of compatible type.")
other_physical_unit = getattr(other, 'physical_unit',
dimensionless_unscaled)
physical_unit = CompositeUnit(
1, [self._physical_unit, other_physical_unit],
[sign_self, sign_other])
return self._copy(physical_unit)
def __neg__(self):
return self._copy(self.physical_unit**(-1))
def __add__(self, other):
# Only know how to add to a logarithmic unit with compatible type,
# be it a plain one (u.mag, etc.,) or another LogUnit
return self._add_and_adjust_physical_unit(other, +1, +1)
def __radd__(self, other):
return self._add_and_adjust_physical_unit(other, +1, +1)
def __sub__(self, other):
return self._add_and_adjust_physical_unit(other, +1, -1)
def __rsub__(self, other):
# here, in normal usage other cannot be a LogUnit; the only equivalent
# ones would be u.mag, u.dB, u.dex. But we might as well use the common routine.
return self._add_and_adjust_physical_unit(other, -1, +1)
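# Doctest-style sketch of the arithmetic defined above (illustrative):
# adding/subtracting logarithmic units multiplies/divides the physical ones.
#     >>> import astropy.units as u
#     >>> (u.mag(u.m) + u.mag(u.s)).physical_unit
#     Unit("m s")
#     >>> (u.mag(u.m) - u.mag(u.s)).physical_unit
#     Unit("m / s")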
class MagUnit(LogUnit):
"""Logarithmic physical units expressed in magnitudes
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the magnitude function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``mag``, but this allows one to use an equivalent
unit such as ``2 mag``.
"""
@property
def _default_function_unit(self):
return mag
@property
def _quantity_class(self):
return Magnitude
class DexUnit(LogUnit):
"""Logarithmic physical units expressed in magnitudes
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the magnitude function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``dex``, but this allows one to use an equivalent
unit such as ``0.5 dex``.
"""
@property
def _default_function_unit(self):
return dex
@property
def _quantity_class(self):
return Dex
def to_string(self, format='generic'):
if format == 'cds':
if self.physical_unit == dimensionless_unscaled:
return "[-]" # by default, would get "[---]".
else:
return f"[{self.physical_unit.to_string(format=format)}]"
else:
return super().to_string()
class DecibelUnit(LogUnit):
"""Logarithmic physical units expressed in dB
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the decibel function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``dB``, but this allows one to use an equivalent
unit such as ``2 dB``.
"""
@property
def _default_function_unit(self):
return dB
@property
def _quantity_class(self):
return Decibel
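# Illustrative sketch (values indicative): quantities in these function
# units convert to physical ones via `.physical`.
#     >>> import astropy.units as u
#     >>> (0 * u.dB(u.mW)).physical   # 0 dB(mW) corresponds to 1 mW
#     <Quantity 1. mW>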
class LogQuantity(FunctionQuantity):
"""A representation of a (scaled) logarithm of a number with a unit
Parameters
----------
value : number, `~astropy.units.Quantity`, `~astropy.units.function.logarithmic.LogQuantity`, or sequence of quantity-like.
The numerical value of the logarithmic quantity. If a number or
a `~astropy.units.Quantity` with a logarithmic unit, it will be
converted to ``unit`` and the physical unit will be inferred from
``unit``. If a `~astropy.units.Quantity` with just a physical unit,
it will be converted to the logarithmic unit, after, if necessary,
converting it to the physical unit inferred from ``unit``.
unit : str, `~astropy.units.UnitBase`, or `~astropy.units.function.FunctionUnitBase`, optional
For an `~astropy.units.function.FunctionUnitBase` instance, the
physical unit will be taken from it; for other input, it will be
inferred from ``value``. By default, ``unit`` is set by the subclass.
dtype : `~numpy.dtype`, optional
The ``dtype`` of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined automatically
from the input value.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
Examples
--------
Typically, use is made of an `~astropy.units.function.FunctionQuantity`
subclass, as in::
>>> import astropy.units as u
>>> u.Magnitude(-2.5)
<Magnitude -2.5 mag>
>>> u.Magnitude(10.*u.count/u.second)
<Magnitude -2.5 mag(ct / s)>
>>> u.Decibel(1.*u.W, u.DecibelUnit(u.mW)) # doctest: +FLOAT_CMP
<Decibel 30. dB(mW)>
"""
# only override of FunctionQuantity
_unit_class = LogUnit
# additions that work just for logarithmic units
def __add__(self, other):
# Add function units, thus multiplying physical units. If no unit is
# given, assume dimensionless_unscaled; this will give the appropriate
# exception in LogUnit.__add__.
new_unit = self.unit + getattr(other, 'unit', dimensionless_unscaled)
# Add actual logarithmic values, rescaling, e.g., dB -> dex.
result = self._function_view + getattr(other, '_function_view', other)
return self._new_view(result, new_unit)
def __radd__(self, other):
return self.__add__(other)
def __iadd__(self, other):
new_unit = self.unit + getattr(other, 'unit', dimensionless_unscaled)
# Do calculation in-place using _function_view of array.
function_view = self._function_view
function_view += getattr(other, '_function_view', other)
self._set_unit(new_unit)
return self
def __sub__(self, other):
# Subtract function units, thus dividing physical units.
new_unit = self.unit - getattr(other, 'unit', dimensionless_unscaled)
# Subtract actual logarithmic values, rescaling, e.g., dB -> dex.
result = self._function_view - getattr(other, '_function_view', other)
return self._new_view(result, new_unit)
def __rsub__(self, other):
new_unit = self.unit.__rsub__(
getattr(other, 'unit', dimensionless_unscaled))
result = self._function_view.__rsub__(
getattr(other, '_function_view', other))
# Ensure the result is in right function unit scale
# (with rsub, this does not have to be one's own).
result = result.to(new_unit.function_unit)
return self._new_view(result, new_unit)
def __isub__(self, other):
new_unit = self.unit - getattr(other, 'unit', dimensionless_unscaled)
# Do calculation in-place using _function_view of array.
function_view = self._function_view
function_view -= getattr(other, '_function_view', other)
self._set_unit(new_unit)
return self
def __mul__(self, other):
# Multiply by a float or a dimensionless quantity
if isinstance(other, numbers.Number):
# Multiplying a log means putting the factor into the exponent
# of the unit
new_physical_unit = self.unit.physical_unit**other
result = self.view(np.ndarray) * other
return self._new_view(result, self.unit._copy(new_physical_unit))
else:
return super().__mul__(other)
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
if isinstance(other, numbers.Number):
new_physical_unit = self.unit.physical_unit**other
function_view = self._function_view
function_view *= other
self._set_unit(self.unit._copy(new_physical_unit))
return self
else:
return super().__imul__(other)
def __truediv__(self, other):
# Divide by a float or a dimensionless quantity
if isinstance(other, numbers.Number):
# Dividing a log by a number means raising the physical unit to
# the reciprocal power in the exponent of the unit
new_physical_unit = self.unit.physical_unit**(1/other)
result = self.view(np.ndarray) / other
return self._new_view(result, self.unit._copy(new_physical_unit))
else:
return super().__truediv__(other)
def __itruediv__(self, other):
if isinstance(other, numbers.Number):
new_physical_unit = self.unit.physical_unit**(1/other)
function_view = self._function_view
function_view /= other
self._set_unit(self.unit._copy(new_physical_unit))
return self
else:
return super().__itruediv__(other)
def __pow__(self, other):
# We check if this power is OK by applying it first to the unit.
try:
other = float(other)
except TypeError:
return NotImplemented
new_unit = self.unit ** other
new_value = self.view(np.ndarray) ** other
return self._new_view(new_value, new_unit)
def __ilshift__(self, other):
try:
other = Unit(other)
except UnitTypeError:
return NotImplemented
if not isinstance(other, self._unit_class):
return NotImplemented
try:
factor = self.unit.physical_unit._to(other.physical_unit)
except UnitConversionError:
# Maybe via equivalencies? Now we do make a temporary copy.
try:
value = self._to_value(other)
except UnitConversionError:
return NotImplemented
self.view(np.ndarray)[...] = value
else:
self.view(np.ndarray)[...] += self.unit.from_physical(factor)
self._set_unit(other)
return self
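# Sketch of the in-place conversion implemented above (illustrative;
# the numerical value is approximate):
#     >>> import astropy.units as u
#     >>> q = u.Magnitude(10., u.mag(u.ct / u.s))
#     >>> q <<= u.mag(u.ct / u.h)   # same physical type, new unit
#     >>> q   # approximately <Magnitude 1.11 mag(ct / h)>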
# Methods that do not work for function units generally but are OK for
# logarithmic units as they imply differences and independence of
# physical unit.
def var(self, axis=None, dtype=None, out=None, ddof=0):
return self._wrap_function(np.var, axis, dtype, out=out, ddof=ddof,
unit=self.unit.function_unit**2)
def std(self, axis=None, dtype=None, out=None, ddof=0):
return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof,
unit=self.unit._copy(dimensionless_unscaled))
def ptp(self, axis=None, out=None):
return self._wrap_function(np.ptp, axis, out=out,
unit=self.unit._copy(dimensionless_unscaled))
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis,
unit=self.unit._copy(dimensionless_unscaled))
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin,
unit=self.unit._copy(dimensionless_unscaled))
_supported_functions = (FunctionQuantity._supported_functions |
{getattr(np, function) for function in
('var', 'std', 'ptp', 'diff', 'ediff1d')})
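# Illustrative: the methods enabled above drop the physical unit, since
# they involve differences of logarithmic values (sketch).
#     >>> import numpy as np
#     >>> import astropy.units as u
#     >>> u.Magnitude(np.array([1., 2., 4.])).diff()
#     <Magnitude [1., 2.] mag>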
class Dex(LogQuantity):
_unit_class = DexUnit
class Decibel(LogQuantity):
_unit_class = DecibelUnit
class Magnitude(LogQuantity):
_unit_class = MagUnit
dex._function_unit_class = DexUnit
dB._function_unit_class = DecibelUnit
mag._function_unit_class = MagUnit
STmag = MagUnit(photometric.STflux)
STmag.__doc__ = "ST magnitude: STmag=-21.1 corresponds to 1 erg/s/cm2/A"
ABmag = MagUnit(photometric.ABflux)
ABmag.__doc__ = "AB magnitude: ABmag=-48.6 corresponds to 1 erg/s/cm2/Hz"
M_bol = MagUnit(photometric.Bol)
M_bol.__doc__ = ("Absolute bolometric magnitude: M_bol=0 corresponds to "
"L_bol0={}".format(photometric.Bol.si))
m_bol = MagUnit(photometric.bol)
m_bol.__doc__ = ("Apparent bolometric magnitude: m_bol=0 corresponds to "
"f_bol0={}".format(photometric.bol.si))
|
d162717d5713babf9de7447ab48857e14703248030795e2af641df3aa3f2ae49 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Function Units and Quantities."""
from abc import ABCMeta, abstractmethod
import numpy as np
from astropy.units import (
Quantity, Unit, UnitBase, UnitConversionError, UnitsError, UnitTypeError,
dimensionless_unscaled)
__all__ = ['FunctionUnitBase', 'FunctionQuantity']
SUPPORTED_UFUNCS = {getattr(np.core.umath, ufunc) for ufunc in (
'isfinite', 'isinf', 'isnan', 'sign', 'signbit',
'rint', 'floor', 'ceil', 'trunc',
'_ones_like', 'ones_like', 'positive') if hasattr(np.core.umath, ufunc)}
# TODO: the following could work if helper changed relative to Quantity:
# - spacing should return dimensionless, not same unit
# - negative should negate unit too,
# - add, subtract, comparisons can work if units added/subtracted
SUPPORTED_FUNCTIONS = {getattr(np, function) for function in
('clip', 'trace', 'mean', 'min', 'max', 'round')}
# subclassing UnitBase or CompositeUnit was found to be problematic, requiring
# a large number of overrides. Hence, define new class.
class FunctionUnitBase(metaclass=ABCMeta):
"""Abstract base class for function units.
Function units are functions containing a physical unit, such as dB(mW).
Most of the arithmetic operations on function units are defined in this
base class.
While instantiation is defined, this class should not be used directly.
Rather, subclasses should be used that override the abstract properties
`_default_function_unit` and `_quantity_class`, and the abstract methods
`from_physical`, and `to_physical`.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, the same as the function unit set by the subclass.
"""
# ↓↓↓ the following four need to be set by subclasses
# Make this a property so we can ensure subclasses define it.
@property
@abstractmethod
def _default_function_unit(self):
"""Default function unit corresponding to the function.
This property should be overridden by subclasses, with, e.g.,
`~astropy.units.MagUnit` returning `~astropy.units.mag`.
"""
# This has to be a property because the function quantity will not be
# known at unit definition time, as it gets defined after.
@property
@abstractmethod
def _quantity_class(self):
"""Function quantity class corresponding to this function unit.
This property should be overridden by subclasses, with, e.g.,
`~astropy.units.MagUnit` returning `~astropy.units.Magnitude`.
"""
@abstractmethod
def from_physical(self, x):
"""Transformation from value in physical to value in function units.
This method should be overridden by subclasses. It is used to
provide automatic transformations using an equivalency.
"""
@abstractmethod
def to_physical(self, x):
"""Transformation from value in function to value in physical units.
This method should be overridden by subclasses. It is used to
provide automatic transformations using an equivalency.
"""
# ↑↑↑ the above four need to be set by subclasses
# have priority over arrays, regular units, and regular quantities
__array_priority__ = 30000
def __init__(self, physical_unit=None, function_unit=None):
if physical_unit is None:
self._physical_unit = dimensionless_unscaled
else:
self._physical_unit = Unit(physical_unit)
if (not isinstance(self._physical_unit, UnitBase) or
self._physical_unit.is_equivalent(
self._default_function_unit)):
raise UnitConversionError("Unit {} is not a physical unit."
.format(self._physical_unit))
if function_unit is None:
self._function_unit = self._default_function_unit
else:
# any function unit should be equivalent to subclass default
function_unit = Unit(getattr(function_unit, 'function_unit',
function_unit))
if function_unit.is_equivalent(self._default_function_unit):
self._function_unit = function_unit
else:
raise UnitConversionError(
"Cannot initialize '{}' instance with function unit '{}'"
", as it is not equivalent to default function unit '{}'."
.format(self.__class__.__name__, function_unit,
self._default_function_unit))
def _copy(self, physical_unit=None):
"""Copy oneself, possibly with a different physical unit."""
if physical_unit is None:
physical_unit = self.physical_unit
return self.__class__(physical_unit, self.function_unit)
@property
def physical_unit(self):
return self._physical_unit
@property
def function_unit(self):
return self._function_unit
@property
def equivalencies(self):
"""List of equivalencies between function and physical units.
Uses the `from_physical` and `to_physical` methods.
"""
return [(self, self.physical_unit,
self.to_physical, self.from_physical)]
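# Illustrative: the equivalency above lets `to` hop directly between the
# function unit and its physical unit (sketch).
#     >>> import astropy.units as u
#     >>> u.mag(u.ct / u.s).to(u.ct / u.s, -5.)   # -5 mag is a factor 100
#     100.0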
# ↓↓↓ properties/methods required to behave like a unit
def decompose(self, bases=set()):
"""Copy the current unit with the physical unit decomposed.
For details, see `~astropy.units.UnitBase.decompose`.
"""
return self._copy(self.physical_unit.decompose(bases))
@property
def si(self):
"""Copy the current function unit with the physical unit in SI."""
return self._copy(self.physical_unit.si)
@property
def cgs(self):
"""Copy the current function unit with the physical unit in CGS."""
return self._copy(self.physical_unit.cgs)
def _get_physical_type_id(self):
"""Get physical type corresponding to physical unit."""
return self.physical_unit._get_physical_type_id()
@property
def physical_type(self):
"""Return the physical type of the physical unit (e.g., 'length')."""
return self.physical_unit.physical_type
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : `~astropy.units.Unit`, string, or tuple
The unit to convert to. If a tuple of units is specified, this
method returns true if the unit matches any of those in the tuple.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to the built-in equivalencies between the
function unit and the physical one, as well as possible global
defaults set by, e.g., `~astropy.units.set_enabled_equivalencies`.
Use `None` to turn off any global equivalencies.
Returns
-------
bool
"""
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies=equivalencies)
for u in other)
other_physical_unit = getattr(other, 'physical_unit', (
dimensionless_unscaled if self.function_unit.is_equivalent(other)
else other))
return self.physical_unit.is_equivalent(other_physical_unit,
equivalencies)
def to(self, other, value=1., equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : `~astropy.units.Unit`, `~astropy.units.function.FunctionUnitBase`, or str
The unit to convert to.
value : int, float, or scalar array-like, optional
Value(s) in the current unit to be converted to the specified unit.
If not provided, defaults to 1.0.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is meant to treat only equivalencies between different
physical units; the built-in equivalency between the function
unit and the physical one is automatically taken into account.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
`~astropy.units.UnitsError`
If units are inconsistent.
"""
# conversion to one's own physical unit should be fastest
if other is self.physical_unit:
return self.to_physical(value)
other_function_unit = getattr(other, 'function_unit', other)
if self.function_unit.is_equivalent(other_function_unit):
# when other is an equivalent function unit:
# first convert physical units to other's physical units
other_physical_unit = getattr(other, 'physical_unit',
dimensionless_unscaled)
if self.physical_unit != other_physical_unit:
value_other_physical = self.physical_unit.to(
other_physical_unit, self.to_physical(value),
equivalencies)
# make function unit again, in own system
value = self.from_physical(value_other_physical)
# convert possible difference in function unit (e.g., dex->dB)
return self.function_unit.to(other_function_unit, value)
else:
try:
# when other is not a function unit
return self.physical_unit.to(other, self.to_physical(value),
equivalencies)
except UnitConversionError as e:
if self.function_unit == Unit('mag'):
# One can get to raw magnitudes via math that strips the dimensions off.
# Include extra information in the exception to remind users of this.
msg = "Did you perhaps subtract magnitudes so the unit got lost?"
e.args += (msg,)
raise e
else:
raise
def is_unity(self):
return False
def __eq__(self, other):
return (self.physical_unit == getattr(other, 'physical_unit',
dimensionless_unscaled) and
self.function_unit == getattr(other, 'function_unit', other))
def __ne__(self, other):
return not self.__eq__(other)
def __rlshift__(self, other):
"""Unit conversion operator ``<<``"""
try:
return self._quantity_class(other, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __mul__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return self.function_unit * other
else:
raise UnitsError("Cannot multiply a function unit "
"with a physical dimension with any unit.")
else:
# Anything not like a unit, try initialising as a function quantity.
try:
return self._quantity_class(other, unit=self)
except Exception:
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return self.function_unit / other
else:
raise UnitsError("Cannot divide a function unit "
"with a physical dimension by any unit.")
else:
# Anything not like a unit, try initialising as a function quantity.
try:
return self._quantity_class(1./other, unit=self)
except Exception:
return NotImplemented
def __rtruediv__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return other / self.function_unit
else:
raise UnitsError("Cannot divide a function unit "
"with a physical dimension into any unit")
else:
# Don't know what to do with anything not like a unit.
return NotImplemented
def __pow__(self, power):
if power == 0:
return dimensionless_unscaled
elif power == 1:
return self._copy()
if self.physical_unit == dimensionless_unscaled:
return self.function_unit ** power
raise UnitsError("Cannot raise a function unit "
"with a physical dimension to any power but 0 or 1.")
def __pos__(self):
return self._copy()
def to_string(self, format='generic'):
"""
Output the unit in the given format as a string.
The physical unit is appended, within parentheses, to the function
unit, as in "dB(mW)", with both units set using the given format.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
"""
if format not in ('generic', 'unscaled', 'latex', 'latex_inline'):
raise ValueError(f"Function units cannot be written in {format} "
"format. Only 'generic', 'unscaled', 'latex' and "
"'latex_inline' are supported.")
self_str = self.function_unit.to_string(format)
pu_str = self.physical_unit.to_string(format)
if pu_str == '':
pu_str = '1'
if format.startswith('latex'):
self_str += r'$\mathrm{{\left( {0} \right)}}$'.format(
pu_str[1:-1]) # need to strip leading and trailing "$"
else:
self_str += f'({pu_str})'
return self_str
def __str__(self):
"""Return string representation for unit."""
self_str = str(self.function_unit)
pu_str = str(self.physical_unit)
if pu_str:
self_str += f'({pu_str})'
return self_str
def __repr__(self):
# By default, try to give a representation using `Unit(<string>)`,
# with string such that parsing it would give the correct FunctionUnit.
if callable(self.function_unit):
return f'Unit("{self.to_string()}")'
else:
return '{}("{}"{})'.format(
self.__class__.__name__, self.physical_unit,
"" if self.function_unit is self._default_function_unit
else f', unit="{self.function_unit}"')
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return self.to_string('latex')
def __hash__(self):
return hash((self.function_unit, self.physical_unit))
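# Doctest-style round trip through the string machinery above (sketch):
#     >>> import astropy.units as u
#     >>> str(u.mag(u.ct / u.s))
#     'mag(ct / s)'
#     >>> u.Unit('mag(ct / s)') == u.mag(u.ct / u.s)
#     True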
class FunctionQuantity(Quantity):
"""A representation of a (scaled) function of a number with a unit.
Function quantities are quantities whose units are functions containing a
physical unit, such as dB(mW). Most of the arithmetic operations on
function quantities are defined in this base class.
While instantiation is also defined here, this class should not be
instantiated directly. Rather, subclasses should be made which have
``_unit_class`` pointing back to the corresponding function unit class.
Parameters
----------
value : number, quantity-like, or sequence thereof
The numerical value of the function quantity. If a number or
a `~astropy.units.Quantity` with a function unit, it will be converted
to ``unit`` and the physical unit will be inferred from ``unit``.
If a `~astropy.units.Quantity` with just a physical unit, it will
be converted to the function unit, after, if necessary, converting it to
the physical unit inferred from ``unit``.
unit : str, `~astropy.units.UnitBase`, or `~astropy.units.function.FunctionUnitBase`, optional
For an `~astropy.units.function.FunctionUnitBase` instance, the
physical unit will be taken from it; for other input, it will be
inferred from ``value``. By default, ``unit`` is set by the subclass.
dtype : `~numpy.dtype`, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any input that cannot represent float (integer and bool)
is converted to float.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. Ignored
if the input does not need to be converted and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be of the
class used. Otherwise, subclasses will be passed through.
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be pre-pended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`~astropy.units.Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not a `~astropy.units.function.FunctionUnitBase`
or `~astropy.units.Unit` object, or a parseable string unit.
"""
_unit_class = None
"""Default `~astropy.units.function.FunctionUnitBase` subclass.
This should be overridden by subclasses.
"""
# Ensure priority over ndarray, regular Unit & Quantity, and FunctionUnit.
__array_priority__ = 40000
# Define functions that work on FunctionQuantity.
_supported_ufuncs = SUPPORTED_UFUNCS
_supported_functions = SUPPORTED_FUNCTIONS
def __new__(cls, value, unit=None, dtype=np.inexact, copy=True, order=None,
subok=False, ndmin=0):
if unit is not None:
# Convert possible string input to a (function) unit.
unit = Unit(unit)
if not isinstance(unit, FunctionUnitBase):
# By default, use value's physical unit.
value_unit = getattr(value, 'unit', None)
if value_unit is None:
# if iterable, see if first item has a unit
# (mixed lists fail in super call below).
try:
value_unit = getattr(value[0], 'unit')
except Exception:
pass
physical_unit = getattr(value_unit, 'physical_unit', value_unit)
unit = cls._unit_class(physical_unit, function_unit=unit)
# initialise!
return super().__new__(cls, value, unit, dtype=dtype, copy=copy,
order=order, subok=subok, ndmin=ndmin)
# ↓↓↓ properties not found in Quantity
@property
def physical(self):
"""The physical quantity corresponding the function one."""
return self.to(self.unit.physical_unit)
@property
def _function_view(self):
"""View as Quantity with function unit, dropping the physical unit.
Use `~astropy.units.quantity.Quantity.value` for just the value.
"""
return self._new_view(unit=self.unit.function_unit)
# ↓↓↓ methods overridden to change the behavior
@property
def si(self):
"""Return a copy with the physical unit in SI units."""
return self.__class__(self.physical.si)
@property
def cgs(self):
"""Return a copy with the physical unit in CGS units."""
return self.__class__(self.physical.cgs)
def decompose(self, bases=[]):
"""Generate a new `FunctionQuantity` with the physical unit decomposed.
For details, see `~astropy.units.Quantity.decompose`.
"""
return self.__class__(self.physical.decompose(bases))
# ↓↓↓ methods overridden to add additional behavior
def __quantity_subclass__(self, unit):
if isinstance(unit, FunctionUnitBase):
return self.__class__, True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if not isinstance(unit, self._unit_class):
# Have to take care of, e.g., (10*u.mag).view(u.Magnitude)
try:
# "or 'nonsense'" ensures `None` breaks, just in case.
unit = self._unit_class(function_unit=unit or 'nonsense')
except Exception:
raise UnitTypeError(
"{} instances require {} function units"
.format(type(self).__name__, self._unit_class.__name__) +
f", so cannot set it to '{unit}'.")
self._unit = unit
def __array_ufunc__(self, function, method, *inputs, **kwargs):
# TODO: it would be more logical to have this in Quantity already,
# instead of in UFUNC_HELPERS, where it cannot be overridden.
# And really it should just return NotImplemented, since possibly
# another argument might know what to do.
if function not in self._supported_ufuncs:
raise UnitTypeError(
f"Cannot use ufunc '{function.__name__}' with function quantities")
return super().__array_ufunc__(function, method, *inputs, **kwargs)
# ↓↓↓ methods overridden to change behavior
def __mul__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._function_view * other
raise UnitTypeError("Cannot multiply function quantities which "
"are not dimensionless with anything.")
def __truediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._function_view / other
raise UnitTypeError("Cannot divide function quantities which "
"are not dimensionless by anything.")
def __rtruediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._function_view.__rtruediv__(other)
raise UnitTypeError("Cannot divide function quantities which "
"are not dimensionless into anything.")
def _comparison(self, other, comparison_func):
"""Do a comparison between self and other, raising UnitsError when
other cannot be converted to self because it has different physical
unit, and returning NotImplemented when there are other errors."""
try:
# will raise a UnitsError if physical units not equivalent
other_in_own_unit = self._to_own_unit(other, check_precision=False)
except UnitsError as exc:
if self.unit.physical_unit != dimensionless_unscaled:
raise exc
try:
other_in_own_unit = self._function_view._to_own_unit(
other, check_precision=False)
except Exception:
raise exc
except Exception:
return NotImplemented
return comparison_func(other_in_own_unit)
def __eq__(self, other):
try:
return self._comparison(other, self.value.__eq__)
except UnitsError:
return False
def __ne__(self, other):
try:
return self._comparison(other, self.value.__ne__)
except UnitsError:
return True
def __gt__(self, other):
return self._comparison(other, self.value.__gt__)
def __ge__(self, other):
return self._comparison(other, self.value.__ge__)
def __lt__(self, other):
return self._comparison(other, self.value.__lt__)
def __le__(self, other):
return self._comparison(other, self.value.__le__)
def __lshift__(self, other):
"""Unit conversion operator `<<`"""
try:
other = Unit(other, parse_strict='silent')
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
# Ensure Quantity methods are used only if they make sense.
def _wrap_function(self, function, *args, **kwargs):
if function in self._supported_functions:
return super()._wrap_function(function, *args, **kwargs)
# For dimensionless, we can convert to regular quantities.
if all(arg.unit.physical_unit == dimensionless_unscaled
for arg in (self,) + args
if (hasattr(arg, 'unit') and
hasattr(arg.unit, 'physical_unit'))):
args = tuple(getattr(arg, '_function_view', arg) for arg in args)
return self._function_view._wrap_function(function, *args, **kwargs)
raise TypeError("Cannot use method that uses function '{}' with "
"function quantities that are not dimensionless."
.format(function.__name__))
# Override functions that are supported but do not use _wrap_function
# in Quantity.
def max(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.max, axis, out=out, keepdims=keepdims)
def min(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.min, axis, out=out, keepdims=keepdims)
def sum(self, axis=None, dtype=None, out=None, keepdims=False):
return self._wrap_function(np.sum, axis, dtype, out=out,
keepdims=keepdims)
def cumsum(self, axis=None, dtype=None, out=None):
return self._wrap_function(np.cumsum, axis, dtype, out=out)
def clip(self, a_min, a_max, out=None):
return self._wrap_function(np.clip, self._to_own_unit(a_min),
self._to_own_unit(a_max), out=out)
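# Illustrative: `physical` and `_function_view` expose the two faces of a
# function quantity (sketch; reprs indicative).
#     >>> import astropy.units as u
#     >>> m = u.Magnitude(-2.5)
#     >>> m.physical   # -2.5 mag corresponds to a factor 10
#     <Quantity 10.>
#     >>> m._function_view.unit
#     Unit("mag")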
|
c2ac2d6130ffd79249b795f2f981218dce2ed529a42c4ed030e359f37d20f0ca | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines units that can also be used as functions of other units.
If called, their arguments are used to initialize the corresponding function
unit (e.g., ``u.mag(u.ct/u.s)``). Note that the prefixed versions cannot be
called, as it would be unclear what, e.g., ``u.mmag(u.ct/u.s)`` would mean.
"""
from astropy.units.core import _add_prefixes
from .mixin import IrreducibleFunctionUnit, RegularFunctionUnit
_ns = globals()
###########################################################################
# Logarithmic units
# These calls are what core.def_unit would do, but we need to use the callable
# unit versions. The actual function unit classes get added in logarithmic.
dex = IrreducibleFunctionUnit(['dex'], namespace=_ns,
doc="Dex: Base 10 logarithmic unit")
dB = RegularFunctionUnit(['dB', 'decibel'], 0.1 * dex, namespace=_ns,
doc="Decibel: ten per base 10 logarithmic unit")
mag = RegularFunctionUnit(['mag'], -0.4 * dex, namespace=_ns,
doc=("Astronomical magnitude: "
"-2.5 per base 10 logarithmic unit"))
_add_prefixes(mag, namespace=_ns, prefixes=True)
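# Doctest-style sketch of the callable units defined above:
#     >>> import astropy.units as u
#     >>> u.mag(u.ct / u.s)
#     Unit("mag(ct / s)")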
###########################################################################
# CLEANUP
del RegularFunctionUnit
del IrreducibleFunctionUnit
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from astropy.units.utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
d6b48aa1fb31d95075aecd483ad3e4a94406ec5d1a24a34803323d1e1880c46a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.units.core import IrreducibleUnit, Unit
class FunctionMixin:
"""Mixin class that makes UnitBase subclasses callable.
Provides a __call__ method that passes on arguments to a FunctionUnit.
Instances of this class should define ``_function_unit_class`` pointing
to the relevant class.
See units.py and logarithmic.py for usage.
"""
def __call__(self, unit=None):
return self._function_unit_class(physical_unit=unit,
function_unit=self)
class IrreducibleFunctionUnit(FunctionMixin, IrreducibleUnit):
pass
class RegularFunctionUnit(FunctionMixin, Unit):
pass
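# Illustrative: only units composed with FunctionMixin are callable;
# prefixed versions such as u.mmag are plain units and raise TypeError
# if called (sketch).
#     >>> import astropy.units as u
#     >>> u.dex(u.cm / u.s**2)
#     Unit("dex(cm / s2)")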
|
c1c47dbc91d2204b1fd2a6c12b3ec3bb8b7cd25f6ea10bc5943318a0aff5c6ef | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
from collections.abc import MappingView
from types import MappingProxyType
import numpy as np
from astropy import units as u
from astropy.utils.state import ScienceState
from astropy.utils.decorators import format_doc, classproperty, deprecated
from astropy.coordinates.angles import Angle
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product, matrix_transpose
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import (BaseCoordinateFrame,
frame_transform_graph,
base_doc)
from astropy.coordinates.attributes import (CoordinateAttribute,
QuantityAttribute,
DifferentialAttribute)
from astropy.coordinates.transformations import AffineTransform
from astropy.coordinates.errors import ConvertError
from .icrs import ICRS
__all__ = ['Galactocentric']
# Measured by minimizing the difference between a plane of coordinates along
# l=0, b=[-90,90] and the Galactocentric x-z plane
# This is not used directly, but accessed via `get_roll0`. We define it here to
# prevent having to create new Angle objects every time `get_roll0` is called.
_ROLL0 = Angle(58.5986320306*u.degree)
class _StateProxy(MappingView):
"""
`~collections.abc.MappingView` with a read-only ``getitem`` through
`~types.MappingProxyType`.
"""
def __init__(self, mapping):
super().__init__(mapping)
self._mappingproxy = MappingProxyType(self._mapping) # read-only
def __getitem__(self, key):
"""Read-only ``getitem``."""
return self._mappingproxy[key]
def __deepcopy__(self, memo):
return copy.deepcopy(self._mapping, memo=memo)
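# Illustrative: reads pass through, mutation is rejected (sketch).
#     >>> proxy = _StateProxy({'roll': 0})
#     >>> proxy['roll']
#     0
#     >>> proxy['roll'] = 1   # raises TypeError (no item assignment)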
class galactocentric_frame_defaults(ScienceState):
"""This class controls the global setting of default values for the frame
attributes in the `~astropy.coordinates.Galactocentric` frame, which may be
updated in future versions of ``astropy``. Note that when using
`~astropy.coordinates.Galactocentric`, changing values here will not affect
any attributes that are set explicitly by passing values in to the
`~astropy.coordinates.Galactocentric` initializer. Modifying these defaults
will only affect the frame attribute values when using the frame as, e.g.,
``Galactocentric`` or ``Galactocentric()`` with no explicit arguments.
This class controls the parameter settings by specifying a string name,
with the following pre-specified options:
- 'pre-v4.0': The current default value, which sets the default frame
attribute values to their original (pre-astropy-v4.0) values.
- 'v4.0': The attribute values as updated in Astropy version 4.0.
- 'latest': An alias of the most recent parameter set (currently: 'v4.0')
Alternatively, user-defined parameter settings may be registered, with
:meth:`~astropy.coordinates.galactocentric_frame_defaults.register`,
and used identically as pre-specified parameter sets. At minimum,
registrations must have unique names and a dictionary of parameters
with keys "galcen_coord", "galcen_distance", "galcen_v_sun", "z_sun",
"roll". See examples below.
This class also tracks the references for all parameter values in the
attribute ``references``, as well as any further information in the registry.
The pre-specified options can be extended to include similar
state information as user-defined parameter settings -- for example, to add
parameter uncertainties.
The preferred method for getting a parameter set and metadata, by name, is
:meth:`~galactocentric_frame_defaults.get_from_registry` since
it ensures the immutability of the registry.
See :ref:`astropy:astropy-coordinates-galactocentric-defaults` for more
information.
Examples
--------
The default `~astropy.coordinates.Galactocentric` frame parameters can be
modified globally::
>>> from astropy.coordinates import galactocentric_frame_defaults
>>> _ = galactocentric_frame_defaults.set('v4.0') # doctest: +SKIP
>>> Galactocentric() # doctest: +SKIP
<Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.122 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg)>
>>> _ = galactocentric_frame_defaults.set('pre-v4.0') # doctest: +SKIP
>>> Galactocentric() # doctest: +SKIP
<Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.3 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)>
The default parameters can also be updated by using this class as a context
manager::
>>> with galactocentric_frame_defaults.set('pre-v4.0'):
... print(Galactocentric()) # doctest: +FLOAT_CMP
<Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.3 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)>
Again, changing the default parameter values will not affect frame
attributes that are explicitly specified::
>>> import astropy.units as u
>>> with galactocentric_frame_defaults.set('pre-v4.0'):
... print(Galactocentric(galcen_distance=8.0*u.kpc)) # doctest: +FLOAT_CMP
<Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.0 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)>
Additional parameter sets may be registered, for instance to use the
Dehnen & Binney (1998) measurements of the solar motion. We can also
add metadata, such as the 1-sigma errors. In this example we will modify
the required key "parameters", change the recommended key "references" to
match "parameters", and add the extra key "error" (any key can be added)::
>>> state = galactocentric_frame_defaults.get_from_registry("v4.0")
>>> state["parameters"]["galcen_v_sun"] = (10.00, 225.25, 7.17) * (u.km / u.s)
>>> state["references"]["galcen_v_sun"] = "https://ui.adsabs.harvard.edu/full/1998MNRAS.298..387D"
>>> state["error"] = {"galcen_v_sun": (0.36, 0.62, 0.38) * (u.km / u.s)}
>>> galactocentric_frame_defaults.register(name="DB1998", **state)
Just as in the previous examples, the new parameter set can be retrieved with::
>>> state = galactocentric_frame_defaults.get_from_registry("DB1998")
>>> print(state["error"]["galcen_v_sun"]) # doctest: +FLOAT_CMP
[0.36 0.62 0.38] km / s
"""
_latest_value = 'v4.0'
_value = None
_references = None
_state = dict() # all other data
# Note: _StateProxy() produces read-only view of enclosed mapping.
_registry = {
"v4.0": {
"parameters": _StateProxy(
{
"galcen_coord": ICRS(
ra=266.4051 * u.degree, dec=-28.936175 * u.degree
),
"galcen_distance": 8.122 * u.kpc,
"galcen_v_sun": r.CartesianDifferential(
[12.9, 245.6, 7.78] * (u.km / u.s)
),
"z_sun": 20.8 * u.pc,
"roll": 0 * u.deg,
}
),
"references": _StateProxy(
{
"galcen_coord": "https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R",
"galcen_distance": "https://ui.adsabs.harvard.edu/abs/2018A%26A...615L..15G",
"galcen_v_sun": [
"https://ui.adsabs.harvard.edu/abs/2018RNAAS...2..210D",
"https://ui.adsabs.harvard.edu/abs/2018A%26A...615L..15G",
"https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R",
],
"z_sun": "https://ui.adsabs.harvard.edu/abs/2019MNRAS.482.1417B",
"roll": None,
}
),
},
"pre-v4.0": {
"parameters": _StateProxy(
{
"galcen_coord": ICRS(
ra=266.4051 * u.degree, dec=-28.936175 * u.degree
),
"galcen_distance": 8.3 * u.kpc,
"galcen_v_sun": r.CartesianDifferential(
[11.1, 220 + 12.24, 7.25] * (u.km / u.s)
),
"z_sun": 27.0 * u.pc,
"roll": 0 * u.deg,
}
),
"references": _StateProxy(
{
"galcen_coord": "https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R",
"galcen_distance": "https://ui.adsabs.harvard.edu/#abs/2009ApJ...692.1075G",
"galcen_v_sun": [
"https://ui.adsabs.harvard.edu/#abs/2010MNRAS.403.1829S",
"https://ui.adsabs.harvard.edu/#abs/2015ApJS..216...29B",
],
"z_sun": "https://ui.adsabs.harvard.edu/#abs/2001ApJ...553..184C",
"roll": None,
}
),
},
}
@classproperty # read-only
def parameters(cls):
return cls._value
@classproperty # read-only
def references(cls):
return cls._references
@classmethod
def get_from_registry(cls, name: str):
"""
        Return Galactocentric solar parameters and metadata given a string
        name for a registered parameter set. This method ensures the returned
        state is a mutable copy, so any changes made do not affect the
        registry state.
Returns
-------
state : dict
Copy of the registry for the string name.
Should contain, at minimum:
- "parameters": dict
Galactocentric solar parameters
- "references" : Dict[str, Union[str, Sequence[str]]]
References for "parameters".
Fields are str or sequence of str.
Raises
------
KeyError
            If ``name`` is not a known key in the registry of solar
            parameters for the Galactocentric frame.
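        Examples
        --------
        A minimal sketch of retrieving a registered state (the values are
        shown in the class docstring above)::
            >>> state = galactocentric_frame_defaults.get_from_registry("v4.0")  # doctest: +SKIP
            >>> sorted(state.keys())  # doctest: +SKIP
            ['parameters', 'references']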
"""
# Resolve the meaning of 'latest': latest parameter set is from v4.0
# - update this as newer parameter choices are added
if name == 'latest':
name = cls._latest_value
# Get the state from the registry.
# Copy to ensure registry is immutable to modifications of "_value".
# Raises KeyError if `name` is invalid string input to registry
# to retrieve solar parameters for Galactocentric frame.
state = copy.deepcopy(cls._registry[name]) # ensure mutable
return state
@deprecated("v4.2", alternative="`get_from_registry`")
@classmethod
def get_solar_params_from_string(cls, arg):
"""
Return Galactocentric solar parameters given string names
for the parameter sets.
Returns
-------
parameters : dict
Copy of Galactocentric solar parameters from registry
Raises
------
KeyError
            If ``arg`` is not a known key in the registry of solar
            parameters for the Galactocentric frame.
"""
return cls.get_from_registry(arg)["parameters"]
@classmethod
def validate(cls, value):
if value is None:
value = cls._latest_value
if isinstance(value, str):
state = cls.get_from_registry(value)
cls._references = state["references"]
cls._state = state
parameters = state["parameters"]
elif isinstance(value, dict):
parameters = value
elif isinstance(value, Galactocentric):
# turn the frame instance into a dict of frame attributes
parameters = dict()
for k in value.frame_attributes:
parameters[k] = getattr(value, k)
cls._references = value.frame_attribute_references.copy()
cls._state = dict(parameters=parameters,
references=cls._references)
else:
raise ValueError("Invalid input to retrieve solar parameters for "
"Galactocentric frame: input must be a string, "
"dict, or Galactocentric instance")
return parameters
@classmethod
def register(cls, name: str, parameters: dict, references=None,
**meta: dict):
"""Register a set of parameters.
Parameters
----------
name : str
The registration name for the parameter and metadata set.
parameters : dict
The solar parameters for Galactocentric frame.
references : dict or None, optional
References for contents of `parameters`.
None becomes empty dict.
**meta : dict, optional
Any other properties to register.
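        Examples
        --------
        A registration sketch (the name ``"my-copy"`` is purely illustrative;
        see the class docstring for a complete example)::
            >>> state = galactocentric_frame_defaults.get_from_registry("v4.0")  # doctest: +SKIP
            >>> galactocentric_frame_defaults.register(name="my-copy", **state)  # doctest: +SKIP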
"""
# check on contents of `parameters`
must_have = {"galcen_coord", "galcen_distance", "galcen_v_sun",
"z_sun", "roll"}
missing = must_have.difference(parameters)
if missing:
raise ValueError(f"Missing parameters: {missing}")
references = references or {} # None -> {}
state = dict(parameters=parameters, references=references)
state.update(meta) # meta never has keys "parameters" or "references"
cls._registry[name] = state
doc_components = """
x : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`x` position component.
y : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`y` position component.
z : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`z` position component.
v_x : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`v_x` velocity component.
v_y : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`v_y` velocity component.
v_z : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`v_z` velocity component.
"""
doc_footer = """
Other parameters
----------------
galcen_coord : `ICRS`, optional, keyword-only
The ICRS coordinates of the Galactic center.
galcen_distance : `~astropy.units.Quantity`, optional, keyword-only
The distance from the sun to the Galactic center.
galcen_v_sun : `~astropy.coordinates.representation.CartesianDifferential`, `~astropy.units.Quantity` ['speed'], optional, keyword-only
The velocity of the sun *in the Galactocentric frame* as Cartesian
velocity components.
z_sun : `~astropy.units.Quantity` ['length'], optional, keyword-only
The distance from the sun to the Galactic midplane.
roll : `~astropy.coordinates.Angle`, optional, keyword-only
The angle to rotate about the final x-axis, relative to the
orientation for Galactic. For example, if this roll angle is 0,
the final x-z plane will align with the Galactic coordinates x-z
plane. Unless you really know what this means, you probably should
not change this!
Examples
--------
To transform to the Galactocentric frame with the default
frame attributes, pass the uninstantiated class name to the
``transform_to()`` method of a `~astropy.coordinates.SkyCoord` object::
>>> import astropy.units as u
>>> import astropy.coordinates as coord
>>> c = coord.SkyCoord(ra=[158.3122, 24.5] * u.degree,
... dec=[-17.3, 81.52] * u.degree,
... distance=[11.5, 24.12] * u.kpc,
... frame='icrs')
>>> c.transform_to(coord.Galactocentric) # doctest: +FLOAT_CMP
<SkyCoord (Galactocentric: galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.122 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg): (x, y, z) in kpc
[( -9.43489286, -9.40062188, 6.51345359),
(-21.11044918, 18.76334013, 7.83175149)]>
To specify a custom set of parameters, you have to include extra keyword
arguments when initializing the Galactocentric frame object::
>>> c.transform_to(coord.Galactocentric(galcen_distance=8.1*u.kpc)) # doctest: +FLOAT_CMP
<SkyCoord (Galactocentric: galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.1 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg): (x, y, z) in kpc
[( -9.41284763, -9.40062188, 6.51346272),
(-21.08839478, 18.76334013, 7.83184184)]>
Similarly, transforming from the Galactocentric frame to another coordinate frame::
>>> c = coord.SkyCoord(x=[-8.3, 4.5] * u.kpc,
... y=[0., 81.52] * u.kpc,
... z=[0.027, 24.12] * u.kpc,
... frame=coord.Galactocentric)
>>> c.transform_to(coord.ICRS) # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)
[( 88.22423301, 29.88672864, 0.17813456),
(289.72864549, 49.9865043 , 85.93949064)]>
Or, with custom specification of the Galactic center::
>>> c = coord.SkyCoord(x=[-8.0, 4.5] * u.kpc,
... y=[0., 81.52] * u.kpc,
... z=[21.0, 24120.0] * u.pc,
... frame=coord.Galactocentric,
... z_sun=21 * u.pc, galcen_distance=8. * u.kpc)
>>> c.transform_to(coord.ICRS) # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)
[( 86.2585249 , 28.85773187, 2.75625475e-05),
(289.77285255, 50.06290457, 8.59216010e+01)]>
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class Galactocentric(BaseCoordinateFrame):
r"""
A coordinate or frame in the Galactocentric system.
This frame allows specifying the Sun-Galactic center distance, the height of
the Sun above the Galactic midplane, and the solar motion relative to the
Galactic center. However, as there is no modern standard definition of a
Galactocentric reference frame, it is important to pay attention to the
default values used in this class if precision is important in your code.
    The default values of the parameters of this frame are set at runtime by
    `~astropy.coordinates.galactocentric_frame_defaults`, which resolves to
    the most recent registered parameter set (``"latest"``, currently
    ``"v4.0"``) unless changed. The values adopted before ``astropy`` version
    4.0, which follow the original 2014 definition of the frame, remain
    available as the ``"pre-v4.0"`` parameter set, e.g. via
    ``galactocentric_frame_defaults.set('pre-v4.0')``, and other parameter set
    names may be added in future versions. To find out the scientific papers
that the current default parameters are derived from, use
``galcen.frame_attribute_references`` (where ``galcen`` is an instance of
this frame), which will update even if the default parameter set is changed.
The position of the Sun is assumed to be on the x axis of the final,
right-handed system. That is, the x axis points from the position of
the Sun projected to the Galactic midplane to the Galactic center --
roughly towards :math:`(l,b) = (0^\circ,0^\circ)`. For the default
transformation (:math:`{\rm roll}=0^\circ`), the y axis points roughly
towards Galactic longitude :math:`l=90^\circ`, and the z axis points
roughly towards the North Galactic Pole (:math:`b=90^\circ`).
For a more detailed look at the math behind this transformation, see
the document :ref:`astropy:coordinates-galactocentric`.
The frame attributes are listed under **Other Parameters**.
"""
default_representation = r.CartesianRepresentation
default_differential = r.CartesianDifferential
# frame attributes
galcen_coord = CoordinateAttribute(frame=ICRS)
galcen_distance = QuantityAttribute(unit=u.kpc)
galcen_v_sun = DifferentialAttribute(
allowed_classes=[r.CartesianDifferential])
z_sun = QuantityAttribute(unit=u.pc)
roll = QuantityAttribute(unit=u.deg)
def __init__(self, *args, **kwargs):
# Set default frame attribute values based on the ScienceState instance
# for the solar parameters defined above
default_params = galactocentric_frame_defaults.get()
self.frame_attribute_references = \
galactocentric_frame_defaults.references.copy()
for k in default_params:
if k in kwargs:
# If a frame attribute is set by the user, remove its reference
self.frame_attribute_references.pop(k, None)
# Keep the frame attribute if it is set by the user, otherwise use
# the default value
kwargs[k] = kwargs.get(k, default_params[k])
super().__init__(*args, **kwargs)
@classmethod
def get_roll0(cls):
"""
The additional roll angle (about the final x axis) necessary to align
        the final z axis to match the Galactic yz-plane. Setting the ``roll``
        frame attribute to the negative of this method's return value removes
        this rotation, allowing the use of the `Galactocentric` frame in more
        general contexts.
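        A usage sketch, following the sign convention described above::
            >>> from astropy.coordinates import Galactocentric  # doctest: +SKIP
            >>> frame = Galactocentric(roll=-Galactocentric.get_roll0())  # doctest: +SKIP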
"""
        # note that the actual value is defined at the module level. We expose
        # it through this classmethod because this module isn't actually part
        # of the public API, so it's better for it to be accessible from
        # Galactocentric
return _ROLL0
# ICRS to/from Galactocentric ----------------------->
def get_matrix_vectors(galactocentric_frame, inverse=False):
"""
    Compute the transformation matrix and offset to transform from ICRS to
    Galactocentric coordinates. Use the ``inverse`` argument to instead get
    the matrix and offsets to go from Galactocentric to ICRS.
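    A usage sketch (from within this module; requires an instantiated frame)::
        >>> from astropy.coordinates import Galactocentric  # doctest: +SKIP
        >>> A, offset = get_matrix_vectors(Galactocentric())  # doctest: +SKIP
        >>> A_inv, offset_inv = get_matrix_vectors(Galactocentric(), inverse=True)  # doctest: +SKIP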
"""
# shorthand
gcf = galactocentric_frame
# rotation matrix to align x(ICRS) with the vector to the Galactic center
mat1 = rotation_matrix(-gcf.galcen_coord.dec, 'y')
mat2 = rotation_matrix(gcf.galcen_coord.ra, 'z')
# extra roll away from the Galactic x-z plane
mat0 = rotation_matrix(gcf.get_roll0() - gcf.roll, 'x')
# construct transformation matrix and use it
R = matrix_product(mat0, mat1, mat2)
# Now need to translate by Sun-Galactic center distance around x' and
# rotate about y' to account for tilt due to Sun's height above the plane
translation = r.CartesianRepresentation(gcf.galcen_distance * [1., 0., 0.])
z_d = gcf.z_sun / gcf.galcen_distance
H = rotation_matrix(-np.arcsin(z_d), 'y')
# compute total matrices
A = matrix_product(H, R)
# Now we re-align the translation vector to account for the Sun's height
# above the midplane
offset = -translation.transform(H)
if inverse:
# the inverse of a rotation matrix is a transpose, which is much faster
# and more stable to compute
A = matrix_transpose(A)
offset = (-offset).transform(A)
offset_v = r.CartesianDifferential.from_cartesian(
(-gcf.galcen_v_sun).to_cartesian().transform(A))
offset = offset.with_differentials(offset_v)
else:
offset = offset.with_differentials(gcf.galcen_v_sun)
return A, offset
def _check_coord_repr_diff_types(c):
if isinstance(c.data, r.UnitSphericalRepresentation):
raise ConvertError("Transforming to/from a Galactocentric frame "
"requires a 3D coordinate, e.g. (angle, angle, "
"distance) or (x, y, z).")
if ('s' in c.data.differentials and
isinstance(c.data.differentials['s'],
(r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
r.RadialDifferential))):
raise ConvertError("Transforming to/from a Galactocentric frame "
"requires a 3D velocity, e.g., proper motion "
"components and radial velocity.")
@frame_transform_graph.transform(AffineTransform, ICRS, Galactocentric)
def icrs_to_galactocentric(icrs_coord, galactocentric_frame):
_check_coord_repr_diff_types(icrs_coord)
return get_matrix_vectors(galactocentric_frame)
@frame_transform_graph.transform(AffineTransform, Galactocentric, ICRS)
def galactocentric_to_icrs(galactocentric_coord, icrs_frame):
_check_coord_repr_diff_types(galactocentric_coord)
return get_matrix_vectors(galactocentric_coord, inverse=True)
# Create loopback transformation
frame_transform_graph._add_merged_transform(Galactocentric, ICRS, Galactocentric)
|
fc858fa13cde8ecebb68b0645c29f11b5a2aa6264b6470d69f42c2f85419d669 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates.matrix_utilities import (rotation_matrix,
matrix_product,
matrix_transpose)
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.transformations import StaticMatrixTransform
from .galactic import Galactic
from .supergalactic import Supergalactic
@frame_transform_graph.transform(StaticMatrixTransform, Galactic, Supergalactic)
def gal_to_supergal():
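    # Compose three rotations (a z-y-z Euler sequence) that carry the Galactic
    # axes onto the supergalactic axes, built from the Galactic coordinates of
    # the north supergalactic pole (Supergalactic._nsgp_gal).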
mat1 = rotation_matrix(90, 'z')
mat2 = rotation_matrix(90 - Supergalactic._nsgp_gal.b.degree, 'y')
mat3 = rotation_matrix(Supergalactic._nsgp_gal.l.degree, 'z')
return matrix_product(mat1, mat2, mat3)
@frame_transform_graph.transform(StaticMatrixTransform, Supergalactic, Galactic)
def supergal_to_gal():
return matrix_transpose(gal_to_supergal())
|
613fe7ea0dfd7a4e33b1685087b26f6647a25f2f021674020633e6fdd027e676 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.utils.decorators import format_doc
from astropy.coordinates.baseframe import frame_transform_graph, base_doc
from astropy.coordinates.attributes import TimeAttribute
from astropy.coordinates.transformations import DynamicMatrixTransform
from astropy.coordinates import earth_orientation as earth
from .baseradec import BaseRADecFrame, doc_components
from .utils import EQUINOX_J2000
__all__ = ['FK5']
doc_footer = """
Other parameters
----------------
equinox : `~astropy.time.Time`
The equinox of this frame.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class FK5(BaseRADecFrame):
"""
A coordinate or frame in the FK5 system.
Note that this is a barycentric version of FK5 - that is, the origin for
this frame is the Solar System Barycenter, *not* the Earth geocenter.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
@staticmethod
def _precession_matrix(oldequinox, newequinox):
"""
Compute and return the precession matrix for FK5 based on Capitaine et
al. 2003/IAU2006. Used inside some of the transformation functions.
Parameters
----------
oldequinox : `~astropy.time.Time`
The equinox to precess from.
newequinox : `~astropy.time.Time`
The equinox to precess to.
Returns
-------
newcoord : array
The precession matrix to transform to the new equinox
"""
return earth.precession_matrix_Capitaine(oldequinox, newequinox)
# This is the "self-transform". Defined at module level because the decorator
# needs a reference to the FK5 class
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, FK5)
def fk5_to_fk5(fk5coord1, fk5frame2):
return fk5coord1._precession_matrix(fk5coord1.equinox, fk5frame2.equinox)
|
ea91c6848cd36f878398a83ab5b7340d2e7c9097912c66915abb8961b5f55dbe | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting to/from ITRS, TEME, GCRS, and CIRS.
These are distinct from the ICRS and AltAz functions because they are just
rotations without aberration corrections or offsets.
"""
import numpy as np
import erfa
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.transformations import FunctionTransformWithFiniteDifference
from astropy.coordinates.matrix_utilities import matrix_transpose
from .icrs import ICRS
from .gcrs import GCRS, PrecessedGeocentric
from .cirs import CIRS
from .itrs import ITRS
from .equatorial import TEME, TETE
from .utils import get_polar_motion, get_jd12, EARTH_CENTER
# first define helper functions
def teme_to_itrs_mat(time):
# Sidereal time, rotates from ITRS to mean equinox
# Use 1982 model for consistency with Vallado et al (2006)
# http://www.celestrak.com/publications/aiaa/2006-6753/AIAA-2006-6753.pdf
gst = erfa.gmst82(*get_jd12(time, 'ut1'))
# Polar Motion
# Do not include TIO locator s' because it is not used in Vallado 2006
xp, yp = get_polar_motion(time)
pmmat = erfa.pom00(xp, yp, 0)
# rotation matrix
    # c2tcio expects a GCRS->CIRS matrix as its first argument.
# Here, we just set that to an I-matrix, because we're already
# in TEME and the difference between TEME and CIRS is just the
# rotation by the sidereal time rather than the Earth Rotation Angle
return erfa.c2tcio(np.eye(3), gst, pmmat)
def gcrs_to_cirs_mat(time):
# celestial-to-intermediate matrix
return erfa.c2i06a(*get_jd12(time, 'tt'))
def cirs_to_itrs_mat(time):
# compute the polar motion p-matrix
xp, yp = get_polar_motion(time)
sp = erfa.sp00(*get_jd12(time, 'tt'))
pmmat = erfa.pom00(xp, yp, sp)
# now determine the Earth Rotation Angle for the input obstime
# era00 accepts UT1, so we convert if need be
era = erfa.era00(*get_jd12(time, 'ut1'))
# c2tcio expects a GCRS->CIRS matrix, but we just set that to an I-matrix
# because we're already in CIRS
return erfa.c2tcio(np.eye(3), era, pmmat)
def tete_to_itrs_mat(time, rbpn=None):
"""Compute the polar motion p-matrix at the given time.
If the nutation-precession matrix is already known, it should be passed in,
as this is by far the most expensive calculation.
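    A usage sketch, reusing a precomputed nutation-precession matrix for an
    assumed `~astropy.time.Time` object ``time``::
        >>> rbpn = erfa.pnm06a(*get_jd12(time, 'tt'))  # doctest: +SKIP
        >>> mat = tete_to_itrs_mat(time, rbpn=rbpn)  # doctest: +SKIP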
"""
xp, yp = get_polar_motion(time)
sp = erfa.sp00(*get_jd12(time, 'tt'))
pmmat = erfa.pom00(xp, yp, sp)
    # now determine the Greenwich apparent sidereal time for the input obstime
# we use the 2006A model for consistency with RBPN matrix use in GCRS <-> TETE
ujd1, ujd2 = get_jd12(time, 'ut1')
jd1, jd2 = get_jd12(time, 'tt')
if rbpn is None:
# erfa.gst06a calls pnm06a to calculate rbpn and then gst06. Use it in
# favour of getting rbpn with erfa.pnm06a to avoid a possibly large array.
gast = erfa.gst06a(ujd1, ujd2, jd1, jd2)
else:
gast = erfa.gst06(ujd1, ujd2, jd1, jd2, rbpn)
# c2tcio expects a GCRS->CIRS matrix, but we just set that to an I-matrix
# because we're already in CIRS equivalent frame
return erfa.c2tcio(np.eye(3), gast, pmmat)
def gcrs_precession_mat(equinox):
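    # IAU 2006 bias-precession angles in the Fukushima-Williams
    # parameterization at the requested equinox, assembled into a single
    # rotation matrix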
gamb, phib, psib, epsa = erfa.pfw06(*get_jd12(equinox, 'tt'))
return erfa.fw2m(gamb, phib, psib, epsa)
def get_location_gcrs(location, obstime, ref_to_itrs, gcrs_to_ref):
"""Create a GCRS frame at the location and obstime.
The reference frame z axis must point to the Celestial Intermediate Pole
(as is the case for CIRS and TETE).
This function is here to avoid location.get_gcrs(obstime), which would
recalculate matrices that are already available below (and return a GCRS
coordinate, rather than a frame with obsgeoloc and obsgeovel). Instead,
it uses the private method that allows passing in the matrices.
"""
obsgeoloc, obsgeovel = location._get_gcrs_posvel(obstime,
ref_to_itrs, gcrs_to_ref)
return GCRS(obstime=obstime, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)
# now the actual transforms
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, TETE)
def gcrs_to_tete(gcrs_coo, tete_frame):
# Classical NPB matrix, IAU 2006/2000A
# (same as in builtin_frames.utils.get_cip).
rbpn = erfa.pnm06a(*get_jd12(tete_frame.obstime, 'tt'))
# Get GCRS coordinates for the target observer location and time.
loc_gcrs = get_location_gcrs(tete_frame.location, tete_frame.obstime,
tete_to_itrs_mat(tete_frame.obstime, rbpn=rbpn),
rbpn)
gcrs_coo2 = gcrs_coo.transform_to(loc_gcrs)
# Now we are relative to the correct observer, do the transform to TETE.
# These rotations are defined at the geocenter, but can be applied to
# topocentric positions as well, assuming rigid Earth. See p57 of
# https://www.usno.navy.mil/USNO/astronomical-applications/publications/Circular_179.pdf
crepr = gcrs_coo2.cartesian.transform(rbpn)
return tete_frame.realize_frame(crepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, TETE, GCRS)
def tete_to_gcrs(tete_coo, gcrs_frame):
# Compute the pn matrix, and then multiply by its transpose.
rbpn = erfa.pnm06a(*get_jd12(tete_coo.obstime, 'tt'))
newrepr = tete_coo.cartesian.transform(matrix_transpose(rbpn))
# We now have a GCRS vector for the input location and obstime.
# Turn it into a GCRS frame instance.
loc_gcrs = get_location_gcrs(tete_coo.location, tete_coo.obstime,
tete_to_itrs_mat(tete_coo.obstime, rbpn=rbpn),
rbpn)
gcrs = loc_gcrs.realize_frame(newrepr)
# Finally, do any needed offsets (no-op if same obstime and location)
return gcrs.transform_to(gcrs_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, TETE, ITRS)
def tete_to_itrs(tete_coo, itrs_frame):
# first get us to TETE at the target obstime, and geocentric position
tete_coo2 = tete_coo.transform_to(TETE(obstime=itrs_frame.obstime,
location=EARTH_CENTER))
# now get the pmatrix
pmat = tete_to_itrs_mat(itrs_frame.obstime)
crepr = tete_coo2.cartesian.transform(pmat)
return itrs_frame.realize_frame(crepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, TETE)
def itrs_to_tete(itrs_coo, tete_frame):
# compute the pmatrix, and then multiply by its transpose
pmat = tete_to_itrs_mat(itrs_coo.obstime)
newrepr = itrs_coo.cartesian.transform(matrix_transpose(pmat))
tete = TETE(newrepr, obstime=itrs_coo.obstime)
# now do any needed offsets (no-op if same obstime)
return tete.transform_to(tete_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, CIRS)
def gcrs_to_cirs(gcrs_coo, cirs_frame):
# first get the pmatrix
pmat = gcrs_to_cirs_mat(cirs_frame.obstime)
# Get GCRS coordinates for the target observer location and time.
loc_gcrs = get_location_gcrs(cirs_frame.location, cirs_frame.obstime,
cirs_to_itrs_mat(cirs_frame.obstime), pmat)
gcrs_coo2 = gcrs_coo.transform_to(loc_gcrs)
# Now we are relative to the correct observer, do the transform to CIRS.
crepr = gcrs_coo2.cartesian.transform(pmat)
return cirs_frame.realize_frame(crepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, GCRS)
def cirs_to_gcrs(cirs_coo, gcrs_frame):
# Compute the pmatrix, and then multiply by its transpose,
pmat = gcrs_to_cirs_mat(cirs_coo.obstime)
newrepr = cirs_coo.cartesian.transform(matrix_transpose(pmat))
# We now have a GCRS vector for the input location and obstime.
# Turn it into a GCRS frame instance.
loc_gcrs = get_location_gcrs(cirs_coo.location, cirs_coo.obstime,
cirs_to_itrs_mat(cirs_coo.obstime), pmat)
gcrs = loc_gcrs.realize_frame(newrepr)
# Finally, do any needed offsets (no-op if same obstime and location)
return gcrs.transform_to(gcrs_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, ITRS)
def cirs_to_itrs(cirs_coo, itrs_frame):
# first get us to geocentric CIRS at the target obstime
cirs_coo2 = cirs_coo.transform_to(CIRS(obstime=itrs_frame.obstime,
location=EARTH_CENTER))
# now get the pmatrix
pmat = cirs_to_itrs_mat(itrs_frame.obstime)
crepr = cirs_coo2.cartesian.transform(pmat)
return itrs_frame.realize_frame(crepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, CIRS)
def itrs_to_cirs(itrs_coo, cirs_frame):
# compute the pmatrix, and then multiply by its transpose
pmat = cirs_to_itrs_mat(itrs_coo.obstime)
newrepr = itrs_coo.cartesian.transform(matrix_transpose(pmat))
cirs = CIRS(newrepr, obstime=itrs_coo.obstime)
# now do any needed offsets (no-op if same obstime)
return cirs.transform_to(cirs_frame)
# Note: direct GCRS<->CIRS transforms are implemented above. The thing that's
# awkward is that both frames have obstimes, so an extra set of transformations
# (the GCRS self-transform, which goes through ICRS) is necessary whenever the
# obstimes or observer positions differ.
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, PrecessedGeocentric)
def gcrs_to_precessedgeo(from_coo, to_frame):
# first get us to GCRS with the right attributes (might be a no-op)
gcrs_coo = from_coo.transform_to(GCRS(obstime=to_frame.obstime,
obsgeoloc=to_frame.obsgeoloc,
obsgeovel=to_frame.obsgeovel))
# now precess to the requested equinox
pmat = gcrs_precession_mat(to_frame.equinox)
crepr = gcrs_coo.cartesian.transform(pmat)
return to_frame.realize_frame(crepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, PrecessedGeocentric, GCRS)
def precessedgeo_to_gcrs(from_coo, to_frame):
# first un-precess
pmat = gcrs_precession_mat(from_coo.equinox)
crepr = from_coo.cartesian.transform(matrix_transpose(pmat))
gcrs_coo = GCRS(crepr,
obstime=from_coo.obstime,
obsgeoloc=from_coo.obsgeoloc,
obsgeovel=from_coo.obsgeovel)
# then move to the GCRS that's actually desired
return gcrs_coo.transform_to(to_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, TEME, ITRS)
def teme_to_itrs(teme_coo, itrs_frame):
# use the pmatrix to transform to ITRS in the source obstime
pmat = teme_to_itrs_mat(teme_coo.obstime)
crepr = teme_coo.cartesian.transform(pmat)
itrs = ITRS(crepr, obstime=teme_coo.obstime)
# transform the ITRS coordinate to the target obstime
return itrs.transform_to(itrs_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, TEME)
def itrs_to_teme(itrs_coo, teme_frame):
# transform the ITRS coordinate to the target obstime
itrs_coo2 = itrs_coo.transform_to(ITRS(obstime=teme_frame.obstime))
# compute the pmatrix, and then multiply by its transpose
pmat = teme_to_itrs_mat(teme_frame.obstime)
newrepr = itrs_coo2.cartesian.transform(matrix_transpose(pmat))
return teme_frame.realize_frame(newrepr)
# Create loopback transformations
frame_transform_graph._add_merged_transform(ITRS, CIRS, ITRS)
frame_transform_graph._add_merged_transform(PrecessedGeocentric, GCRS, PrecessedGeocentric)
frame_transform_graph._add_merged_transform(TEME, ITRS, TEME)
frame_transform_graph._add_merged_transform(TETE, ICRS, TETE)
|
8ac6ae291e52aaf9c1161b439cb8335c90619d99ff8b059683a3c89065a13848 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.utils.decorators import format_doc
from astropy.coordinates.attributes import TimeAttribute
from .utils import DEFAULT_OBSTIME
from astropy.coordinates.baseframe import base_doc
from .baseradec import BaseRADecFrame, doc_components
__all__ = ['HCRS']
doc_footer = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position of the Sun.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class HCRS(BaseRADecFrame):
"""
A coordinate or frame in a Heliocentric system, with axes aligned to ICRS.
The ICRS has an origin at the Barycenter and axes which are fixed with
respect to space.
This coordinate system is distinct from ICRS mainly in that it is relative
to the Sun's center-of-mass rather than the solar system Barycenter.
In principle, therefore, this frame should include the effects of
aberration (unlike ICRS), but this is not done, since they are very small,
of the order of 8 milli-arcseconds.
For more background on the ICRS and related coordinate transformations, see
the references provided in the :ref:`astropy:astropy-coordinates-seealso`
section of the documentation.
The frame attributes are listed under **Other Parameters**.
"""
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
# Transformations are defined in icrs_circ_transforms.py
|
e0c1d919b2cb2218492399c19f627b3a2d21a347d8b54479bb3c6bc40c082786 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.coordinates.baseframe import frame_transform_graph, base_doc
from astropy.coordinates.attributes import TimeAttribute
from astropy.coordinates.transformations import (
FunctionTransformWithFiniteDifference, DynamicMatrixTransform)
from astropy.coordinates.representation import (CartesianRepresentation,
UnitSphericalRepresentation)
from astropy.coordinates import earth_orientation as earth
from .utils import EQUINOX_B1950
from .baseradec import doc_components, BaseRADecFrame
__all__ = ['FK4', 'FK4NoETerms']
doc_footer_fk4 = """
Other parameters
----------------
equinox : `~astropy.time.Time`
The equinox of this frame.
obstime : `~astropy.time.Time`
The time this frame was observed. If ``None``, will be the same as
``equinox``.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer_fk4)
class FK4(BaseRADecFrame):
"""
A coordinate or frame in the FK4 system.
Note that this is a barycentric version of FK4 - that is, the origin for
this frame is the Solar System Barycenter, *not* the Earth geocenter.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_B1950)
obstime = TimeAttribute(default=None, secondary_attribute='equinox')
# the "self" transform
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4, FK4)
def fk4_to_fk4(fk4coord1, fk4frame2):
# deceptively complicated: need to transform to No E-terms FK4, precess, and
# then come back, because precession is non-trivial with E-terms
fnoe_w_eqx1 = fk4coord1.transform_to(FK4NoETerms(equinox=fk4coord1.equinox))
fnoe_w_eqx2 = fnoe_w_eqx1.transform_to(FK4NoETerms(equinox=fk4frame2.equinox))
return fnoe_w_eqx2.transform_to(fk4frame2)
@format_doc(base_doc, components=doc_components, footer=doc_footer_fk4)
class FK4NoETerms(BaseRADecFrame):
"""
A coordinate or frame in the FK4 system, but with the E-terms of aberration
removed.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_B1950)
obstime = TimeAttribute(default=None, secondary_attribute='equinox')
@staticmethod
def _precession_matrix(oldequinox, newequinox):
"""
Compute and return the precession matrix for FK4 using Newcomb's method.
Used inside some of the transformation functions.
Parameters
----------
oldequinox : `~astropy.time.Time`
The equinox to precess from.
newequinox : `~astropy.time.Time`
The equinox to precess to.
Returns
-------
newcoord : array
The precession matrix to transform to the new equinox
"""
return earth._precession_matrix_besselian(oldequinox.byear, newequinox.byear)
# the "self" transform
@frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, FK4NoETerms)
def fk4noe_to_fk4noe(fk4necoord1, fk4neframe2):
return fk4necoord1._precession_matrix(fk4necoord1.equinox, fk4neframe2.equinox)
# FK4-NO-E to/from FK4 ----------------------------->
# Unlike other frame modules, this module includes *two* frame classes for FK4
# coordinates - one including the E-terms of aberration (FK4), and
# one not including them (FK4NoETerms). The following functions
# implement the transformation between these two.
def fk4_e_terms(equinox):
"""
Return the e-terms of aberration vector
Parameters
----------
equinox : Time object
The equinox for which to compute the e-terms
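    Returns
    -------
    tuple
        The equatorial Cartesian components of the e-terms vector as
        dimensionless ratios: ``(e*k*sin(g), -e*k*cos(g)*cos(o),
        -e*k*cos(g)*sin(o))``, with the symbols as defined in the function
        body below.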
"""
# Constant of aberration at J2000; from Explanatory Supplement to the
# Astronomical Almanac (Seidelmann, 2005).
k = 0.0056932 # in degrees (v_earth/c ~ 1e-4 rad ~ 0.0057 deg)
k = np.radians(k)
# Eccentricity of the Earth's orbit
e = earth.eccentricity(equinox.jd)
# Mean longitude of perigee of the solar orbit
g = earth.mean_lon_of_perigee(equinox.jd)
g = np.radians(g)
# Obliquity of the ecliptic
o = earth.obliquity(equinox.jd, algorithm=1980)
o = np.radians(o)
return (e * k * np.sin(g),
-e * k * np.cos(g) * np.cos(o),
-e * k * np.cos(g) * np.sin(o))
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4, FK4NoETerms)
def fk4_to_fk4_no_e(fk4coord, fk4noeframe):
# Extract cartesian vector
rep = fk4coord.cartesian
# Find distance (for re-normalization)
d_orig = rep.norm()
rep /= d_orig
# Apply E-terms of aberration. Note that this depends on the equinox (not
# the observing time/epoch) of the coordinates. See issue #1496 for a
# discussion of this.
eterms_a = CartesianRepresentation(
u.Quantity(fk4_e_terms(fk4coord.equinox), u.dimensionless_unscaled,
copy=False), copy=False)
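    # Subtracting the e-terms vector only changes the direction: the
    # ``eterms_a.dot(rep) * rep`` term restores the radial component, so (to
    # first order) only the part of the e-terms transverse to the line of
    # sight is removed, and the norm is fixed up below.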
rep = rep - eterms_a + eterms_a.dot(rep) * rep
# Find new distance (for re-normalization)
d_new = rep.norm()
# Renormalize
rep *= d_orig / d_new
# now re-cast into an appropriate Representation, and precess if need be
if isinstance(fk4coord.data, UnitSphericalRepresentation):
rep = rep.represent_as(UnitSphericalRepresentation)
# if no obstime was given in the new frame, use the old one for consistency
newobstime = fk4coord._obstime if fk4noeframe._obstime is None else fk4noeframe._obstime
fk4noe = FK4NoETerms(rep, equinox=fk4coord.equinox, obstime=newobstime)
if fk4coord.equinox != fk4noeframe.equinox:
# precession
fk4noe = fk4noe.transform_to(fk4noeframe)
return fk4noe
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4NoETerms, FK4)
def fk4_no_e_to_fk4(fk4noecoord, fk4frame):
# first precess, if necessary
if fk4noecoord.equinox != fk4frame.equinox:
fk4noe_w_fk4equinox = FK4NoETerms(equinox=fk4frame.equinox,
obstime=fk4noecoord.obstime)
fk4noecoord = fk4noecoord.transform_to(fk4noe_w_fk4equinox)
# Extract cartesian vector
rep = fk4noecoord.cartesian
# Find distance (for re-normalization)
d_orig = rep.norm()
rep /= d_orig
# Apply E-terms of aberration. Note that this depends on the equinox (not
# the observing time/epoch) of the coordinates. See issue #1496 for a
# discussion of this.
eterms_a = CartesianRepresentation(
u.Quantity(fk4_e_terms(fk4noecoord.equinox), u.dimensionless_unscaled,
copy=False), copy=False)
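    # Add the E-terms back by fixed-point iteration: this inverts the
    # subtraction done in fk4_to_fk4_no_e, which has no simple closed form.
    # Convergence is rapid since the e-terms are tiny (~1e-5), so ten
    # iterations are ample.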
rep0 = rep.copy()
for _ in range(10):
rep = (eterms_a + rep0) / (1. + eterms_a.dot(rep))
# Find new distance (for re-normalization)
d_new = rep.norm()
# Renormalize
rep *= d_orig / d_new
# now re-cast into an appropriate Representation, and precess if need be
if isinstance(fk4noecoord.data, UnitSphericalRepresentation):
rep = rep.represent_as(UnitSphericalRepresentation)
return fk4frame.realize_frame(rep)
|
71faf8b940a647befb3409db9ba7d3ad6bfd530167c12b28e8933a0670dadc6d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.utils.decorators import format_doc
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping, base_doc
__all__ = ['BaseRADecFrame']
doc_components = """
ra : `~astropy.coordinates.Angle`, optional, keyword-only
The RA for this object (``dec`` must also be given and ``representation``
must be None).
dec : `~astropy.coordinates.Angle`, optional, keyword-only
The Declination for this object (``ra`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
(``representation`` must be None).
pm_ra_cosdec : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Right Ascension (including the ``cos(dec)`` factor)
for this object (``pm_dec`` must also be given).
pm_dec : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Declination for this object (``pm_ra_cosdec`` must
also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object.
"""
@format_doc(base_doc, components=doc_components, footer="")
class BaseRADecFrame(BaseCoordinateFrame):
"""
A base class that defines default representation info for frames that
represent longitude and latitude as Right Ascension and Declination
following typical "equatorial" conventions.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'ra'),
RepresentationMapping('lat', 'dec')
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
|
713ad19d52bd3114e377b2817003e3a0c4619078ea670af5badf87a3d57dee53 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains the coordinate frames implemented by astropy.
Users shouldn't use this module directly, but rather import from the
`astropy.coordinates` module. While it is likely to exist for the long-term,
the existence of this package and details of its organization should be
considered an implementation detail, and is not guaranteed to hold for future
versions of astropy.
Notes
-----
The builtin frame classes are all imported automatically into this package's
namespace, so there's no need to access the sub-modules directly.
To implement a new frame in Astropy, a developer should add the frame as a new
module in this package. Any "self" transformations (i.e., those that transform
from one frame to another frame of the same class) should be included in that
module. Transformation functions connecting the new frame to other frames
should be in a separate module, which should be imported in this package's
``__init__.py`` to ensure the transformations are hooked up when this package is
imported. Placing the transformation functions in separate modules avoids
circular dependencies, because they need references to the frame classes.
"""
from .baseradec import BaseRADecFrame
from .icrs import ICRS
from .fk5 import FK5
from .fk4 import FK4, FK4NoETerms
from .galactic import Galactic
from .galactocentric import Galactocentric, galactocentric_frame_defaults
from .supergalactic import Supergalactic
from .altaz import AltAz
from .hadec import HADec
from .gcrs import GCRS, PrecessedGeocentric
from .cirs import CIRS
from .itrs import ITRS
from .hcrs import HCRS
from .equatorial import TEME, TETE
from .ecliptic import * # there are a lot of these so we don't list them all explicitly
from .skyoffset import SkyOffsetFrame
# need to import transformations so that they get registered in the graph
from . import icrs_fk5_transforms
from . import fk4_fk5_transforms
from . import galactic_transforms
from . import supergalactic_transforms
from . import icrs_cirs_transforms
from . import cirs_observed_transforms
from . import icrs_observed_transforms
from . import intermediate_rotation_transforms
from . import ecliptic_transforms
# Import this after importing other frames, since this requires various
# transformations to set up the LSR frames
from .lsr import LSR, GalacticLSR, LSRK, LSRD
from astropy.coordinates.baseframe import frame_transform_graph
# we define an __all__ because otherwise the transformation modules
# get included
__all__ = ['ICRS', 'FK5', 'FK4', 'FK4NoETerms', 'Galactic', 'Galactocentric',
'galactocentric_frame_defaults',
'Supergalactic', 'AltAz', 'HADec', 'GCRS', 'CIRS', 'ITRS', 'HCRS',
'TEME', 'TETE', 'PrecessedGeocentric', 'GeocentricMeanEcliptic',
'BarycentricMeanEcliptic', 'HeliocentricMeanEcliptic',
'GeocentricTrueEcliptic', 'BarycentricTrueEcliptic',
'HeliocentricTrueEcliptic',
'SkyOffsetFrame', 'GalacticLSR', 'LSR', 'LSRK', 'LSRD',
'BaseEclipticFrame', 'BaseRADecFrame', 'make_transform_graph_docs',
'HeliocentricEclipticIAU76', 'CustomBarycentricEcliptic']
def make_transform_graph_docs(transform_graph):
"""
Generates a string that can be used in other docstrings to include a
transformation graph, showing the available transforms and
coordinate systems.
Parameters
----------
transform_graph : `~.coordinates.TransformGraph`
Returns
-------
docstring : str
A string that can be added to the end of a docstring to show the
transform graph.
"""
from textwrap import dedent
coosys = [transform_graph.lookup_name(item) for
item in transform_graph.get_names()]
# currently, all of the priorities are set to 1, so we don't need to show
    # them in the transform graph.
graphstr = transform_graph.to_dot_graph(addnodes=coosys,
priorities=False)
docstr = """
The diagram below shows all of the built in coordinate systems,
their aliases (useful for converting other coordinates to them using
attribute-style access) and the pre-defined transformations between
them. The user is free to override any of these transformations by
defining new transformations between these systems, but the
pre-defined transformations should be sufficient for typical usage.
The color of an edge in the graph (i.e. the transformations between two
frames) is set by the type of transformation; the legend box defines the
mapping from transform class name to color.
    .. Wrap the graph in a div with a custom class to allow theming.
.. container:: frametransformgraph
.. graphviz::
"""
docstr = dedent(docstr) + ' ' + graphstr.replace('\n', '\n ')
# colors are in dictionary at the bottom of transformations.py
from astropy.coordinates.transformations import trans_to_color
html_list_items = []
for cls, color in trans_to_color.items():
block = f"""
<li style='list-style: none;'>
<p style="font-size: 12px;line-height: 24px;font-weight: normal;color: #848484;padding: 0;margin: 0;">
<b>{cls.__name__}:</b>
<span style="font-size: 24px; color: {color};"><b>➝</b></span>
</p>
</li>
"""
html_list_items.append(block)
nl = '\n'
graph_legend = f"""
.. raw:: html
<ul>
{nl.join(html_list_items)}
</ul>
"""
docstr = docstr + dedent(graph_legend)
return docstr
_transform_graph_docs = make_transform_graph_docs(frame_transform_graph)
# Here, we override the module docstring so that sphinx renders the transform
# graph without the developer documentation in the main docstring above.
__doc__ = _transform_graph_docs
|
1b8551f1795bb99b2ede1094cc11acc2f963171103d6ed7f76e988d95aba9283 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping, base_doc
from .galactic import Galactic
__all__ = ['Supergalactic']
doc_components = """
sgl : `~astropy.coordinates.Angle`, optional, keyword-only
The supergalactic longitude for this object (``sgb`` must also be given and
``representation`` must be None).
sgb : `~astropy.coordinates.Angle`, optional, keyword-only
The supergalactic latitude for this object (``sgl`` must also be given and
``representation`` must be None).
    distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
        The Distance for this object along the line-of-sight.
    pm_sgl_cossgb : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
        The proper motion in supergalactic longitude (including the
        ``cos(sgb)`` factor) for this object (``pm_sgb`` must also be given).
    pm_sgb : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
        The proper motion in supergalactic latitude for this object
        (``pm_sgl_cossgb`` must also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object.
"""
@format_doc(base_doc, components=doc_components, footer="")
class Supergalactic(BaseCoordinateFrame):
"""
Supergalactic Coordinates
(see Lahav et al. 2000, <https://ui.adsabs.harvard.edu/abs/2000MNRAS.312..166L>,
and references therein).
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'sgl'),
RepresentationMapping('lat', 'sgb')
],
r.CartesianRepresentation: [
RepresentationMapping('x', 'sgx'),
RepresentationMapping('y', 'sgy'),
RepresentationMapping('z', 'sgz')
],
r.CartesianDifferential: [
RepresentationMapping('d_x', 'v_x', u.km/u.s),
RepresentationMapping('d_y', 'v_y', u.km/u.s),
RepresentationMapping('d_z', 'v_z', u.km/u.s)
],
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
# North supergalactic pole in Galactic coordinates.
# Needed for transformations to/from Galactic coordinates.
_nsgp_gal = Galactic(l=47.37*u.degree, b=+6.32*u.degree)
|
655c9544d74a9c984bad46c161dc22bac9fef8512802362aa0fd9a07a9e49ad4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.utils.decorators import format_doc
from astropy.coordinates.baseframe import base_doc
from .baseradec import BaseRADecFrame, doc_components
__all__ = ['ICRS']
@format_doc(base_doc, components=doc_components, footer="")
class ICRS(BaseRADecFrame):
"""
A coordinate or frame in the ICRS system.
If you're looking for "J2000" coordinates, and aren't sure if you want to
    use this or `~astropy.coordinates.FK5`, you probably want to use ICRS. It
    is better defined as a catalog coordinate, is an inertial system, and is
    very close (within tens of milliarcseconds) to the J2000 equatorial frame.
For more background on the ICRS and related coordinate transformations, see
the references provided in the :ref:`astropy:astropy-coordinates-seealso`
section of the documentation.
"""
|
79272ba2ea5f42cc995e0ac10e9b0122572d50e37058cd5eb2301b5faa28ffb2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.coordinates.attributes import (TimeAttribute,
CartesianRepresentationAttribute)
from .utils import DEFAULT_OBSTIME, EQUINOX_J2000
from astropy.coordinates.baseframe import base_doc
from .baseradec import BaseRADecFrame, doc_components
__all__ = ['GCRS', 'PrecessedGeocentric']
doc_footer_gcrs = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position of the Earth.
obsgeoloc : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity`
The position of the observer relative to the center-of-mass of the
Earth, oriented the same as BCRS/ICRS. Either [0, 0, 0],
`~astropy.coordinates.CartesianRepresentation`, or proper input for one,
i.e., a `~astropy.units.Quantity` with shape (3, ...) and length units.
Defaults to [0, 0, 0], meaning "true" GCRS.
obsgeovel : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity`
The velocity of the observer relative to the center-of-mass of the
Earth, oriented the same as BCRS/ICRS. Either [0, 0, 0],
`~astropy.coordinates.CartesianRepresentation`, or proper input for one,
i.e., a `~astropy.units.Quantity` with shape (3, ...) and velocity
units. Defaults to [0, 0, 0], meaning "true" GCRS.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer_gcrs)
class GCRS(BaseRADecFrame):
"""
A coordinate or frame in the Geocentric Celestial Reference System (GCRS).
    GCRS is distinct from ICRS mainly in that it is relative to the Earth's
center-of-mass rather than the solar system Barycenter. That means this
frame includes the effects of aberration (unlike ICRS). For more background
on the GCRS, see the references provided in the
:ref:`astropy:astropy-coordinates-seealso` section of the documentation. (Of
particular note is Section 1.2 of
`USNO Circular 179 <https://arxiv.org/abs/astro-ph/0602086>`_)
This frame also includes frames that are defined *relative* to the Earth,
but that are offset (in both position and velocity) from the Earth.
The frame attributes are listed under **Other Parameters**.
"""
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
obsgeoloc = CartesianRepresentationAttribute(default=[0, 0, 0],
unit=u.m)
obsgeovel = CartesianRepresentationAttribute(default=[0, 0, 0],
unit=u.m/u.s)
# The "self-transform" is defined in icrs_cirs_transformations.py, because in
# the current implementation it goes through ICRS (like CIRS)
doc_footer_prec_geo = """
Other parameters
----------------
equinox : `~astropy.time.Time`
The (mean) equinox to precess the coordinates to.
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position of the Earth.
obsgeoloc : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity`
The position of the observer relative to the center-of-mass of the
Earth, oriented the same as BCRS/ICRS. Either [0, 0, 0],
`~astropy.coordinates.CartesianRepresentation`, or proper input for one,
i.e., a `~astropy.units.Quantity` with shape (3, ...) and length units.
Defaults to [0, 0, 0], meaning "true" Geocentric.
obsgeovel : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity`
The velocity of the observer relative to the center-of-mass of the
        Earth, oriented the same as BCRS/ICRS. Either [0, 0, 0],
`~astropy.coordinates.CartesianRepresentation`, or proper input for one,
i.e., a `~astropy.units.Quantity` with shape (3, ...) and velocity
units. Defaults to [0, 0, 0], meaning "true" Geocentric.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer_prec_geo)
class PrecessedGeocentric(BaseRADecFrame):
"""
A coordinate frame defined in a similar manner as GCRS, but precessed to a
requested (mean) equinox. Note that this does *not* end up the same as
regular GCRS even for J2000 equinox, because the GCRS orientation is fixed
to that of ICRS, which is not quite the same as the dynamical J2000
orientation.
The frame attributes are listed under **Other Parameters**
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
obsgeoloc = CartesianRepresentationAttribute(default=[0, 0, 0], unit=u.m)
obsgeovel = CartesianRepresentationAttribute(default=[0, 0, 0], unit=u.m/u.s)
|
8028b9fdcc436e6412a0b50ac1b41e774d00d90c2f458d1f94943eecb1ba96af | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import BaseCoordinateFrame, base_doc
from astropy.coordinates.attributes import TimeAttribute, QuantityAttribute
from .utils import EQUINOX_J2000, DEFAULT_OBSTIME
__all__ = ['GeocentricMeanEcliptic', 'BarycentricMeanEcliptic',
'HeliocentricMeanEcliptic', 'BaseEclipticFrame',
'GeocentricTrueEcliptic', 'BarycentricTrueEcliptic',
'HeliocentricTrueEcliptic',
'HeliocentricEclipticIAU76', 'CustomBarycentricEcliptic']
doc_components_ecl = """
lon : `~astropy.coordinates.Angle`, optional, keyword-only
The ecliptic longitude for this object (``lat`` must also be given and
``representation`` must be None).
lat : `~astropy.coordinates.Angle`, optional, keyword-only
The ecliptic latitude for this object (``lon`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The distance for this object from the {0}.
(``representation`` must be None).
    pm_lon_coslat : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in the ecliptic longitude (including the ``cos(lat)``
factor) for this object (``pm_lat`` must also be given).
pm_lat : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in the ecliptic latitude for this object
(``pm_lon_coslat`` must also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object.
"""
@format_doc(base_doc,
components=doc_components_ecl.format('specified location'),
footer="")
class BaseEclipticFrame(BaseCoordinateFrame):
"""
A base class for frames that have names and conventions like that of
ecliptic frames.
.. warning::
In the current version of astropy, the ecliptic frames do not yet have
        stringent accuracy tests. We recommend that you test against
        "known-good" cases to ensure these frames are what you are looking for
        (and then, ideally, contribute those tests to Astropy!).
"""
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
doc_footer_geo = """
Other parameters
----------------
equinox : `~astropy.time.Time`, optional
The date to assume for this frame. Determines the location of the
x-axis and the location of the Earth (necessary for transformation to
non-geocentric systems). Defaults to the 'J2000' equinox.
obstime : `~astropy.time.Time`, optional
The time at which the observation is taken. Used for determining the
position of the Earth. Defaults to J2000.
"""
@format_doc(base_doc, components=doc_components_ecl.format('geocenter'),
footer=doc_footer_geo)
class GeocentricMeanEcliptic(BaseEclipticFrame):
"""
    Geocentric mean ecliptic coordinates. The origin of the coordinates is the
geocenter (Earth), with the x axis pointing to the *mean* (not true) equinox
at the time specified by the ``equinox`` attribute, and the xy-plane in the
plane of the ecliptic for that date.
Be aware that the definition of "geocentric" here means that this frame
*includes* light deflection from the sun, aberration, etc when transforming
to/from e.g. ICRS.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@format_doc(base_doc, components=doc_components_ecl.format('geocenter'),
footer=doc_footer_geo)
class GeocentricTrueEcliptic(BaseEclipticFrame):
"""
    Geocentric true ecliptic coordinates. The origin of the coordinates is the
geocenter (Earth), with the x axis pointing to the *true* (not mean) equinox
at the time specified by the ``equinox`` attribute, and the xy-plane in the
plane of the ecliptic for that date.
Be aware that the definition of "geocentric" here means that this frame
*includes* light deflection from the sun, aberration, etc when transforming
to/from e.g. ICRS.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
doc_footer_bary = """
Other parameters
----------------
equinox : `~astropy.time.Time`, optional
The date to assume for this frame. Determines the location of the
x-axis and the location of the Earth and Sun.
Defaults to the 'J2000' equinox.
"""
@format_doc(base_doc, components=doc_components_ecl.format("barycenter"),
footer=doc_footer_bary)
class BarycentricMeanEcliptic(BaseEclipticFrame):
"""
    Barycentric mean ecliptic coordinates. The origin of the coordinates is the
barycenter of the solar system, with the x axis pointing in the direction of
the *mean* (not true) equinox as at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
@format_doc(base_doc, components=doc_components_ecl.format("barycenter"),
footer=doc_footer_bary)
class BarycentricTrueEcliptic(BaseEclipticFrame):
"""
    Barycentric true ecliptic coordinates. The origin of the coordinates is the
barycenter of the solar system, with the x axis pointing in the direction of
the *true* (not mean) equinox as at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
doc_footer_helio = """
Other parameters
----------------
equinox : `~astropy.time.Time`, optional
The date to assume for this frame. Determines the location of the
x-axis and the location of the Earth and Sun.
Defaults to the 'J2000' equinox.
obstime : `~astropy.time.Time`, optional
The time at which the observation is taken. Used for determining the
position of the Sun. Defaults to J2000.
"""
@format_doc(base_doc, components=doc_components_ecl.format("sun's center"),
footer=doc_footer_helio)
class HeliocentricMeanEcliptic(BaseEclipticFrame):
"""
    Heliocentric mean ecliptic coordinates. The origin of the coordinates is the
center of the sun, with the x axis pointing in the direction of
the *mean* (not true) equinox as at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@format_doc(base_doc, components=doc_components_ecl.format("sun's center"),
footer=doc_footer_helio)
class HeliocentricTrueEcliptic(BaseEclipticFrame):
"""
Heliocentric true ecliptic coordinates. The origin of these coordinates is the
center of the sun, with the x axis pointing in the direction of
the *true* (not mean) equinox at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@format_doc(base_doc, components=doc_components_ecl.format("sun's center"),
footer="")
class HeliocentricEclipticIAU76(BaseEclipticFrame):
"""
Heliocentric mean (IAU 1976) ecliptic coordinates. The origin of these coordinates is the
center of the sun, with the x axis pointing in the direction of
the *mean* (not true) equinox of J2000, and the xy-plane in the plane of the
ecliptic of J2000 (according to the IAU 1976/1980 obliquity model).
It has, therefore, a fixed equinox and an older obliquity value
than the rest of the frames.
The frame attributes are listed under **Other Parameters**.
"""
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@format_doc(base_doc, components=doc_components_ecl.format("barycenter"),
footer="")
class CustomBarycentricEcliptic(BaseEclipticFrame):
"""
Barycentric ecliptic coordinates with custom obliquity.
The origin of these coordinates is the
barycenter of the solar system, with the x axis pointing in the direction of
the *mean* (not true) equinox of J2000, and the xy-plane in the plane of the
ecliptic tilted a custom obliquity angle.
The frame attributes are listed under **Other Parameters**.
"""
obliquity = QuantityAttribute(default=84381.448 * u.arcsec, unit=u.arcsec)
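# A usage sketch (obliquity value arbitrary): ``obliquity`` is a
# QuantityAttribute stored in arcsec, so any convertible angular unit works.
#
#     frame = CustomBarycentricEcliptic(obliquity=23.44 * u.deg)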
|
a9d489f95db23609770f711b851723b557f04ce9b9ba17f4cb8a391cb43c6422 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.utils.decorators import format_doc
from astropy.coordinates.representation import CartesianRepresentation, CartesianDifferential
from astropy.coordinates.baseframe import BaseCoordinateFrame, base_doc
from astropy.coordinates.attributes import TimeAttribute
from .utils import DEFAULT_OBSTIME
__all__ = ['ITRS']
@format_doc(base_doc, components="", footer="")
class ITRS(BaseCoordinateFrame):
"""
A coordinate or frame in the International Terrestrial Reference System
(ITRS). This is approximately a geocentric system, although strictly it is
defined by a series of reference locations near the surface of the Earth.
For more background on the ITRS, see the references provided in the
:ref:`astropy:astropy-coordinates-seealso` section of the documentation.
"""
default_representation = CartesianRepresentation
default_differential = CartesianDifferential
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@property
def earth_location(self):
"""
The data in this frame as an `~astropy.coordinates.EarthLocation` class.
"""
from astropy.coordinates.earth import EarthLocation
cart = self.represent_as(CartesianRepresentation)
return EarthLocation(x=cart.x, y=cart.y, z=cart.z)
# Self-transform is in intermediate_rotation_transforms.py with all the other
# ITRS transforms
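# A usage sketch (location and epoch arbitrary): round-trip between
# EarthLocation and ITRS via the ``earth_location`` property above.
#
#     from astropy import units as u
#     from astropy.coordinates import EarthLocation
#     loc = EarthLocation(lat=52.2*u.deg, lon=0.12*u.deg, height=20*u.m)
#     itrs = loc.get_itrs(obstime='J2015.5')
#     itrs.earth_location.geodetic  # recovers lon/lat/height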
|
e661c03107dc1346c629eec1244847da35f977b6dcb0f87f5af57a276d9a77e6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting to "observed" systems from ICRS.
"""
import erfa
from astropy import units as u
from astropy.coordinates.builtin_frames.utils import atciqz, aticq
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.transformations import FunctionTransformWithFiniteDifference
from astropy.coordinates.representation import (SphericalRepresentation,
CartesianRepresentation,
UnitSphericalRepresentation)
from .icrs import ICRS
from .altaz import AltAz
from .hadec import HADec
from .utils import PIOVER2
from ..erfa_astrom import erfa_astrom
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, AltAz)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, HADec)
def icrs_to_observed(icrs_coo, observed_frame):
# if the data are UnitSphericalRepresentation, we can skip the distance calculations
is_unitspherical = (isinstance(icrs_coo.data, UnitSphericalRepresentation) or
icrs_coo.cartesian.x.unit == u.one)
# first set up the astrometry context for ICRS<->observed
astrom = erfa_astrom.get().apco(observed_frame)
# correct for parallax to find BCRS direction from observer (as in erfa.pmpx)
if is_unitspherical:
srepr = icrs_coo.spherical
else:
observer_icrs = CartesianRepresentation(astrom['eb'], unit=u.au, xyz_axis=-1, copy=False)
srepr = (icrs_coo.cartesian - observer_icrs).represent_as(
SphericalRepresentation)
# convert to topocentric CIRS
cirs_ra, cirs_dec = atciqz(srepr, astrom)
# now perform observed conversion
if isinstance(observed_frame, AltAz):
lon, zen, _, _, _ = erfa.atioq(cirs_ra, cirs_dec, astrom)
lat = PIOVER2 - zen
else:
_, _, lon, lat, _ = erfa.atioq(cirs_ra, cirs_dec, astrom)
if is_unitspherical:
obs_srepr = UnitSphericalRepresentation(lon << u.radian, lat << u.radian, copy=False)
else:
obs_srepr = SphericalRepresentation(lon << u.radian, lat << u.radian, srepr.distance, copy=False)
return observed_frame.realize_frame(obs_srepr)
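# A usage sketch of the transform above (site and epoch arbitrary):
#
#     from astropy import units as u
#     from astropy.time import Time
#     from astropy.coordinates import SkyCoord, EarthLocation
#     loc = EarthLocation(lat=28.76*u.deg, lon=-17.88*u.deg, height=2300*u.m)
#     aa = SkyCoord(ra=83.63*u.deg, dec=22.01*u.deg, frame='icrs').transform_to(
#         AltAz(obstime=Time('2021-04-01T03:00'), location=loc))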
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, AltAz, ICRS)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, HADec, ICRS)
def observed_to_icrs(observed_coo, icrs_frame):
# if the data are UnitSphericalRepresentation, we can skip the distance calculations
is_unitspherical = (isinstance(observed_coo.data, UnitSphericalRepresentation) or
observed_coo.cartesian.x.unit == u.one)
usrepr = observed_coo.represent_as(UnitSphericalRepresentation)
lon = usrepr.lon.to_value(u.radian)
lat = usrepr.lat.to_value(u.radian)
if isinstance(observed_coo, AltAz):
# the 'A' indicates zen/az inputs
coord_type = 'A'
lat = PIOVER2 - lat
else:
coord_type = 'H'
# first set up the astrometry context for ICRS<->CIRS at the observed_coo time
astrom = erfa_astrom.get().apco(observed_coo)
# Topocentric CIRS
cirs_ra, cirs_dec = erfa.atoiq(coord_type, lon, lat, astrom) << u.radian
if is_unitspherical:
srepr = SphericalRepresentation(cirs_ra, cirs_dec, 1, copy=False)
else:
srepr = SphericalRepresentation(lon=cirs_ra, lat=cirs_dec,
distance=observed_coo.distance, copy=False)
# BCRS (Astrometric) direction to source
bcrs_ra, bcrs_dec = aticq(srepr, astrom) << u.radian
# Correct for parallax to get ICRS representation
if is_unitspherical:
icrs_srepr = UnitSphericalRepresentation(bcrs_ra, bcrs_dec, copy=False)
else:
icrs_srepr = SphericalRepresentation(lon=bcrs_ra, lat=bcrs_dec,
distance=observed_coo.distance, copy=False)
observer_icrs = CartesianRepresentation(astrom['eb'], unit=u.au, xyz_axis=-1, copy=False)
newrepr = icrs_srepr.to_cartesian() + observer_icrs
icrs_srepr = newrepr.represent_as(SphericalRepresentation)
return icrs_frame.realize_frame(icrs_srepr)
# Create loopback transformations
frame_transform_graph._add_merged_transform(AltAz, ICRS, AltAz)
frame_transform_graph._add_merged_transform(HADec, ICRS, HADec)
# for now we just implement this through ICRS to make sure we get everything
# covered
# Before, this was using CIRS as intermediate frame, however this is much
# slower than the direct observed<->ICRS transform added in 4.3
# due to how the frame attribute broadcasting works, see
# https://github.com/astropy/astropy/pull/10994#issuecomment-722617041
|
b4453a88648421fa2717f16aff45c6ec3324b42d2ad0fc3a8323d82eec4725c9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains functions/values used repeatedly in different modules of
the ``builtin_frames`` package.
"""
import warnings
import erfa
import numpy as np
from astropy import units as u
from astropy.time import Time
from astropy.coordinates.earth import EarthLocation
from astropy.utils import iers
from astropy.utils.exceptions import AstropyWarning
from ..representation import CartesianDifferential
# We use tt as the time scale for these equinoxes, primarily because it is the
# convention for J2000 (it is unclear if there is any "right answer" for B1950)
# while #8600 makes this the default behavior, we show it here to ensure it's
# clear which is used here
EQUINOX_J2000 = Time('J2000', scale='tt')
EQUINOX_B1950 = Time('B1950', scale='tt')
# This is a time object that is the default "obstime" when such an attribute is
# necessary. Currently, we use J2000.
DEFAULT_OBSTIME = Time('J2000', scale='tt')
# This is an EarthLocation that is the default "location" when such an attribute is
# necessary. It is the centre of the Earth.
EARTH_CENTER = EarthLocation(0*u.km, 0*u.km, 0*u.km)
PIOVER2 = np.pi / 2.
# comes from the mean of the 1962-2014 IERS B data
_DEFAULT_PM = (0.035, 0.29)*u.arcsec
def get_polar_motion(time):
"""
Get the two polar motion components in radians for use with apio.
"""
# Get the polar motion from the IERS table
iers_table = iers.earth_orientation_table.get()
xp, yp, status = iers_table.pm_xy(time, return_status=True)
wmsg = (
'Tried to get polar motions for times {} IERS data is '
'valid. Defaulting to polar motion from the 50-yr mean for those. '
'This may affect precision at the arcsec level. Please check your '
'astropy.utils.iers.conf.iers_auto_url and point it to a newer '
'version if necessary.'
)
if np.any(status == iers.TIME_BEFORE_IERS_RANGE):
xp[status == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[0]
yp[status == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[1]
warnings.warn(wmsg.format('before'), AstropyWarning)
if np.any(status == iers.TIME_BEYOND_IERS_RANGE):
xp[status == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[0]
yp[status == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[1]
warnings.warn(wmsg.format('after'), AstropyWarning)
return xp.to_value(u.radian), yp.to_value(u.radian)
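# A usage sketch: for times covered by the IERS tables this returns the
# interpolated polar-motion components in radians,
#
#     xp, yp = get_polar_motion(Time('2015-06-30'))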
def _warn_iers(ierserr):
"""
Generate a warning for an IERSRangeError.
Parameters
----------
ierserr : An `~astropy.utils.iers.IERSRangeError`
"""
msg = '{0} Assuming UT1-UTC=0 for coordinate transformations.'
warnings.warn(msg.format(ierserr.args[0]), AstropyWarning)
def get_dut1utc(time):
"""
This function is used to get UT1-UTC in coordinates because normally it
gives an error outside the IERS range, but in coordinates we want to allow
it to go through, but with a warning.
"""
try:
return time.delta_ut1_utc
except iers.IERSRangeError as e:
_warn_iers(e)
return np.zeros(time.shape)
def get_jd12(time, scale):
"""
Gets ``jd1`` and ``jd2`` from a time object in a particular scale.
Parameters
----------
time : `~astropy.time.Time`
The time to get the jds for
scale : str
The time scale to get the jds for
Returns
-------
jd1 : float
jd2 : float
"""
if time.scale == scale:
newtime = time
else:
try:
newtime = getattr(time, scale)
except iers.IERSRangeError as e:
_warn_iers(e)
newtime = time
return newtime.jd1, newtime.jd2
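# A usage sketch: split a Time into the two-part Julian date that ERFA
# routines expect, in the requested scale,
#
#     jd1, jd2 = get_jd12(Time('2020-01-01'), 'tt')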
def norm(p):
"""
Normalise a p-vector.
"""
return p / np.sqrt(np.einsum('...i,...i', p, p))[..., np.newaxis]
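# e.g. norm(np.array([3., 4., 0.])) -> array([0.6, 0.8, 0.])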
def pav2pv(p, v):
"""
Combine p- and v- vectors into a pv-vector.
"""
pv = np.empty(np.broadcast(p, v).shape[:-1], erfa.dt_pv)
pv['p'] = p
pv['v'] = v
return pv
def get_cip(jd1, jd2):
"""
Find the X, Y coordinates of the CIP and the CIO locator, s.
Parameters
----------
jd1 : float or `np.ndarray`
First part of two part Julian date (TDB)
jd2 : float or `np.ndarray`
Second part of two part Julian date (TDB)
Returns
-------
x : float or `np.ndarray`
x coordinate of the CIP
y : float or `np.ndarray`
y coordinate of the CIP
s : float or `np.ndarray`
CIO locator, s
"""
# classical NPB matrix, IAU 2006/2000A
rpnb = erfa.pnm06a(jd1, jd2)
# CIP X, Y coordinates from array
x, y = erfa.bpn2xy(rpnb)
# CIO locator, s
s = erfa.s06(jd1, jd2, x, y)
return x, y, s
def aticq(srepr, astrom):
"""
A slightly modified version of the ERFA function ``eraAticq``.
``eraAticq`` performs the transformations between two coordinate systems,
with the details of the transformation being encoded into the ``astrom`` array.
There are two issues with the version of aticq in ERFA. Both are associated
with the handling of light deflection.
The companion function ``eraAtciqz`` is meant to be its inverse. However, this
is not true for directions close to the Solar centre, since the light deflection
calculations are numerically unstable and therefore not reversible.
This version sidesteps that problem by artificially reducing the light deflection
for directions which are within 90 arcseconds of the Sun's position. This is the
same approach used by the ERFA functions above, except that they use a threshold of
9 arcseconds.
In addition, ERFA's aticq assumes a distant source, so there is no difference between
the object-Sun vector and the observer-Sun vector. This can lead to errors of up to a
few arcseconds in the worst case (e.g. a Venus transit).
Parameters
----------
srepr : `~astropy.coordinates.SphericalRepresentation`
Astrometric GCRS or CIRS position of object from observer
astrom : eraASTROM array
ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13``
Returns
-------
rc : float or `~numpy.ndarray`
Right Ascension in radians
dc : float or `~numpy.ndarray`
Declination in radians
"""
# ignore parallax effects if no distance, or far away
srepr_distance = srepr.distance
ignore_distance = srepr_distance.unit == u.one
# RA, Dec to cartesian unit vectors
pos = erfa.s2c(srepr.lon.radian, srepr.lat.radian)
# Bias-precession-nutation, giving GCRS proper direction.
ppr = erfa.trxp(astrom['bpn'], pos)
# Aberration, giving GCRS natural direction
d = np.zeros_like(ppr)
for j in range(2):
before = norm(ppr-d)
after = erfa.ab(before, astrom['v'], astrom['em'], astrom['bm1'])
d = after - before
pnat = norm(ppr-d)
# Light deflection by the Sun, giving BCRS coordinate direction
d = np.zeros_like(pnat)
for j in range(5):
before = norm(pnat-d)
if ignore_distance:
# No distance to object, assume a long way away
q = before
else:
# Find BCRS direction of Sun to object.
# astrom['eh'] and astrom['em'] contain Sun to observer unit vector,
# and distance, respectively.
eh = astrom['em'][..., np.newaxis] * astrom['eh']
# unit vector from Sun to object
q = eh + srepr_distance[..., np.newaxis].to_value(u.au) * before
sundist, q = erfa.pn(q)
sundist = sundist[..., np.newaxis]
# calculation above is extremely unstable very close to the sun
# in these situations, default back to ldsun-style behaviour,
# since this is reversible and drops to zero within stellar limb
q = np.where(sundist > 1.0e-10, q, before)
after = erfa.ld(1.0, before, q, astrom['eh'], astrom['em'], 1e-6)
d = after - before
pco = norm(pnat-d)
# ICRS astrometric RA, Dec
rc, dc = erfa.c2s(pco)
return erfa.anp(rc), dc
def atciqz(srepr, astrom):
"""
A slightly modified version of the ERFA function ``eraAtciqz``.
``eraAtciqz`` performs the transformations between two coordinate systems,
with the details of the transformation being encoded into the ``astrom`` array.
There are two issues with the version of atciqz in ERFA. Both are associated
with the handling of light deflection.
The companion function ``eraAticq`` is meant to be its inverse. However, this
is not true for directions close to the Solar centre, since the light deflection
calculations are numerically unstable and therefore not reversible.
This version sidesteps that problem by artificially reducing the light deflection
for directions which are within 90 arcseconds of the Sun's position. This is the
same approach used by the ERFA functions above, except that they use a threshold of
9 arcseconds.
In addition, ERFA's atciqz assumes a distant source, so there is no difference between
the object-Sun vector and the observer-Sun vector. This can lead to errors of up to a
few arcseconds in the worst case (e.g. a Venus transit).
Parameters
----------
srepr : `~astropy.coordinates.SphericalRepresentation`
Astrometric ICRS position of object from observer
astrom : eraASTROM array
ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13``
Returns
-------
ri : float or `~numpy.ndarray`
Right Ascension in radians
di : float or `~numpy.ndarray`
Declination in radians
"""
# ignore parallax effects if no distance, or far away
srepr_distance = srepr.distance
ignore_distance = srepr_distance.unit == u.one
# BCRS coordinate direction (unit vector).
pco = erfa.s2c(srepr.lon.radian, srepr.lat.radian)
# Find BCRS direction of Sun to object
if ignore_distance:
# No distance to object, assume a long way away
q = pco
else:
# Find BCRS direction of Sun to object.
# astrom['eh'] and astrom['em'] contain Sun to observer unit vector,
# and distance, respectively.
eh = astrom['em'][..., np.newaxis] * astrom['eh']
# unit vector from Sun to object
q = eh + srepr_distance[..., np.newaxis].to_value(u.au) * pco
sundist, q = erfa.pn(q)
sundist = sundist[..., np.newaxis]
# calculation above is extremely unstable very close to the sun
# in these situations, default back to ldsun-style behaviour,
# since this is reversible and drops to zero within stellar limb
q = np.where(sundist > 1.0e-10, q, pco)
# Light deflection by the Sun, giving BCRS natural direction.
pnat = erfa.ld(1.0, pco, q, astrom['eh'], astrom['em'], 1e-6)
# Aberration, giving GCRS proper direction.
ppr = erfa.ab(pnat, astrom['v'], astrom['em'], astrom['bm1'])
# Bias-precession-nutation, giving CIRS proper direction.
# Has no effect if matrix is identity matrix, in which case gives GCRS ppr.
pi = erfa.rxp(astrom['bpn'], ppr)
# CIRS (GCRS) RA, Dec
ri, di = erfa.c2s(pi)
return erfa.anp(ri), di
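# A round-trip sketch for the two functions above (``astrom`` assumed to come
# from an erfa_astrom context and ``srepr`` to be a SphericalRepresentation
# with a real distance, as in their docstrings): away from the solar limb,
# ``atciqz`` and ``aticq`` are mutual inverses,
#
#     ri, di = atciqz(srepr, astrom)
#     rc, dc = aticq(SphericalRepresentation(ri << u.radian, di << u.radian,
#                                            srepr.distance), astrom)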
def prepare_earth_position_vel(time):
"""
Get barycentric position and velocity, and heliocentric position of Earth
Parameters
----------
time : `~astropy.time.Time`
time at which to calculate position and velocity of Earth
Returns
-------
earth_pv : `np.ndarray`
Barycentric position and velocity of Earth, in au and au/day
earth_helio : `np.ndarray`
Heliocentric position of Earth in au
"""
# this goes here to avoid circular import errors
from astropy.coordinates.solar_system import (
get_body_barycentric,
get_body_barycentric_posvel,
solar_system_ephemeris,
)
# get barycentric position and velocity of earth
ephemeris = solar_system_ephemeris.get()
# if we are using the builtin erfa based ephemeris,
# we can use the fact that epv00 already provides all we need.
# This avoids calling epv00 twice, once
# in get_body_barycentric_posvel('earth') and once in
# get_body_barycentric('sun')
if ephemeris == 'builtin':
jd1, jd2 = get_jd12(time, 'tdb')
earth_pv_heliocentric, earth_pv = erfa.epv00(jd1, jd2)
earth_heliocentric = earth_pv_heliocentric['p']
# all other ephemeris providers probably don't have a shortcut like this
else:
earth_p, earth_v = get_body_barycentric_posvel('earth', time)
# get heliocentric position of earth, preparing it for passing to erfa.
sun = get_body_barycentric('sun', time)
earth_heliocentric = (earth_p - sun).get_xyz(xyz_axis=-1).to_value(u.au)
# Also prepare earth_pv for passing to erfa, which wants it as
# a structured dtype.
earth_pv = pav2pv(
earth_p.get_xyz(xyz_axis=-1).to_value(u.au),
earth_v.get_xyz(xyz_axis=-1).to_value(u.au / u.d)
)
return earth_pv, earth_heliocentric
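# A usage sketch:
#
#     earth_pv, earth_heliocentric = prepare_earth_position_vel(Time('J2020'))
#
# ``earth_pv`` is a structured pv array (au, au/day) that can be handed
# directly to ERFA routines such as erfa.apcs; ``earth_heliocentric`` is a
# plain (..., 3) array in au.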
def get_offset_sun_from_barycenter(time, include_velocity=False, reverse=False):
"""
Returns the offset of the Sun center from the solar-system barycenter (SSB).
Parameters
----------
time : `~astropy.time.Time`
Time at which to calculate the offset
include_velocity : `bool`
If ``True``, attach the velocity as a differential. Defaults to ``False``.
reverse : `bool`
If ``True``, return the offset of the barycenter from the Sun. Defaults to ``False``.
Returns
-------
`~astropy.coordinates.CartesianRepresentation`
The offset
"""
if include_velocity:
# Import here to avoid a circular import
from astropy.coordinates.solar_system import get_body_barycentric_posvel
offset_pos, offset_vel = get_body_barycentric_posvel('sun', time)
if reverse:
offset_pos, offset_vel = -offset_pos, -offset_vel
offset_vel = offset_vel.represent_as(CartesianDifferential)
offset_pos = offset_pos.with_differentials(offset_vel)
else:
# Import here to avoid a circular import
from astropy.coordinates.solar_system import get_body_barycentric
offset_pos = get_body_barycentric('sun', time)
if reverse:
offset_pos = -offset_pos
return offset_pos
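# A usage sketch: the Sun->SSB offset with the velocity attached as a
# CartesianDifferential (keyed by 's'),
#
#     offset = get_offset_sun_from_barycenter(Time('J2020'), include_velocity=True)
#     offset.differentials['s']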
|
3a1506042436940454badc3798c489b3217665475c92c29725464c6e37d9094b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting to/from ecliptic systems.
"""
import erfa
from astropy import units as u
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.transformations import (
FunctionTransformWithFiniteDifference, DynamicMatrixTransform,
AffineTransform,
)
from astropy.coordinates.matrix_utilities import (rotation_matrix,
matrix_product,
matrix_transpose)
from .icrs import ICRS
from .gcrs import GCRS
from .ecliptic import (GeocentricMeanEcliptic, BarycentricMeanEcliptic, HeliocentricMeanEcliptic,
GeocentricTrueEcliptic, BarycentricTrueEcliptic, HeliocentricTrueEcliptic,
HeliocentricEclipticIAU76, CustomBarycentricEcliptic)
from .utils import get_jd12, get_offset_sun_from_barycenter, EQUINOX_J2000
from astropy.coordinates.errors import UnitsError
def _mean_ecliptic_rotation_matrix(equinox):
# This code just calls ecm06, which uses the precession matrix according to the
# IAU 2006 model, but leaves out nutation. This brings the results closer to what
# other libraries give (see https://github.com/astropy/astropy/pull/6508).
return erfa.ecm06(*get_jd12(equinox, 'tt'))
def _true_ecliptic_rotation_matrix(equinox):
# This code calls the same routines as done in pnm06a from ERFA, which
# retrieves the precession matrix (including frame bias) according to
# the IAU 2006 model, and including the nutation.
# This family of systems is less popular
# (see https://github.com/astropy/astropy/pull/6508).
jd1, jd2 = get_jd12(equinox, 'tt')
# Here, we call the three routines from erfa.pnm06a separately,
# so that we can keep the nutation for calculating the true obliquity
# (which is a fairly expensive operation); see gh-11000.
# pnm06a: Fukushima-Williams angles for frame bias and precession.
# (ERFA names short for F-W's gamma_bar, phi_bar, psi_bar and epsilon_A).
gamb, phib, psib, epsa = erfa.pfw06(jd1, jd2)
# pnm06a: Nutation components (in longitude and obliquity).
dpsi, deps = erfa.nut06a(jd1, jd2)
# pnm06a: Equinox based nutation x precession x bias matrix.
rnpb = erfa.fw2m(gamb, phib, psib+dpsi, epsa+deps)
# calculate the true obliquity of the ecliptic
obl = erfa.obl06(jd1, jd2)+deps
return matrix_product(rotation_matrix(obl << u.radian, 'x'), rnpb)
def _obliquity_only_rotation_matrix(obl=erfa.obl80(EQUINOX_J2000.jd1, EQUINOX_J2000.jd2) * u.radian):
# This code only accounts for the obliquity,
# which can be passed explicitly.
# The default value is the IAU 1980 value for J2000,
# which is computed using obl80 from ERFA:
#
# obl = erfa.obl80(EQUINOX_J2000.jd1, EQUINOX_J2000.jd2) * u.radian
return rotation_matrix(obl, "x")
# MeanEcliptic frames
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
GCRS, GeocentricMeanEcliptic,
finite_difference_frameattr_name='equinox')
def gcrs_to_geoecliptic(gcrs_coo, to_frame):
# first get us to a 0 pos/vel GCRS at the target equinox
gcrs_coo2 = gcrs_coo.transform_to(GCRS(obstime=to_frame.obstime))
rmat = _mean_ecliptic_rotation_matrix(to_frame.equinox)
newrepr = gcrs_coo2.cartesian.transform(rmat)
return to_frame.realize_frame(newrepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GeocentricMeanEcliptic, GCRS)
def geoecliptic_to_gcrs(from_coo, gcrs_frame):
rmat = _mean_ecliptic_rotation_matrix(from_coo.equinox)
newrepr = from_coo.cartesian.transform(matrix_transpose(rmat))
gcrs = GCRS(newrepr, obstime=from_coo.obstime)
# now do any needed offsets (no-op if same obstime and 0 pos/vel)
return gcrs.transform_to(gcrs_frame)
@frame_transform_graph.transform(DynamicMatrixTransform, ICRS, BarycentricMeanEcliptic)
def icrs_to_baryecliptic(from_coo, to_frame):
return _mean_ecliptic_rotation_matrix(to_frame.equinox)
@frame_transform_graph.transform(DynamicMatrixTransform, BarycentricMeanEcliptic, ICRS)
def baryecliptic_to_icrs(from_coo, to_frame):
return matrix_transpose(icrs_to_baryecliptic(to_frame, from_coo))
_NEED_ORIGIN_HINT = ("The input {0} coordinates do not have length units. This "
"probably means you created coordinates with lat/lon but "
"no distance. Heliocentric<->ICRS transforms cannot "
"function in this case because there is an origin shift.")
@frame_transform_graph.transform(AffineTransform,
ICRS, HeliocentricMeanEcliptic)
def icrs_to_helioecliptic(from_coo, to_frame):
if not u.m.is_equivalent(from_coo.cartesian.x.unit):
raise UnitsError(_NEED_ORIGIN_HINT.format(from_coo.__class__.__name__))
# get the offset of the barycenter from the Sun
ssb_from_sun = get_offset_sun_from_barycenter(to_frame.obstime, reverse=True,
include_velocity=bool(from_coo.data.differentials))
# now compute the matrix to precess to the right orientation
rmat = _mean_ecliptic_rotation_matrix(to_frame.equinox)
return rmat, ssb_from_sun.transform(rmat)
@frame_transform_graph.transform(AffineTransform,
HeliocentricMeanEcliptic, ICRS)
def helioecliptic_to_icrs(from_coo, to_frame):
if not u.m.is_equivalent(from_coo.cartesian.x.unit):
raise UnitsError(_NEED_ORIGIN_HINT.format(from_coo.__class__.__name__))
# first un-precess from ecliptic to ICRS orientation
rmat = _mean_ecliptic_rotation_matrix(from_coo.equinox)
# now offset back to barycentric, which is the correct center for ICRS
sun_from_ssb = get_offset_sun_from_barycenter(from_coo.obstime,
include_velocity=bool(from_coo.data.differentials))
return matrix_transpose(rmat), sun_from_ssb
# TrueEcliptic frames
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
GCRS, GeocentricTrueEcliptic,
finite_difference_frameattr_name='equinox')
def gcrs_to_true_geoecliptic(gcrs_coo, to_frame):
# first get us to a 0 pos/vel GCRS at the target equinox
gcrs_coo2 = gcrs_coo.transform_to(GCRS(obstime=to_frame.obstime))
rmat = _true_ecliptic_rotation_matrix(to_frame.equinox)
newrepr = gcrs_coo2.cartesian.transform(rmat)
return to_frame.realize_frame(newrepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GeocentricTrueEcliptic, GCRS)
def true_geoecliptic_to_gcrs(from_coo, gcrs_frame):
rmat = _true_ecliptic_rotation_matrix(from_coo.equinox)
newrepr = from_coo.cartesian.transform(matrix_transpose(rmat))
gcrs = GCRS(newrepr, obstime=from_coo.obstime)
# now do any needed offsets (no-op if same obstime and 0 pos/vel)
return gcrs.transform_to(gcrs_frame)
@frame_transform_graph.transform(DynamicMatrixTransform, ICRS, BarycentricTrueEcliptic)
def icrs_to_true_baryecliptic(from_coo, to_frame):
return _true_ecliptic_rotation_matrix(to_frame.equinox)
@frame_transform_graph.transform(DynamicMatrixTransform, BarycentricTrueEcliptic, ICRS)
def true_baryecliptic_to_icrs(from_coo, to_frame):
return matrix_transpose(icrs_to_true_baryecliptic(to_frame, from_coo))
@frame_transform_graph.transform(AffineTransform,
ICRS, HeliocentricTrueEcliptic)
def icrs_to_true_helioecliptic(from_coo, to_frame):
if not u.m.is_equivalent(from_coo.cartesian.x.unit):
raise UnitsError(_NEED_ORIGIN_HINT.format(from_coo.__class__.__name__))
# get the offset of the barycenter from the Sun
ssb_from_sun = get_offset_sun_from_barycenter(to_frame.obstime, reverse=True,
include_velocity=bool(from_coo.data.differentials))
# now compute the matrix to precess to the right orientation
rmat = _true_ecliptic_rotation_matrix(to_frame.equinox)
return rmat, ssb_from_sun.transform(rmat)
@frame_transform_graph.transform(AffineTransform,
HeliocentricTrueEcliptic, ICRS)
def true_helioecliptic_to_icrs(from_coo, to_frame):
if not u.m.is_equivalent(from_coo.cartesian.x.unit):
raise UnitsError(_NEED_ORIGIN_HINT.format(from_coo.__class__.__name__))
# first un-precess from ecliptic to ICRS orientation
rmat = _true_ecliptic_rotation_matrix(from_coo.equinox)
# now offset back to barycentric, which is the correct center for ICRS
sun_from_ssb = get_offset_sun_from_barycenter(from_coo.obstime,
include_velocity=bool(from_coo.data.differentials))
return matrix_transpose(rmat), sun_from_ssb
# Other ecliptic frames
@frame_transform_graph.transform(AffineTransform,
HeliocentricEclipticIAU76, ICRS)
def ecliptic_to_iau76_icrs(from_coo, to_frame):
# first un-precess from ecliptic to ICRS orientation
rmat = _obliquity_only_rotation_matrix()
# now offset back to barycentric, which is the correct center for ICRS
sun_from_ssb = get_offset_sun_from_barycenter(from_coo.obstime,
include_velocity=bool(from_coo.data.differentials))
return matrix_transpose(rmat), sun_from_ssb
@frame_transform_graph.transform(AffineTransform,
ICRS, HeliocentricEclipticIAU76)
def icrs_to_iau76_ecliptic(from_coo, to_frame):
# get the offset of the barycenter from the Sun
ssb_from_sun = get_offset_sun_from_barycenter(to_frame.obstime, reverse=True,
include_velocity=bool(from_coo.data.differentials))
# now compute the matrix to precess to the right orientation
rmat = _obliquity_only_rotation_matrix()
return rmat, ssb_from_sun.transform(rmat)
@frame_transform_graph.transform(DynamicMatrixTransform,
ICRS, CustomBarycentricEcliptic)
def icrs_to_custombaryecliptic(from_coo, to_frame):
return _obliquity_only_rotation_matrix(to_frame.obliquity)
@frame_transform_graph.transform(DynamicMatrixTransform,
CustomBarycentricEcliptic, ICRS)
def custombaryecliptic_to_icrs(from_coo, to_frame):
return matrix_transpose(icrs_to_custombaryecliptic(to_frame, from_coo))
# Create loopback transformations
frame_transform_graph._add_merged_transform(GeocentricMeanEcliptic, ICRS, GeocentricMeanEcliptic)
frame_transform_graph._add_merged_transform(GeocentricTrueEcliptic, ICRS, GeocentricTrueEcliptic)
frame_transform_graph._add_merged_transform(HeliocentricMeanEcliptic, ICRS, HeliocentricMeanEcliptic)
frame_transform_graph._add_merged_transform(HeliocentricTrueEcliptic, ICRS, HeliocentricTrueEcliptic)
frame_transform_graph._add_merged_transform(HeliocentricEclipticIAU76, ICRS, HeliocentricEclipticIAU76)
frame_transform_graph._add_merged_transform(BarycentricMeanEcliptic, ICRS, BarycentricMeanEcliptic)
frame_transform_graph._add_merged_transform(BarycentricTrueEcliptic, ICRS, BarycentricTrueEcliptic)
frame_transform_graph._add_merged_transform(CustomBarycentricEcliptic, ICRS, CustomBarycentricEcliptic)
|
806632abd30b13eb3f2c969f60522c409ee5d82a34d4c278b38f1293f5b966f0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting from ICRS/HCRS to CIRS and
anything in between (currently that means GCRS)
"""
import numpy as np
from astropy import units as u
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.transformations import (
FunctionTransformWithFiniteDifference,
AffineTransform,
)
from astropy.coordinates.representation import (
SphericalRepresentation,
CartesianRepresentation,
UnitSphericalRepresentation,
CartesianDifferential,
)
from .icrs import ICRS
from .gcrs import GCRS
from .cirs import CIRS
from .hcrs import HCRS
from .utils import aticq, atciqz, get_offset_sun_from_barycenter
from ..erfa_astrom import erfa_astrom
# First the ICRS/CIRS related transforms
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, CIRS)
def icrs_to_cirs(icrs_coo, cirs_frame):
# first set up the astrometry context for ICRS<->CIRS
astrom = erfa_astrom.get().apco(cirs_frame)
if icrs_coo.data.get_name() == 'unitspherical' or icrs_coo.data.to_cartesian().x.unit == u.one:
# if no distance, just do the infinite-distance/no parallax calculation
srepr = icrs_coo.spherical
cirs_ra, cirs_dec = atciqz(srepr.without_differentials(), astrom)
newrep = UnitSphericalRepresentation(lat=u.Quantity(cirs_dec, u.radian, copy=False),
lon=u.Quantity(cirs_ra, u.radian, copy=False),
copy=False)
else:
# When there is a distance, we first offset for parallax to get the
# astrometric coordinate direction and *then* run the ERFA transform for
# no parallax/PM. This ensures reversibility and is more sensible for
# inside solar system objects
astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,
xyz_axis=-1, copy=False)
newcart = icrs_coo.cartesian - astrom_eb
srepr = newcart.represent_as(SphericalRepresentation)
cirs_ra, cirs_dec = atciqz(srepr.without_differentials(), astrom)
newrep = SphericalRepresentation(lat=u.Quantity(cirs_dec, u.radian, copy=False),
lon=u.Quantity(cirs_ra, u.radian, copy=False),
distance=srepr.distance, copy=False)
return cirs_frame.realize_frame(newrep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, ICRS)
def cirs_to_icrs(cirs_coo, icrs_frame):
# set up the astrometry context for ICRS<->cirs and then convert to
# astrometric coordinate direction
astrom = erfa_astrom.get().apco(cirs_coo)
srepr = cirs_coo.represent_as(SphericalRepresentation)
i_ra, i_dec = aticq(srepr.without_differentials(), astrom)
if cirs_coo.data.get_name() == 'unitspherical' or cirs_coo.data.to_cartesian().x.unit == u.one:
# if no distance, just use the coordinate direction to yield the
# infinite-distance/no parallax answer
newrep = UnitSphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
copy=False)
else:
# When there is a distance, apply the parallax/offset to the SSB as the
# last step - ensures round-tripping with the icrs_to_cirs transform
# the distance in intermedrep is *not* a real distance as it does not
# include the offset back to the SSB
intermedrep = SphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
distance=srepr.distance,
copy=False)
astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,
xyz_axis=-1, copy=False)
newrep = intermedrep + astrom_eb
return icrs_frame.realize_frame(newrep)
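# A round-trip usage sketch for the two transforms above (values arbitrary):
#
#     from astropy.coordinates import SkyCoord
#     icrs = SkyCoord(ra=1*u.deg, dec=2*u.deg, distance=3*u.au, frame='icrs')
#     back = icrs.transform_to(CIRS(obstime='J2021')).icrs  # ~= icrs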
# Now the GCRS-related transforms to/from ICRS
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, GCRS)
def icrs_to_gcrs(icrs_coo, gcrs_frame):
# first set up the astrometry context for ICRS<->GCRS.
astrom = erfa_astrom.get().apcs(gcrs_frame)
if icrs_coo.data.get_name() == 'unitspherical' or icrs_coo.data.to_cartesian().x.unit == u.one:
# if no distance, just do the infinite-distance/no parallax calculation
srepr = icrs_coo.represent_as(SphericalRepresentation)
gcrs_ra, gcrs_dec = atciqz(srepr.without_differentials(), astrom)
newrep = UnitSphericalRepresentation(lat=u.Quantity(gcrs_dec, u.radian, copy=False),
lon=u.Quantity(gcrs_ra, u.radian, copy=False),
copy=False)
else:
# When there is a distance, we first offset for parallax to get the
# BCRS coordinate direction and *then* run the ERFA transform for no
# parallax/PM. This ensures reversibility and is more sensible for
# inside solar system objects
astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,
xyz_axis=-1, copy=False)
newcart = icrs_coo.cartesian - astrom_eb
srepr = newcart.represent_as(SphericalRepresentation)
gcrs_ra, gcrs_dec = atciqz(srepr.without_differentials(), astrom)
newrep = SphericalRepresentation(lat=u.Quantity(gcrs_dec, u.radian, copy=False),
lon=u.Quantity(gcrs_ra, u.radian, copy=False),
distance=srepr.distance, copy=False)
return gcrs_frame.realize_frame(newrep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
GCRS, ICRS)
def gcrs_to_icrs(gcrs_coo, icrs_frame):
# set up the astrometry context for ICRS<->GCRS and then convert to BCRS
# coordinate direction
astrom = erfa_astrom.get().apcs(gcrs_coo)
srepr = gcrs_coo.represent_as(SphericalRepresentation)
i_ra, i_dec = aticq(srepr.without_differentials(), astrom)
if gcrs_coo.data.get_name() == 'unitspherical' or gcrs_coo.data.to_cartesian().x.unit == u.one:
# if no distance, just use the coordinate direction to yield the
# infinite-distance/no parallax answer
newrep = UnitSphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
copy=False)
else:
# When there is a distance, apply the parallax/offset to the SSB as the
# last step - ensures round-tripping with the icrs_to_gcrs transform
# the distance in intermedrep is *not* a real distance as it does not
# include the offset back to the SSB
intermedrep = SphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
distance=srepr.distance,
copy=False)
astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,
xyz_axis=-1, copy=False)
newrep = intermedrep + astrom_eb
return icrs_frame.realize_frame(newrep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, HCRS)
def gcrs_to_hcrs(gcrs_coo, hcrs_frame):
if np.any(gcrs_coo.obstime != hcrs_frame.obstime):
# if the GCRS obstime and HCRS obstime are not the same, we first
# have to move to a GCRS where they are.
frameattrs = gcrs_coo.get_frame_attr_names()
frameattrs['obstime'] = hcrs_frame.obstime
gcrs_coo = gcrs_coo.transform_to(GCRS(**frameattrs))
# set up the astrometry context for ICRS<->GCRS and then convert to ICRS
# coordinate direction
astrom = erfa_astrom.get().apcs(gcrs_coo)
srepr = gcrs_coo.represent_as(SphericalRepresentation)
i_ra, i_dec = aticq(srepr.without_differentials(), astrom)
# convert to Quantity objects
i_ra = u.Quantity(i_ra, u.radian, copy=False)
i_dec = u.Quantity(i_dec, u.radian, copy=False)
if gcrs_coo.data.get_name() == 'unitspherical' or gcrs_coo.data.to_cartesian().x.unit == u.one:
# if no distance, just use the coordinate direction to yield the
# infinite-distance/no parallax answer
newrep = UnitSphericalRepresentation(lat=i_dec, lon=i_ra, copy=False)
else:
# When there is a distance, apply the parallax/offset to the
# Heliocentre as the last step to ensure round-tripping with the
# hcrs_to_gcrs transform
# Note that the distance in intermedrep is *not* a real distance as it
# does not include the offset back to the Heliocentre
intermedrep = SphericalRepresentation(lat=i_dec, lon=i_ra,
distance=srepr.distance,
copy=False)
# astrom['eh'] and astrom['em'] contain Sun to observer unit vector,
# and distance, respectively. Shapes are (X) and (X,3), where (X) is the
# shape resulting from broadcasting the shape of the times object
# against the shape of the pv array.
# broadcast em to eh and scale eh
eh = astrom['eh'] * astrom['em'][..., np.newaxis]
eh = CartesianRepresentation(eh, unit=u.au, xyz_axis=-1, copy=False)
newrep = intermedrep.to_cartesian() + eh
return hcrs_frame.realize_frame(newrep)
_NEED_ORIGIN_HINT = ("The input {0} coordinates do not have length units. This "
"probably means you created coordinates with lat/lon but "
"no distance. Heliocentric<->ICRS transforms cannot "
"function in this case because there is an origin shift.")
@frame_transform_graph.transform(AffineTransform, HCRS, ICRS)
def hcrs_to_icrs(hcrs_coo, icrs_frame):
# this is just an origin translation so without a distance it cannot go ahead
if isinstance(hcrs_coo.data, UnitSphericalRepresentation):
raise u.UnitsError(_NEED_ORIGIN_HINT.format(hcrs_coo.__class__.__name__))
return None, get_offset_sun_from_barycenter(hcrs_coo.obstime,
include_velocity=bool(hcrs_coo.data.differentials))
@frame_transform_graph.transform(AffineTransform, ICRS, HCRS)
def icrs_to_hcrs(icrs_coo, hcrs_frame):
# this is just an origin translation so without a distance it cannot go ahead
if isinstance(icrs_coo.data, UnitSphericalRepresentation):
raise u.UnitsError(_NEED_ORIGIN_HINT.format(icrs_coo.__class__.__name__))
return None, get_offset_sun_from_barycenter(hcrs_frame.obstime, reverse=True,
include_velocity=bool(icrs_coo.data.differentials))
# Create loopback transformations
frame_transform_graph._add_merged_transform(CIRS, ICRS, CIRS)
# The CIRS<-> CIRS transform going through ICRS has a
# subtle implication that a point in CIRS is uniquely determined
# by the corresponding astrometric ICRS coordinate *at its
# current time*. This has some subtle implications in terms of GR, but
# is sort of glossed over in the current scheme because we are dropping
# distances anyway.
frame_transform_graph._add_merged_transform(GCRS, ICRS, GCRS)
frame_transform_graph._add_merged_transform(HCRS, ICRS, HCRS)
|
3f8a079d8f51d41036df5f5ba181ca688947d024e4240c628584df9d9c061039 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.coordinates.transformations import DynamicMatrixTransform, FunctionTransform
from astropy.coordinates.baseframe import (frame_transform_graph,
BaseCoordinateFrame)
from astropy.coordinates.attributes import CoordinateAttribute, QuantityAttribute
from astropy.coordinates.matrix_utilities import (rotation_matrix,
matrix_product,
matrix_transpose)
_skyoffset_cache = {}
def make_skyoffset_cls(framecls):
"""
Create a new class that is the sky offset frame for a specific class of
origin frame. If such a class has already been created for this frame, the
same class will be returned.
The new class will always have component names for spherical coordinates of
``lon``/``lat``.
Parameters
----------
framecls : `~astropy.coordinates.BaseCoordinateFrame` subclass
The class to create the SkyOffsetFrame of.
Returns
-------
skyoffsetframecls : class
The class for the new skyoffset frame.
Notes
-----
This function is necessary because Astropy's frame transformations depend
on connection between specific frame *classes*. So each type of frame
needs its own distinct skyoffset frame class. This function generates
just that class, as well as ensuring that only one example of such a class
actually gets created in any given python session.
"""
if framecls in _skyoffset_cache:
return _skyoffset_cache[framecls]
# Create a new SkyOffsetFrame subclass for this frame class.
name = 'SkyOffset' + framecls.__name__
_SkyOffsetFramecls = type(
name, (SkyOffsetFrame, framecls),
{'origin': CoordinateAttribute(frame=framecls, default=None),
# The following two have to be done because otherwise we use the
# defaults of SkyOffsetFrame set by BaseCoordinateFrame.
'_default_representation': framecls._default_representation,
'_default_differential': framecls._default_differential,
'__doc__': SkyOffsetFrame.__doc__,
})
@frame_transform_graph.transform(FunctionTransform, _SkyOffsetFramecls, _SkyOffsetFramecls)
def skyoffset_to_skyoffset(from_skyoffset_coord, to_skyoffset_frame):
"""Transform between two skyoffset frames."""
# This transform goes through the parent frames on each side.
# from_frame -> from_frame.origin -> to_frame.origin -> to_frame
intermediate_from = from_skyoffset_coord.transform_to(from_skyoffset_coord.origin)
intermediate_to = intermediate_from.transform_to(to_skyoffset_frame.origin)
return intermediate_to.transform_to(to_skyoffset_frame)
@frame_transform_graph.transform(DynamicMatrixTransform, framecls, _SkyOffsetFramecls)
def reference_to_skyoffset(reference_frame, skyoffset_frame):
"""Convert a reference coordinate to an sky offset frame."""
# Define rotation matrices along the position angle vector, and
# relative to the origin.
origin = skyoffset_frame.origin.spherical
mat1 = rotation_matrix(-skyoffset_frame.rotation, 'x')
mat2 = rotation_matrix(-origin.lat, 'y')
mat3 = rotation_matrix(origin.lon, 'z')
return matrix_product(mat1, mat2, mat3)
@frame_transform_graph.transform(DynamicMatrixTransform, _SkyOffsetFramecls, framecls)
def skyoffset_to_reference(skyoffset_coord, reference_frame):
"""Convert an sky offset frame coordinate to the reference frame"""
# use the forward transform, but just invert it
R = reference_to_skyoffset(reference_frame, skyoffset_coord)
# transpose is the inverse because R is a rotation matrix
return matrix_transpose(R)
_skyoffset_cache[framecls] = _SkyOffsetFramecls
return _SkyOffsetFramecls
class SkyOffsetFrame(BaseCoordinateFrame):
"""
A frame which is relative to some specific position and oriented to match
its frame.
SkyOffsetFrames always have component names for spherical coordinates
of ``lon``/``lat``, *not* the component names for the frame of ``origin``.
This is useful for calculating offsets and dithers in the frame of the sky
relative to an arbitrary position. Coordinates in this frame are both centered on the position specified by the
``origin`` coordinate, *and* they are oriented in the same manner as the
``origin`` frame. E.g., if ``origin`` is `~astropy.coordinates.ICRS`, this
object's ``lat`` will be pointed in the direction of Dec, while ``lon``
will point in the direction of RA.
For more on skyoffset frames, see :ref:`astropy:astropy-skyoffset-frames`.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
origin : coordinate-like
The coordinate which specifies the origin of this frame. Note that this
origin is used purely for on-sky location/rotation. It can have a
``distance`` but it will not be used by this ``SkyOffsetFrame``.
rotation : angle-like
The final rotation of the frame about the ``origin``. The sign of
the rotation is the left-hand rule. That is, an object at a
particular position angle in the un-rotated system will be sent to
the positive latitude (z) direction in the final frame.
Notes
-----
``SkyOffsetFrame`` is a factory class. That is, the objects that it
yields are *not* actually objects of class ``SkyOffsetFrame``. Instead,
distinct classes are created on-the-fly for whatever the frame class is
of ``origin``.
"""
rotation = QuantityAttribute(default=0, unit=u.deg)
origin = CoordinateAttribute(default=None, frame=None)
def __new__(cls, *args, **kwargs):
# We don't want to call this method if we've already set up
# a skyoffset frame for this class.
if not (issubclass(cls, SkyOffsetFrame) and cls is not SkyOffsetFrame):
# We get the origin argument, and handle it here.
try:
origin_frame = kwargs['origin']
except KeyError:
raise TypeError("Can't initialize an SkyOffsetFrame without origin= keyword.")
if hasattr(origin_frame, 'frame'):
origin_frame = origin_frame.frame
newcls = make_skyoffset_cls(origin_frame.__class__)
return newcls.__new__(newcls, *args, **kwargs)
# http://stackoverflow.com/questions/19277399/why-does-object-new-work-differently-in-these-three-cases
# See above for why this is necessary. Basically, because some child
# may override __new__, we must override it here to never pass
# arguments to the object.__new__ method.
if super().__new__ is object.__new__:
return super().__new__(cls)
return super().__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.origin is not None and not self.origin.has_data:
raise ValueError('The origin supplied to SkyOffsetFrame has no '
'data.')
if self.has_data:
self._set_skyoffset_data_lon_wrap_angle(self.data)
@staticmethod
def _set_skyoffset_data_lon_wrap_angle(data):
if hasattr(data, 'lon'):
data.lon.wrap_angle = 180. * u.deg
return data
def represent_as(self, base, s='base', in_frame_units=False):
"""
Ensure the wrap angle for any spherical
representations.
"""
data = super().represent_as(base, s, in_frame_units=in_frame_units)
self._set_skyoffset_data_lon_wrap_angle(data)
return data
def __reduce__(self):
return (_skyoffset_reducer, (self.origin,), self.__dict__)
def _skyoffset_reducer(origin):
return SkyOffsetFrame.__new__(SkyOffsetFrame, origin=origin)
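# A usage sketch (positions arbitrary): on-sky offsets of a target from a
# field center,
#
#     from astropy.coordinates import SkyCoord
#     center = SkyCoord(10*u.deg, 45*u.deg, frame='icrs')
#     target = SkyCoord(10.3*u.deg, 45.1*u.deg, frame='icrs')
#     offset = target.transform_to(SkyOffsetFrame(origin=center))
#     offset.lon, offset.lat  # offsets along the RA/Dec directions at center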
|
71fd6d91ae8d539c8cbc0b26243801b0352f3b990f1fa3bb2fd2165f77767051 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting to "observed" systems from CIRS.
"""
import numpy as np
import erfa
from astropy import units as u
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.transformations import FunctionTransformWithFiniteDifference
from astropy.coordinates.representation import (SphericalRepresentation,
UnitSphericalRepresentation)
from .cirs import CIRS
from .altaz import AltAz
from .hadec import HADec
from .utils import PIOVER2
from ..erfa_astrom import erfa_astrom
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, AltAz)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, HADec)
def cirs_to_observed(cirs_coo, observed_frame):
if (np.any(observed_frame.location != cirs_coo.location) or
np.any(cirs_coo.obstime != observed_frame.obstime)):
cirs_coo = cirs_coo.transform_to(CIRS(obstime=observed_frame.obstime,
location=observed_frame.location))
# if the data are UnitSphericalRepresentation, we can skip the distance calculations
is_unitspherical = (isinstance(cirs_coo.data, UnitSphericalRepresentation) or
cirs_coo.cartesian.x.unit == u.one)
# We used to do "astrometric" corrections here, but these are no longer necesssary
# CIRS has proper topocentric behaviour
usrepr = cirs_coo.represent_as(UnitSphericalRepresentation)
cirs_ra = usrepr.lon.to_value(u.radian)
cirs_dec = usrepr.lat.to_value(u.radian)
# first set up the astrometry context for CIRS<->observed
astrom = erfa_astrom.get().apio(observed_frame)
if isinstance(observed_frame, AltAz):
lon, zen, _, _, _ = erfa.atioq(cirs_ra, cirs_dec, astrom)
lat = PIOVER2 - zen
else:
_, _, lon, lat, _ = erfa.atioq(cirs_ra, cirs_dec, astrom)
if is_unitspherical:
rep = UnitSphericalRepresentation(lat=u.Quantity(lat, u.radian, copy=False),
lon=u.Quantity(lon, u.radian, copy=False),
copy=False)
else:
# since we've transformed to CIRS at the observatory location, just use CIRS distance
rep = SphericalRepresentation(lat=u.Quantity(lat, u.radian, copy=False),
lon=u.Quantity(lon, u.radian, copy=False),
distance=cirs_coo.distance,
copy=False)
return observed_frame.realize_frame(rep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, AltAz, CIRS)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, HADec, CIRS)
def observed_to_cirs(observed_coo, cirs_frame):
usrepr = observed_coo.represent_as(UnitSphericalRepresentation)
lon = usrepr.lon.to_value(u.radian)
lat = usrepr.lat.to_value(u.radian)
if isinstance(observed_coo, AltAz):
# the 'A' indicates zen/az inputs
coord_type = 'A'
lat = PIOVER2 - lat
else:
coord_type = 'H'
# first set up the astrometry context for ICRS<->CIRS at the observed_coo time
astrom = erfa_astrom.get().apio(observed_coo)
cirs_ra, cirs_dec = erfa.atoiq(coord_type, lon, lat, astrom) << u.radian
if isinstance(observed_coo.data, UnitSphericalRepresentation) or observed_coo.cartesian.x.unit == u.one:
distance = None
else:
distance = observed_coo.distance
cirs_at_aa_time = CIRS(ra=cirs_ra, dec=cirs_dec, distance=distance,
obstime=observed_coo.obstime,
location=observed_coo.location)
# this final transform may be a no-op if the obstimes and locations are the same
return cirs_at_aa_time.transform_to(cirs_frame)
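# A usage sketch (values arbitrary): CIRS -> AltAz through the transforms above,
#
#     from astropy.coordinates import SkyCoord, EarthLocation
#     loc = EarthLocation(lat=-30*u.deg, lon=20*u.deg)
#     aa = SkyCoord(ra=10*u.deg, dec=-45*u.deg, frame='cirs',
#                   obstime='J2021').transform_to(AltAz(obstime='J2021',
#                                                       location=loc))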
|
2314512ab555beed31ca2faecf3b6a96185ab0648ae9df7db463203d83c6ce0c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping, base_doc
from astropy.coordinates.attributes import (TimeAttribute,
QuantityAttribute,
EarthLocationAttribute)
__all__ = ['AltAz']
_90DEG = 90*u.deg
doc_components = """
az : `~astropy.coordinates.Angle`, optional, keyword-only
The Azimuth for this object (``alt`` must also be given and
``representation`` must be None).
alt : `~astropy.coordinates.Angle`, optional, keyword-only
The Altitude for this object (``az`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
pm_az_cosalt : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in azimuth (including the ``cos(alt)`` factor) for
this object (``pm_alt`` must also be given).
pm_alt : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in altitude for this object (``pm_az_cosalt`` must
also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object."""
doc_footer = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position and orientation of the Earth.
location : `~astropy.coordinates.EarthLocation`
The location on the Earth. This can be specified either as an
`~astropy.coordinates.EarthLocation` object or as anything that can be
transformed to an `~astropy.coordinates.ITRS` frame.
pressure : `~astropy.units.Quantity` ['pressure']
The atmospheric pressure as an `~astropy.units.Quantity` with pressure
units. This is necessary for performing refraction corrections.
Setting this to 0 (the default) will disable refraction calculations
when transforming to/from this frame.
temperature : `~astropy.units.Quantity` ['temperature']
The ground-level temperature as an `~astropy.units.Quantity` in
deg C. This is necessary for performing refraction corrections.
relative_humidity : `~astropy.units.Quantity` ['dimensionless'] or number
The relative humidity as a dimensionless quantity between 0 to 1.
This is necessary for performing refraction corrections.
obswl : `~astropy.units.Quantity` ['length']
The average wavelength of observations as an `~astropy.units.Quantity`
with length units. This is necessary for performing refraction
corrections.
Notes
-----
The refraction model is based on that implemented in ERFA, which is fast
but becomes inaccurate for altitudes below about 5 degrees. Near and below
altitudes of 0, it can even give meaningless answers, and in this case
transforming to AltAz and back to another frame can give highly discrepant
results. For much better numerical stability, leave the ``pressure`` at
``0`` (the default), thereby disabling the refraction correction and
yielding "topocentric" horizontal coordinates.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class AltAz(BaseCoordinateFrame):
"""
A coordinate or frame in the Altitude-Azimuth system (Horizontal
coordinates) with respect to the WGS84 ellipsoid. Azimuth is oriented
East of North (i.e., N=0, E=90 degrees). Altitude is also known as
elevation angle, so this frame is also known as the Azimuth-Elevation system.
This frame is assumed to *include* refraction effects if the ``pressure``
frame attribute is non-zero.
The frame attributes are listed under **Other Parameters**, which are
necessary for transforming from AltAz to some other system.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'az'),
RepresentationMapping('lat', 'alt')
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
obstime = TimeAttribute(default=None)
location = EarthLocationAttribute(default=None)
pressure = QuantityAttribute(default=0, unit=u.hPa)
temperature = QuantityAttribute(default=0, unit=u.deg_C)
relative_humidity = QuantityAttribute(default=0, unit=u.dimensionless_unscaled)
obswl = QuantityAttribute(default=1*u.micron, unit=u.micron)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def secz(self):
"""
Secant of the zenith angle for this coordinate, a common estimate of
the airmass.
"""
return 1/np.sin(self.alt)
@property
def zen(self):
"""
The zenith angle (or zenith distance / co-altitude) for this coordinate.
"""
return _90DEG.to(self.alt.unit) - self.alt
# self-transform defined in icrs_observed_transforms.py
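# A usage sketch of the two properties above (values arbitrary):
#
#     aa = AltAz(alt=30*u.deg, az=100*u.deg)
#     aa.secz  # 1/sin(30 deg) == 2
#     aa.zen   # 90 deg - 30 deg == 60 deg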
|
17cda771b23828e179ef0c8fa3b7b92415418ae4bb3345cb84a9752a064eb09d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping, base_doc
from astropy.coordinates.attributes import (TimeAttribute,
QuantityAttribute,
EarthLocationAttribute)
__all__ = ['HADec']
doc_components = """
ha : `~astropy.coordinates.Angle`, optional, keyword-only
The Hour Angle for this object (``dec`` must also be given and
``representation`` must be None).
dec : `~astropy.coordinates.Angle`, optional, keyword-only
The Declination for this object (``ha`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
pm_ha_cosdec : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in hour angle (including the ``cos(dec)`` factor) for
this object (``pm_dec`` must also be given).
pm_dec : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in declination for this object (``pm_ha_cosdec`` must
also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object."""
doc_footer = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position and orientation of the Earth.
location : `~astropy.coordinates.EarthLocation`
The location on the Earth. This can be specified either as an
`~astropy.coordinates.EarthLocation` object or as anything that can be
transformed to an `~astropy.coordinates.ITRS` frame.
pressure : `~astropy.units.Quantity` ['pressure']
The atmospheric pressure as an `~astropy.units.Quantity` with pressure
units. This is necessary for performing refraction corrections.
Setting this to 0 (the default) will disable refraction calculations
when transforming to/from this frame.
temperature : `~astropy.units.Quantity` ['temperature']
The ground-level temperature as an `~astropy.units.Quantity` in
deg C. This is necessary for performing refraction corrections.
relative_humidity : `~astropy.units.Quantity` ['dimensionless'] or number
The relative humidity as a dimensionless quantity between 0 and 1.
This is necessary for performing refraction corrections.
obswl : `~astropy.units.Quantity` ['length']
The average wavelength of observations as an `~astropy.units.Quantity`
with length units. This is necessary for performing refraction
corrections.
Notes
-----
The refraction model is based on that implemented in ERFA, which is fast
but becomes inaccurate for altitudes below about 5 degrees. Near and below
altitudes of 0, it can even give meaningless answers, and in this case
transforming to HADec and back to another frame can give highly discrepant
results. For much better numerical stability, leave the ``pressure`` at
``0`` (the default), thereby disabling the refraction correction and
yielding "topocentric" equatorial coordinates.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class HADec(BaseCoordinateFrame):
"""
A coordinate or frame in the Hour Angle-Declination system (Equatorial
coordinates) with respect to the WGS84 ellipsoid. Hour Angle is oriented
with respect to upper culmination such that the hour angle is negative to
the East and positive to the West.
This frame is assumed to *include* refraction effects if the ``pressure``
frame attribute is non-zero.
The frame attributes are listed under **Other Parameters**, which are
necessary for transforming from HADec to some other system.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'ha', u.hourangle),
RepresentationMapping('lat', 'dec')
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
obstime = TimeAttribute(default=None)
location = EarthLocationAttribute(default=None)
pressure = QuantityAttribute(default=0, unit=u.hPa)
temperature = QuantityAttribute(default=0, unit=u.deg_C)
relative_humidity = QuantityAttribute(default=0, unit=u.dimensionless_unscaled)
obswl = QuantityAttribute(default=1*u.micron, unit=u.micron)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.has_data:
self._set_data_lon_wrap_angle(self.data)
@staticmethod
def _set_data_lon_wrap_angle(data):
if hasattr(data, 'lon'):
data.lon.wrap_angle = 180. * u.deg
return data
def represent_as(self, base, s='base', in_frame_units=False):
"""
Ensure the wrap angle for any spherical
representations.
"""
data = super().represent_as(base, s, in_frame_units=in_frame_units)
self._set_data_lon_wrap_angle(data)
return data
# self-transform defined in icrs_observed_transforms.py
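# Illustrative usage sketch (as a comment; values are arbitrary examples).
# Hour angles are wrapped into (-180 deg, +180 deg], i.e. (-12h, +12h]:
#
# >>> import astropy.units as u
# >>> from astropy.coordinates import HADec
# >>> hd = HADec(ha=23*u.hourangle, dec=-2*u.deg)
# >>> hd.ha   # wrapped to -1 hourangle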
|
2d9c6858bf3d7b1e3c3f46d758f58388fa1e36c57905dcd1d40bbfd43070858b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.transformations import DynamicMatrixTransform
from astropy.coordinates.matrix_utilities import matrix_product, matrix_transpose
from .fk4 import FK4NoETerms
from .fk5 import FK5
from .utils import EQUINOX_B1950, EQUINOX_J2000
# FK5 to/from FK4 ------------------->
# B1950->J2000 matrix from Murray 1989 A&A 218,325 eqn 28
_B1950_TO_J2000_M = np.array(
[[0.9999256794956877, -0.0111814832204662, -0.0048590038153592],
[0.0111814832391717, 0.9999374848933135, -0.0000271625947142],
[0.0048590037723143, -0.0000271702937440, 0.9999881946023742]])
_FK4_CORR = np.array(
[[-0.0026455262, -1.1539918689, +2.1111346190],
[+1.1540628161, -0.0129042997, +0.0236021478],
[-2.1112979048, -0.0056024448, +0.0102587734]]) * 1.e-6
def _fk4_B_matrix(obstime):
"""
This is a correction term in the FK4 transformations because FK4 is a
rotating system - see Murray 89 eqn 29
"""
# Note this is *Julian century*, not Besselian
T = (obstime.jyear - 1950.) / 100.
if getattr(T, 'shape', ()):
# Ensure we broadcast possibly arrays of times properly.
T.shape += (1, 1)
return _B1950_TO_J2000_M + _FK4_CORR * T
# This transformation can't be static because the observation date is needed.
@frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, FK5)
def fk4_no_e_to_fk5(fk4noecoord, fk5frame):
# Correction terms for FK4 being a rotating system
B = _fk4_B_matrix(fk4noecoord.obstime)
# construct both precession matrices - if the equinoxes are B1950 and
# J2000, these are just identity matrices
pmat1 = fk4noecoord._precession_matrix(fk4noecoord.equinox, EQUINOX_B1950)
pmat2 = fk5frame._precession_matrix(EQUINOX_J2000, fk5frame.equinox)
return matrix_product(pmat2, B, pmat1)
# This transformation can't be static because the observation date is needed.
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, FK4NoETerms)
def fk5_to_fk4_no_e(fk5coord, fk4noeframe):
# Get transposed version of the rotating correction terms... so with the
# transpose this takes us from FK5/J2000 to FK4/B1950
B = matrix_transpose(_fk4_B_matrix(fk4noeframe.obstime))
# construct both precession matrices - if the equinoxes are B1950 and
# J2000, these are just identity matrices
pmat1 = fk5coord._precession_matrix(fk5coord.equinox, EQUINOX_J2000)
pmat2 = fk4noeframe._precession_matrix(EQUINOX_B1950, fk4noeframe.equinox)
return matrix_product(pmat2, B, pmat1)
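# Illustrative usage sketch (as a comment; values are arbitrary examples):
#
# >>> import astropy.units as u
# >>> from astropy.coordinates import FK4NoETerms, FK5
# >>> c = FK4NoETerms(ra=10*u.deg, dec=20*u.deg, obstime='B1960')
# >>> c.transform_to(FK5())   # applies _fk4_B_matrix('B1960') plus precession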
|
e417ee850c4fd437524d0545ea8afa9f3aed09996e959b39c1fabf2274eec441 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.time import Time
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import (BaseCoordinateFrame,
RepresentationMapping,
frame_transform_graph, base_doc)
from astropy.coordinates.transformations import AffineTransform
from astropy.coordinates.attributes import DifferentialAttribute
from .baseradec import BaseRADecFrame, doc_components as doc_components_radec
from .icrs import ICRS
from .galactic import Galactic
# For speed
J2000 = Time('J2000')
v_bary_Schoenrich2010 = r.CartesianDifferential([11.1, 12.24, 7.25]*u.km/u.s)
__all__ = ['LSR', 'GalacticLSR', 'LSRK', 'LSRD']
doc_footer_lsr = """
Other parameters
----------------
v_bary : `~astropy.coordinates.representation.CartesianDifferential`
The velocity of the solar system barycenter with respect to the LSR, in
Galactic cartesian velocity components.
"""
@format_doc(base_doc, components=doc_components_radec, footer=doc_footer_lsr)
class LSR(BaseRADecFrame):
r"""A coordinate or frame in the Local Standard of Rest (LSR).
This coordinate frame is axis-aligned and co-spatial with `ICRS`, but has
a velocity offset relative to the solar system barycenter to remove the
peculiar motion of the sun relative to the LSR. Roughly, the LSR is the mean
velocity of the stars in the solar neighborhood, but its precise definition
depends on the study. As defined in Schönrich et al. (2010):
"The LSR is the rest frame at the location of the Sun of a star that would
be on a circular orbit in the gravitational potential one would obtain by
azimuthally averaging away non-axisymmetric features in the actual Galactic
potential." No such orbit truly exists, but it is still a commonly used
velocity frame.
We use default values from Schönrich et al. (2010) for the barycentric
velocity relative to the LSR, which is defined in Galactic (right-handed)
cartesian velocity components
:math:`(U, V, W) = (11.1, 12.24, 7.25)~{{\rm km}}~{{\rm s}}^{{-1}}`. These
values are customizable via the ``v_bary`` argument which specifies the
velocity of the solar system barycenter with respect to the LSR.
The frame attributes are listed under **Other Parameters**.
"""
# frame attributes:
v_bary = DifferentialAttribute(default=v_bary_Schoenrich2010,
allowed_classes=[r.CartesianDifferential])
@frame_transform_graph.transform(AffineTransform, ICRS, LSR)
def icrs_to_lsr(icrs_coord, lsr_frame):
v_bary_gal = Galactic(lsr_frame.v_bary.to_cartesian())
v_bary_icrs = v_bary_gal.transform_to(icrs_coord)
v_offset = v_bary_icrs.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=v_offset)
return None, offset
@frame_transform_graph.transform(AffineTransform, LSR, ICRS)
def lsr_to_icrs(lsr_coord, icrs_frame):
v_bary_gal = Galactic(lsr_coord.v_bary.to_cartesian())
v_bary_icrs = v_bary_gal.transform_to(icrs_frame)
v_offset = v_bary_icrs.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=-v_offset)
return None, offset
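# Illustrative usage sketch (as a comment; coordinate and velocity values are
# arbitrary examples). The transform leaves positions unchanged and only
# offsets the velocity by the projected solar motion:
#
# >>> import astropy.units as u
# >>> from astropy.coordinates import ICRS, LSR
# >>> icrs = ICRS(ra=15*u.deg, dec=30*u.deg, distance=1*u.kpc,
# ...             pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
# ...             radial_velocity=20*u.km/u.s)
# >>> icrs.transform_to(LSR()).radial_velocity   # 20 km/s plus solar motion term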
# ------------------------------------------------------------------------------
doc_components_gal = """
l : `~astropy.coordinates.Angle`, optional, keyword-only
The Galactic longitude for this object (``b`` must also be given and
``representation`` must be None).
b : `~astropy.coordinates.Angle`, optional, keyword-only
The Galactic latitude for this object (``l`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
(``representation`` must be None).
pm_l_cosb : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Galactic longitude (including the ``cos(b)`` term)
for this object (``pm_b`` must also be given).
pm_b : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Galactic latitude for this object (``pm_l_cosb``
must also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object.
"""
@format_doc(base_doc, components=doc_components_gal, footer=doc_footer_lsr)
class GalacticLSR(BaseCoordinateFrame):
r"""A coordinate or frame in the Local Standard of Rest (LSR), axis-aligned
to the `Galactic` frame.
This coordinate frame is axis-aligned and co-spatial with `Galactic`, but has
a velocity offset relative to the solar system barycenter to remove the
peculiar motion of the sun relative to the LSR. Roughly, the LSR is the mean
velocity of the stars in the solar neighborhood, but its precise definition
depends on the study. As defined in Schönrich et al. (2010):
"The LSR is the rest frame at the location of the Sun of a star that would
be on a circular orbit in the gravitational potential one would obtain by
azimuthally averaging away non-axisymmetric features in the actual Galactic
potential." No such orbit truly exists, but it is still a commonly used
velocity frame.
We use default values from Schönrich et al. (2010) for the barycentric
velocity relative to the LSR, which is defined in Galactic (right-handed)
cartesian velocity components
:math:`(U, V, W) = (11.1, 12.24, 7.25)~{{\rm km}}~{{\rm s}}^{{-1}}`. These
values are customizable via the ``v_bary`` argument which specifies the
velocity of the solar system barycenter with respect to the LSR.
The frame attributes are listed under **Other Parameters**.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'l'),
RepresentationMapping('lat', 'b')
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
# frame attributes:
v_bary = DifferentialAttribute(default=v_bary_Schoenrich2010)
@frame_transform_graph.transform(AffineTransform, Galactic, GalacticLSR)
def galactic_to_galacticlsr(galactic_coord, lsr_frame):
v_bary_gal = Galactic(lsr_frame.v_bary.to_cartesian())
v_offset = v_bary_gal.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=v_offset)
return None, offset
@frame_transform_graph.transform(AffineTransform, GalacticLSR, Galactic)
def galacticlsr_to_galactic(lsr_coord, galactic_frame):
v_bary_gal = Galactic(lsr_coord.v_bary.to_cartesian())
v_offset = v_bary_gal.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=-v_offset)
return None, offset
# ------------------------------------------------------------------------------
# The LSRK velocity frame, defined as having a velocity of 20 km/s towards
# RA=270 Dec=30 (B1900) relative to the solar system Barycenter. This is defined
# in:
#
# Gordon 1975, Methods of Experimental Physics: Volume 12:
# Astrophysics, Part C: Radio Observations - Section 6.1.5.
class LSRK(BaseRADecFrame):
r"""
A coordinate or frame in the Kinematic Local Standard of Rest (LSRK).
This frame is defined as having a velocity of 20 km/s towards RA=270 Dec=30
(B1900) relative to the solar system Barycenter. This is defined in:
Gordon 1975, Methods of Experimental Physics: Volume 12:
Astrophysics, Part C: Radio Observations - Section 6.1.5.
This coordinate frame is axis-aligned and co-spatial with `ICRS`, but has
a velocity offset relative to the solar system barycenter to remove the
peculiar motion of the sun relative to the LSRK.
"""
# NOTE: To avoid a performance penalty at import time, we hard-code the ICRS
# offsets here. The code to generate the offsets is provided for reproducibility.
# GORDON1975_V_BARY = 20*u.km/u.s
# GORDON1975_DIRECTION = FK4(ra=270*u.deg, dec=30*u.deg, equinox='B1900')
# V_OFFSET_LSRK = ((GORDON1975_V_BARY * GORDON1975_DIRECTION.transform_to(ICRS()).data)
# .represent_as(r.CartesianDifferential))
V_OFFSET_LSRK = r.CartesianDifferential([0.28999706839034606,
-17.317264789717928,
10.00141199546947]*u.km/u.s)
ICRS_LSRK_OFFSET = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=V_OFFSET_LSRK)
LSRK_ICRS_OFFSET = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=-V_OFFSET_LSRK)
@frame_transform_graph.transform(AffineTransform, ICRS, LSRK)
def icrs_to_lsrk(icrs_coord, lsr_frame):
return None, ICRS_LSRK_OFFSET
@frame_transform_graph.transform(AffineTransform, LSRK, ICRS)
def lsrk_to_icrs(lsr_coord, icrs_frame):
return None, LSRK_ICRS_OFFSET
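# Illustrative usage sketch (as a comment; values are arbitrary examples):
#
# >>> import astropy.units as u
# >>> from astropy.coordinates import ICRS, LSRK
# >>> icrs = ICRS(ra=270*u.deg, dec=30*u.deg, distance=1*u.pc,
# ...             pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
# ...             radial_velocity=0*u.km/u.s)
# >>> icrs.transform_to(LSRK()).radial_velocity   # roughly +20 km/s for a
# ...                                             # source near the solar apex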
# ------------------------------------------------------------------------------
# The LSRD velocity frame, defined as a velocity of U=9 km/s, V=12 km/s,
# and W=7 km/s in Galactic coordinates or 16.552945 km/s
# towards l=53.13 b=25.02. This is defined in:
#
# Delhaye 1965, Solar Motion and Velocity Distribution of
# Common Stars.
class LSRD(BaseRADecFrame):
r"""
A coordinate or frame in the Dynamical Local Standard of Rest (LSRD).
This frame is defined as a velocity of U=9 km/s, V=12 km/s,
and W=7 km/s in Galactic coordinates or 16.552945 km/s
towards l=53.13 b=25.02. This is defined in:
Delhaye 1965, Solar Motion and Velocity Distribution of
Common Stars.
This coordinate frame is axis-aligned and co-spatial with `ICRS`, but has
a velocity offset relative to the solar system barycenter to remove the
peculiar motion of the sun relative to the LSRD.
"""
# NOTE: To avoid a performance penalty at import time, we hard-code the ICRS
# offsets here. The code to generate the offsets is provided for reproducibility.
# V_BARY_DELHAYE1965 = r.CartesianDifferential([9, 12, 7] * u.km/u.s)
# V_OFFSET_LSRD = (Galactic(V_BARY_DELHAYE1965.to_cartesian()).transform_to(ICRS()).data
# .represent_as(r.CartesianDifferential))
V_OFFSET_LSRD = r.CartesianDifferential([-0.6382306360182073,
-14.585424483191094,
7.8011572411006815]*u.km/u.s)
ICRS_LSRD_OFFSET = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=V_OFFSET_LSRD)
LSRD_ICRS_OFFSET = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=-V_OFFSET_LSRD)
@frame_transform_graph.transform(AffineTransform, ICRS, LSRD)
def icrs_to_lsrd(icrs_coord, lsr_frame):
return None, ICRS_LSRD_OFFSET
@frame_transform_graph.transform(AffineTransform, LSRD, ICRS)
def lsrd_to_icrs(lsr_coord, icrs_frame):
return None, LSRD_ICRS_OFFSET
# ------------------------------------------------------------------------------
# Create loopback transformations
frame_transform_graph._add_merged_transform(LSR, ICRS, LSR)
frame_transform_graph._add_merged_transform(GalacticLSR, Galactic, GalacticLSR)
|
884f38f1d60e583f31152f6f7c26cbd55845a3681e70d0e94fe006a9cf9d756e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates.matrix_utilities import (rotation_matrix,
matrix_product,
matrix_transpose)
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.transformations import DynamicMatrixTransform
from .fk5 import FK5
from .fk4 import FK4NoETerms
from .utils import EQUINOX_B1950, EQUINOX_J2000
from .galactic import Galactic
# Galactic to/from FK4/FK5 ----------------------->
# can't be static because the equinox is needed
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, Galactic)
def fk5_to_gal(fk5coord, galframe):
# need precess to J2000 first
pmat = fk5coord._precession_matrix(fk5coord.equinox, EQUINOX_J2000)
mat1 = rotation_matrix(180 - Galactic._lon0_J2000.degree, 'z')
mat2 = rotation_matrix(90 - Galactic._ngp_J2000.dec.degree, 'y')
mat3 = rotation_matrix(Galactic._ngp_J2000.ra.degree, 'z')
return matrix_product(mat1, mat2, mat3, pmat)
@frame_transform_graph.transform(DynamicMatrixTransform, Galactic, FK5)
def _gal_to_fk5(galcoord, fk5frame):
return matrix_transpose(fk5_to_gal(fk5frame, galcoord))
@frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, Galactic)
def fk4_to_gal(fk4coords, galframe):
mat1 = rotation_matrix(180 - Galactic._lon0_B1950.degree, 'z')
mat2 = rotation_matrix(90 - Galactic._ngp_B1950.dec.degree, 'y')
mat3 = rotation_matrix(Galactic._ngp_B1950.ra.degree, 'z')
matprec = fk4coords._precession_matrix(fk4coords.equinox, EQUINOX_B1950)
return matrix_product(mat1, mat2, mat3, matprec)
@frame_transform_graph.transform(DynamicMatrixTransform, Galactic, FK4NoETerms)
def gal_to_fk4(galcoords, fk4frame):
return matrix_transpose(fk4_to_gal(fk4frame, galcoords))
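# Illustrative usage sketch (as a comment; values are arbitrary examples):
#
# >>> import astropy.units as u
# >>> from astropy.coordinates import FK5, Galactic
# >>> FK5(ra=1*u.deg, dec=2*u.deg).transform_to(Galactic())   # rotation only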
|
90bb2888a4951b00d9440b2526e7092af1c40fd2744bf82bda5de478a82ab1a7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.coordinates.angles import Angle
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping, base_doc
# these are needed for defining the NGP
from .fk5 import FK5
from .fk4 import FK4NoETerms
__all__ = ['Galactic']
doc_components = """
l : `~astropy.coordinates.Angle`, optional, keyword-only
The Galactic longitude for this object (``b`` must also be given and
``representation`` must be None).
b : `~astropy.coordinates.Angle`, optional, keyword-only
The Galactic latitude for this object (``l`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
pm_l_cosb : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Galactic longitude (including the ``cos(b)`` term)
for this object (``pm_b`` must also be given).
pm_b : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Galactic latitude for this object (``pm_l_cosb``
must also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object.
"""
doc_footer = """
Notes
-----
.. [1] Blaauw, A.; Gum, C. S.; Pawsey, J. L.; Westerhout, G. (1960), "The
new I.A.U. system of galactic coordinates (1958 revision),"
`MNRAS, Vol 121, pp.123 <https://ui.adsabs.harvard.edu/abs/1960MNRAS.121..123B>`_.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class Galactic(BaseCoordinateFrame):
"""
A coordinate or frame in the Galactic coordinate system.
This frame is used in a variety of Galactic contexts because it has as its
x-y plane the plane of the Milky Way. The positive x direction (i.e., the
l=0, b=0 direction) points to the center of the Milky Way and the z-axis
points toward the North Galactic Pole (following the IAU's 1958 definition
[1]_). However, unlike the `~astropy.coordinates.Galactocentric` frame, the
*origin* of this frame in 3D space is the solar system barycenter, not
the center of the Milky Way.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'l'),
RepresentationMapping('lat', 'b')
],
r.CartesianRepresentation: [
RepresentationMapping('x', 'u'),
RepresentationMapping('y', 'v'),
RepresentationMapping('z', 'w')
],
r.CartesianDifferential: [
RepresentationMapping('d_x', 'U', u.km/u.s),
RepresentationMapping('d_y', 'V', u.km/u.s),
RepresentationMapping('d_z', 'W', u.km/u.s)
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
# North galactic pole and zeropoint of l in FK4/FK5 coordinates. Needed for
# transformations to/from FK4/5
# These are from the IAU's definition of galactic coordinates
_ngp_B1950 = FK4NoETerms(ra=192.25*u.degree, dec=27.4*u.degree)
_lon0_B1950 = Angle(123, u.degree)
# These are *not* from Reid & Brunthaler 2004 - instead, they were
# derived by doing:
#
# >>> FK4NoETerms(ra=192.25*u.degree, dec=27.4*u.degree).transform_to(FK5())
#
# This gives better consistency with other codes than using the values
# from Reid & Brunthaler 2004 and the best self-consistency between FK5
# -> Galactic and FK5 -> FK4 -> Galactic. The lon0 angle was found by
# optimizing the self-consistency.
_ngp_J2000 = FK5(ra=192.8594812065348*u.degree, dec=27.12825118085622*u.degree)
_lon0_J2000 = Angle(122.9319185680026, u.degree)
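# Illustrative usage sketch (as a comment; values are arbitrary examples):
#
# >>> import astropy.units as u
# >>> from astropy.coordinates import Galactic
# >>> g = Galactic(l=0*u.deg, b=0*u.deg, distance=1*u.kpc)
# >>> g.cartesian   # x (the ``u`` component) ~ 1 kpc, toward the Galactic centre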
|
9eb3283b936245259ea8d118871f746909cd21d83eed2d49436e05355da47196 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates.matrix_utilities import (rotation_matrix,
matrix_product,
matrix_transpose)
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.transformations import DynamicMatrixTransform
from .fk5 import FK5
from .icrs import ICRS
from .utils import EQUINOX_J2000
def _icrs_to_fk5_matrix():
"""
B-matrix from USNO Circular 179. Used by the ICRS->FK5 transformation
functions.
"""
eta0 = -19.9 / 3600000.
xi0 = 9.1 / 3600000.
da0 = -22.9 / 3600000.
m1 = rotation_matrix(-eta0, 'x')
m2 = rotation_matrix(xi0, 'y')
m3 = rotation_matrix(da0, 'z')
return matrix_product(m1, m2, m3)
# define this here because it only needs to be computed once
_ICRS_TO_FK5_J2000_MAT = _icrs_to_fk5_matrix()
@frame_transform_graph.transform(DynamicMatrixTransform, ICRS, FK5)
def icrs_to_fk5(icrscoord, fk5frame):
# ICRS is by design very close to J2000 equinox
pmat = fk5frame._precession_matrix(EQUINOX_J2000, fk5frame.equinox)
return matrix_product(pmat, _ICRS_TO_FK5_J2000_MAT)
# can't be static because the equinox is needed
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, ICRS)
def fk5_to_icrs(fk5coord, icrsframe):
# ICRS is by design very close to J2000 equinox
pmat = fk5coord._precession_matrix(fk5coord.equinox, EQUINOX_J2000)
return matrix_product(matrix_transpose(_ICRS_TO_FK5_J2000_MAT), pmat)
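# Illustrative usage sketch (as a comment; values are arbitrary examples).
# The frame bias is tiny, so ICRS and FK5(J2000) coordinates agree to a few
# tens of milliarcseconds:
#
# >>> import astropy.units as u
# >>> from astropy.coordinates import ICRS, FK5
# >>> f = ICRS(ra=10*u.deg, dec=20*u.deg).transform_to(FK5())
# >>> f.ra, f.dec   # differ from the ICRS values at the ~10 mas level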
|
753d12a6e4b34519933ca1db4524a47c2b91c44402929632f1a8245c034e6eda | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.utils.decorators import format_doc
from astropy.coordinates.attributes import (TimeAttribute,
EarthLocationAttribute)
from astropy.coordinates.baseframe import base_doc
from .baseradec import doc_components, BaseRADecFrame
from .utils import DEFAULT_OBSTIME, EARTH_CENTER
__all__ = ['CIRS']
doc_footer = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position of the Earth and its precession.
location : `~astropy.coordinates.EarthLocation`
The location on the Earth. This can be specified either as an
`~astropy.coordinates.EarthLocation` object or as anything that can be
transformed to an `~astropy.coordinates.ITRS` frame. The default is the
centre of the Earth.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class CIRS(BaseRADecFrame):
"""
A coordinate or frame in the Celestial Intermediate Reference System (CIRS).
The frame attributes are listed under **Other Parameters**.
"""
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
location = EarthLocationAttribute(default=EARTH_CENTER)
# The "self-transform" is defined in icrs_cirs_transformations.py, because in
# the current implementation it goes through ICRS (like GCRS)
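# Illustrative usage sketch (as a comment; values are arbitrary examples):
#
# >>> import astropy.units as u
# >>> from astropy.coordinates import ICRS, CIRS
# >>> ICRS(ra=10*u.deg, dec=20*u.deg).transform_to(CIRS(obstime='J2010'))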
|
a80e6045288fd1d21e2458d4928549c3bba8f07b0b912c322c0ab37761c1ce60 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Coordinate frames tied to the Equator and Equinox of Earth.
TEME is a True equator, Mean Equinox coordinate frame used in NORAD TLE
satellite files.
TETE is a True equator, True Equinox coordinate frame often called the
"apparent" coordinates. It is the same frame as used by JPL Horizons
and can be combined with Local Apparent Sidereal Time to calculate the
hour angle.
"""
from astropy.utils.decorators import format_doc
from astropy.coordinates.representation import (CartesianRepresentation, CartesianDifferential)
from astropy.coordinates.baseframe import BaseCoordinateFrame, base_doc
from astropy.coordinates.builtin_frames.baseradec import BaseRADecFrame, doc_components
from astropy.coordinates.attributes import TimeAttribute, EarthLocationAttribute
from .utils import DEFAULT_OBSTIME, EARTH_CENTER
__all__ = ['TEME', 'TETE']
doc_footer_teme = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the frame is defined. Used for determining the
position of the Earth.
"""
doc_footer_tete = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position of the Earth.
location : `~astropy.coordinates.EarthLocation`
The location on the Earth. This can be specified either as an
`~astropy.coordinates.EarthLocation` object or as anything that can be
transformed to an `~astropy.coordinates.ITRS` frame. The default is the
centre of the Earth.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer_tete)
class TETE(BaseRADecFrame):
"""
An equatorial coordinate or frame using the True Equator and True Equinox (TETE).
Equatorial coordinate frames measure RA with respect to the equinox and declination
with respect to the equator. The location of the equinox and equator vary due to
the gravitational torques on the oblate Earth. This variation is split into precession
and nutation, although really they are two aspects of a single phenomenon. The smooth,
long-term variation is known as precession, whilst smaller, periodic components are
called nutation.
Calculation of the true equator and equinox involves the application of both precession
and nutation, whilst only applying precession gives a mean equator and equinox.
TETE coordinates are often referred to as "apparent" coordinates, or
"apparent place". TETE is the apparent coordinate system used by JPL Horizons
and is the correct coordinate system to use when combining the right ascension
with local apparent sidereal time to calculate the apparent (TIRS) hour angle.
For more background on TETE, see the references provided in the
:ref:`astropy:astropy-coordinates-seealso` section of the documentation.
Of particular note are Sections 5 and 6 of
`USNO Circular 179 <https://arxiv.org/abs/astro-ph/0602086>`_ and
especially the diagram at the top of page 57.
This frame can also represent coordinates that are defined *relative* to the center
of the Earth but offset from it in both position and velocity. You may see such
non-geocentric coordinates referred to as "topocentric".
The frame attributes are listed under **Other Parameters**.
"""
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
location = EarthLocationAttribute(default=EARTH_CENTER)
# Self transform goes through ICRS and is defined in icrs_cirs_transforms.py
@format_doc(base_doc, components="", footer=doc_footer_teme)
class TEME(BaseCoordinateFrame):
"""
A coordinate or frame in the True Equator Mean Equinox frame (TEME).
This frame is a geocentric system similar to CIRS or geocentric apparent place,
except that the mean sidereal time is used to rotate from TIRS. TEME coordinates
are most often used in combination with orbital data for satellites in the
two-line element (TLE) format.
Different implementations of the TEME frame exist. For clarity, this frame follows the
conventions and relations to other frames that are set out in Vallado et al. (2006).
For more background on TEME, see the references provided in the
:ref:`astropy:astropy-coordinates-seealso` section of the documentation.
"""
default_representation = CartesianRepresentation
default_differential = CartesianDifferential
obstime = TimeAttribute()
# Transformation functions for getting to/from TEME and ITRS are in
# intermediate_rotation_transforms.py
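# Illustrative usage sketch (as a comment; the position vector is an arbitrary
# example, e.g. as might come from an SGP4 propagator):
#
# >>> import astropy.units as u
# >>> from astropy.time import Time
# >>> from astropy.coordinates import TEME, ITRS, CartesianRepresentation
# >>> t = Time('2020-01-01')
# >>> teme = TEME(CartesianRepresentation([7000, 0, 0]*u.km), obstime=t)
# >>> teme.transform_to(ITRS(obstime=t))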
|
d52e871639b9b67fc3d194da557a648c00d1eae999fb38120367817d7be92111 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
import pytest
import re
from astropy import units as u
from astropy.units import allclose
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.utils.exceptions import AstropyWarning
from astropy.time import Time
from astropy.coordinates import (
EarthLocation,
galactocentric_frame_defaults,
representation as r,
SkyCoord,
)
from astropy.coordinates.attributes import (
Attribute,
CoordinateAttribute,
DifferentialAttribute,
EarthLocationAttribute,
QuantityAttribute,
TimeAttribute,
)
from astropy.coordinates.baseframe import (
BaseCoordinateFrame,
RepresentationMapping
)
from astropy.coordinates.builtin_frames import (
AltAz,
HADec,
FK4,
FK5,
Galactic,
Galactocentric,
GCRS,
HCRS,
ICRS,
ITRS
)
from astropy.coordinates.representation import (
CartesianDifferential,
REPRESENTATION_CLASSES,
)
from .test_representation import unitphysics # this fixture is used below # noqa
def setup_function(func):
"""Copy original 'REPRESENTATIONCLASSES' as attribute in function."""
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
def teardown_function(func):
"""Reset REPRESENTATION_CLASSES to original value."""
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
def test_frame_attribute_descriptor():
"""Unit tests of the Attribute descriptor."""
class TestAttributes:
attr_none = Attribute()
attr_2 = Attribute(default=2)
attr_3_attr2 = Attribute(default=3, secondary_attribute='attr_2')
attr_none_attr2 = Attribute(default=None, secondary_attribute='attr_2')
attr_none_nonexist = Attribute(
default=None, secondary_attribute='nonexist'
)
t = TestAttributes()
# Defaults
assert t.attr_none is None
assert t.attr_2 == 2
assert t.attr_3_attr2 == 3
assert t.attr_none_attr2 == t.attr_2
assert t.attr_none_nonexist is None # No default and non-existent secondary attr
# Setting values via '_'-prefixed internal vars
# (as would normally be done in __init__)
t._attr_none = 10
assert t.attr_none == 10
t._attr_2 = 20
assert t.attr_2 == 20
assert t.attr_3_attr2 == 3
assert t.attr_none_attr2 == t.attr_2
t._attr_none_attr2 = 40
assert t.attr_none_attr2 == 40
# Make sure setting values via public attribute fails
with pytest.raises(AttributeError) as err:
t.attr_none = 5
assert 'Cannot set frame attribute' in str(err.value)
def test_frame_subclass_attribute_descriptor():
"""Unit test of the attribute descriptors in subclasses."""
_EQUINOX_B1980 = Time('B1980', scale='tai')
class MyFK4(FK4):
# equinox inherited from FK4, obstime overridden, and newattr is new
obstime = TimeAttribute(default=_EQUINOX_B1980)
newattr = Attribute(default='newattr')
mfk4 = MyFK4()
assert mfk4.equinox.value == 'B1950.000'
assert mfk4.obstime.value == 'B1980.000'
assert mfk4.newattr == 'newattr'
assert set(mfk4.get_frame_attr_names()) == {'equinox', 'obstime', 'newattr'}
mfk4 = MyFK4(equinox='J1980.0', obstime='J1990.0', newattr='world')
assert mfk4.equinox.value == 'J1980.000'
assert mfk4.obstime.value == 'J1990.000'
assert mfk4.newattr == 'world'
def test_frame_multiple_inheritance_attribute_descriptor():
"""
Ensure that all attributes are accumulated in case of inheritance from
multiple BaseCoordinateFrames. See
https://github.com/astropy/astropy/pull/11099#issuecomment-735829157
"""
class Frame1(BaseCoordinateFrame):
attr1 = Attribute()
class Frame2(BaseCoordinateFrame):
attr2 = Attribute()
class Frame3(Frame1, Frame2):
pass
assert len(Frame3.frame_attributes) == 2
assert 'attr1' in Frame3.frame_attributes
assert 'attr2' in Frame3.frame_attributes
# In case the same attribute exists in both frames, the one from the
# left-most class in the MRO should take precedence
class Frame4(BaseCoordinateFrame):
attr1 = Attribute()
attr2 = Attribute()
class Frame5(Frame1, Frame4):
pass
assert Frame5.frame_attributes['attr1'] is Frame1.frame_attributes['attr1']
assert Frame5.frame_attributes['attr2'] is Frame4.frame_attributes['attr2']
def test_differentialattribute():
# Test logic of passing input through to allowed class
vel = [1, 2, 3]*u.km/u.s
dif = r.CartesianDifferential(vel)
class TestFrame(BaseCoordinateFrame):
attrtest = DifferentialAttribute(
default=dif, allowed_classes=[r.CartesianDifferential])
frame1 = TestFrame()
frame2 = TestFrame(attrtest=dif)
frame3 = TestFrame(attrtest=vel)
assert np.all(frame1.attrtest.d_xyz == frame2.attrtest.d_xyz)
assert np.all(frame1.attrtest.d_xyz == frame3.attrtest.d_xyz)
# This shouldn't work if there is more than one allowed class:
class TestFrame2(BaseCoordinateFrame):
attrtest = DifferentialAttribute(
default=dif, allowed_classes=[r.CartesianDifferential,
r.CylindricalDifferential])
frame1 = TestFrame2()
frame2 = TestFrame2(attrtest=dif)
with pytest.raises(TypeError):
TestFrame2(attrtest=vel)
def test_create_data_frames():
# from repr
i1 = ICRS(r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc))
i2 = ICRS(r.UnitSphericalRepresentation(lon=1*u.deg, lat=2*u.deg))
# from preferred name
i3 = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.kpc)
i4 = ICRS(ra=1*u.deg, dec=2*u.deg)
assert i1.data.lat == i3.data.lat
assert i1.data.lon == i3.data.lon
assert i1.data.distance == i3.data.distance
assert i2.data.lat == i4.data.lat
assert i2.data.lon == i4.data.lon
# now make sure the preferred names work as properties
assert_allclose(i1.ra, i3.ra)
assert_allclose(i2.ra, i4.ra)
assert_allclose(i1.distance, i3.distance)
with pytest.raises(AttributeError):
i1.ra = [11.]*u.deg
def test_create_ordered_data():
TOL = 1e-10*u.deg
i = ICRS(1*u.deg, 2*u.deg)
assert (i.ra - 1*u.deg) < TOL
assert (i.dec - 2*u.deg) < TOL
g = Galactic(1*u.deg, 2*u.deg)
assert (g.l - 1*u.deg) < TOL
assert (g.b - 2*u.deg) < TOL
a = AltAz(1*u.deg, 2*u.deg)
assert (a.az - 1*u.deg) < TOL
assert (a.alt - 2*u.deg) < TOL
with pytest.raises(TypeError):
ICRS(1*u.deg, 2*u.deg, 1*u.deg, 2*u.deg)
with pytest.raises(TypeError):
sph = r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc)
ICRS(sph, 1*u.deg, 2*u.deg)
def test_create_nodata_frames():
i = ICRS()
assert len(i.get_frame_attr_names()) == 0
f5 = FK5()
assert f5.equinox == FK5.get_frame_attr_names()['equinox']
f4 = FK4()
assert f4.equinox == FK4.get_frame_attr_names()['equinox']
# obstime is special because it's a property that uses equinox if obstime is not set
assert f4.obstime in (FK4.get_frame_attr_names()['obstime'],
FK4.get_frame_attr_names()['equinox'])
def test_no_data_nonscalar_frames():
a1 = AltAz(obstime=Time('2012-01-01') + np.arange(10.) * u.day,
temperature=np.ones((3, 1)) * u.deg_C)
assert a1.obstime.shape == (3, 10)
assert a1.temperature.shape == (3, 10)
assert a1.shape == (3, 10)
with pytest.raises(ValueError) as exc:
AltAz(obstime=Time('2012-01-01') + np.arange(10.) * u.day,
temperature=np.ones((3,)) * u.deg_C)
assert 'inconsistent shapes' in str(exc.value)
def test_frame_repr():
i = ICRS()
assert repr(i) == '<ICRS Frame>'
f5 = FK5()
assert repr(f5).startswith('<FK5 Frame (equinox=')
i2 = ICRS(ra=1*u.deg, dec=2*u.deg)
i3 = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.kpc)
assert repr(i2) == ('<ICRS Coordinate: (ra, dec) in deg\n'
' (1., 2.)>')
assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n'
' (1., 2., 3.)>')
# try with arrays
i2 = ICRS(ra=[1.1, 2.1]*u.deg, dec=[2.1, 3.1]*u.deg)
i3 = ICRS(ra=[1.1, 2.1]*u.deg, dec=[-15.6, 17.1]*u.deg, distance=[11., 21.]*u.kpc)
assert repr(i2) == ('<ICRS Coordinate: (ra, dec) in deg\n'
' [(1.1, 2.1), (2.1, 3.1)]>')
assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n'
' [(1.1, -15.6, 11.), (2.1, 17.1, 21.)]>')
def test_frame_repr_vels():
i = ICRS(ra=1*u.deg, dec=2*u.deg,
pm_ra_cosdec=1*u.marcsec/u.yr, pm_dec=2*u.marcsec/u.yr)
# unit comes out as mas/yr because of the preferred units defined in the
# frame RepresentationMapping
assert repr(i) == ('<ICRS Coordinate: (ra, dec) in deg\n'
' (1., 2.)\n'
' (pm_ra_cosdec, pm_dec) in mas / yr\n'
' (1., 2.)>')
def test_converting_units():
# this is a regular expression that with split (see below) removes what's
# the decimal point to fix rounding problems
rexrepr = re.compile(r'(.*?=\d\.).*?( .*?=\d\.).*?( .*)')
# Use values that aren't subject to rounding down to X.9999...
i2 = ICRS(ra=2.*u.deg, dec=2.*u.deg)
i2_many = ICRS(ra=[2., 4.]*u.deg, dec=[2., -8.1]*u.deg)
# converting from FK5 to ICRS and back changes the *internal* representation,
# but it should still come out in the preferred form
i4 = i2.transform_to(FK5()).transform_to(ICRS())
i4_many = i2_many.transform_to(FK5()).transform_to(ICRS())
ri2 = ''.join(rexrepr.split(repr(i2)))
ri4 = ''.join(rexrepr.split(repr(i4)))
assert ri2 == ri4
assert i2.data.lon.unit != i4.data.lon.unit # Internal repr changed
ri2_many = ''.join(rexrepr.split(repr(i2_many)))
ri4_many = ''.join(rexrepr.split(repr(i4_many)))
assert ri2_many == ri4_many
assert i2_many.data.lon.unit != i4_many.data.lon.unit # Internal repr changed
# but that *shouldn't* hold if we turn off units for the representation
class FakeICRS(ICRS):
frame_specific_representation_info = {
'spherical': [RepresentationMapping('lon', 'ra', u.hourangle),
RepresentationMapping('lat', 'dec', None),
RepresentationMapping('distance', 'distance')] # should fall back to default of None unit
}
fi = FakeICRS(i4.data)
ri2 = ''.join(rexrepr.split(repr(i2)))
rfi = ''.join(rexrepr.split(repr(fi)))
rfi = re.sub('FakeICRS', 'ICRS', rfi) # Force frame name to match
assert ri2 != rfi
# the attributes should also get the right units
assert i2.dec.unit == i4.dec.unit
# unless no/explicitly given units
assert i2.dec.unit != fi.dec.unit
assert i2.ra.unit != fi.ra.unit
assert fi.ra.unit == u.hourangle
def test_representation_info():
class NewICRS1(ICRS):
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'rara', u.hourangle),
RepresentationMapping('lat', 'decdec', u.degree),
RepresentationMapping('distance', 'distance', u.kpc)]
}
i1 = NewICRS1(rara=10*u.degree, decdec=-12*u.deg, distance=1000*u.pc,
pm_rara_cosdecdec=100*u.mas/u.yr,
pm_decdec=17*u.mas/u.yr,
radial_velocity=10*u.km/u.s)
assert allclose(i1.rara, 10*u.deg)
assert i1.rara.unit == u.hourangle
assert allclose(i1.decdec, -12*u.deg)
assert allclose(i1.distance, 1000*u.pc)
assert i1.distance.unit == u.kpc
assert allclose(i1.pm_rara_cosdecdec, 100*u.mas/u.yr)
assert allclose(i1.pm_decdec, 17*u.mas/u.yr)
# this should auto-set the names of UnitSpherical:
i1.set_representation_cls(r.UnitSphericalRepresentation,
s=r.UnitSphericalCosLatDifferential)
assert allclose(i1.rara, 10*u.deg)
assert allclose(i1.decdec, -12*u.deg)
assert allclose(i1.pm_rara_cosdecdec, 100*u.mas/u.yr)
assert allclose(i1.pm_decdec, 17*u.mas/u.yr)
# For backwards compatibility, we also support the string name in the
# representation info dictionary:
class NewICRS2(ICRS):
frame_specific_representation_info = {
'spherical': [
RepresentationMapping('lon', 'ang1', u.hourangle),
RepresentationMapping('lat', 'ang2', u.degree),
RepresentationMapping('distance', 'howfar', u.kpc)]
}
i2 = NewICRS2(ang1=10*u.degree, ang2=-12*u.deg, howfar=1000*u.pc)
assert allclose(i2.ang1, 10*u.deg)
assert i2.ang1.unit == u.hourangle
assert allclose(i2.ang2, -12*u.deg)
assert allclose(i2.howfar, 1000*u.pc)
assert i2.howfar.unit == u.kpc
# Test that the differential kwargs get overridden
class NewICRS3(ICRS):
frame_specific_representation_info = {
r.SphericalCosLatDifferential: [
RepresentationMapping('d_lon_coslat', 'pm_ang1', u.hourangle/u.year),
RepresentationMapping('d_lat', 'pm_ang2'),
RepresentationMapping('d_distance', 'vlos', u.kpc/u.Myr)]
}
i3 = NewICRS3(lon=10*u.degree, lat=-12*u.deg, distance=1000*u.pc,
pm_ang1=1*u.mas/u.yr, pm_ang2=2*u.mas/u.yr,
vlos=100*u.km/u.s)
assert allclose(i3.pm_ang1, 1*u.mas/u.yr)
assert i3.pm_ang1.unit == u.hourangle/u.year
assert allclose(i3.pm_ang2, 2*u.mas/u.yr)
assert allclose(i3.vlos, 100*u.km/u.s)
assert i3.vlos.unit == u.kpc/u.Myr
def test_realizing():
rep = r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc)
i = ICRS()
i2 = i.realize_frame(rep)
assert not i.has_data
assert i2.has_data
f = FK5(equinox=Time('J2001'))
f2 = f.realize_frame(rep)
assert not f.has_data
assert f2.has_data
assert f2.equinox == f.equinox
assert f2.equinox != FK5.get_frame_attr_names()['equinox']
# Check that a nicer error message is returned:
with pytest.raises(TypeError) as excinfo:
f.realize_frame(f.representation_type)
assert ('Class passed as data instead of a representation' in
excinfo.value.args[0])
def test_replicating():
i = ICRS(ra=[1]*u.deg, dec=[2]*u.deg)
icopy = i.replicate(copy=True)
irepl = i.replicate(copy=False)
i.data._lat[:] = 0*u.deg
assert np.all(i.data.lat == irepl.data.lat)
assert np.all(i.data.lat != icopy.data.lat)
iclone = i.replicate_without_data()
assert i.has_data
assert not iclone.has_data
aa = AltAz(alt=1*u.deg, az=2*u.deg, obstime=Time('J2000'))
aaclone = aa.replicate_without_data(obstime=Time('J2001'))
assert not aaclone.has_data
assert aa.obstime != aaclone.obstime
assert aa.pressure == aaclone.pressure
assert aa.obswl == aaclone.obswl
def test_getitem():
rep = r.SphericalRepresentation(
[1, 2, 3]*u.deg, [4, 5, 6]*u.deg, [7, 8, 9]*u.kpc)
i = ICRS(rep)
assert len(i.ra) == 3
iidx = i[1:]
assert len(iidx.ra) == 2
iidx2 = i[0]
assert iidx2.ra.isscalar
def test_transform():
"""
This test just makes sure the transform architecture works, but does *not*
actually test all the builtin transforms themselves are accurate.
"""
i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg)
f = i.transform_to(FK5())
i2 = f.transform_to(ICRS())
assert i2.data.__class__ == r.UnitSphericalRepresentation
assert_allclose(i.ra, i2.ra)
assert_allclose(i.dec, i2.dec)
i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc)
f = i.transform_to(FK5())
i2 = f.transform_to(ICRS())
assert i2.data.__class__ != r.UnitSphericalRepresentation
f = FK5(ra=1*u.deg, dec=2*u.deg, equinox=Time('J2001'))
f4 = f.transform_to(FK4())
f4_2 = f.transform_to(FK4(equinox=f.equinox))
# make sure attributes are copied over correctly
assert f4.equinox == FK4().equinox
assert f4_2.equinox == f.equinox
# make sure self-transforms also work
i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg)
i2 = i.transform_to(ICRS())
assert_allclose(i.ra, i2.ra)
assert_allclose(i.dec, i2.dec)
f = FK5(ra=1*u.deg, dec=2*u.deg, equinox=Time('J2001'))
f2 = f.transform_to(FK5()) # default equinox, so should be *different*
assert f2.equinox == FK5().equinox
with pytest.raises(AssertionError):
assert_allclose(f.ra, f2.ra)
with pytest.raises(AssertionError):
assert_allclose(f.dec, f2.dec)
# finally, check Galactic round-tripping
i1 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg)
i2 = i1.transform_to(Galactic()).transform_to(ICRS())
assert_allclose(i1.ra, i2.ra)
assert_allclose(i1.dec, i2.dec)
def test_transform_to_nonscalar_nodata_frame():
# https://github.com/astropy/astropy/pull/5254#issuecomment-241592353
times = Time('2016-08-23') + np.linspace(0, 10, 12)*u.day
coo1 = ICRS(ra=[[0.], [10.], [20.]]*u.deg,
dec=[[-30.], [30.], [60.]]*u.deg)
coo2 = coo1.transform_to(FK5(equinox=times))
assert coo2.shape == (3, 12)
def test_setitem_no_velocity():
"""Test different flavors of item setting for a Frame without a velocity.
"""
obstime = 'B1955'
sc0 = FK4([1, 2]*u.deg, [3, 4]*u.deg, obstime=obstime)
sc2 = FK4([10, 20]*u.deg, [30, 40]*u.deg, obstime=obstime)
sc1 = sc0.copy()
sc1_repr = repr(sc1)
assert 'representation' in sc1.cache
sc1[1] = sc2[0]
assert sc1.cache == {}
assert repr(sc2) != sc1_repr
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert sc1.obstime == sc2.obstime
assert sc1.name == 'fk4'
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
# Works for array-valued obstime so long as they are considered equivalent
sc1 = FK4(sc0.ra, sc0.dec, obstime=[obstime, obstime])
sc1[0] = sc2[0]
# Multidimensional coordinates
sc1 = FK4([[1, 2], [3, 4]] * u.deg, [[5, 6], [7, 8]] * u.deg)
sc2 = FK4([[10, 20], [30, 40]] * u.deg, [[50, 60], [70, 80]] * u.deg)
sc1[0] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [[10, 20], [3, 4]])
assert np.allclose(sc1.dec.to_value(u.deg), [[50, 60], [7, 8]])
def test_setitem_velocities():
"""Test different flavors of item setting for a Frame with a velocity.
"""
sc0 = FK4([1, 2]*u.deg, [3, 4]*u.deg, radial_velocity=[1, 2]*u.km/u.s,
obstime='B1950')
sc2 = FK4([10, 20]*u.deg, [30, 40]*u.deg, radial_velocity=[10, 20]*u.km/u.s,
obstime='B1950')
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [1, 10])
assert sc1.obstime == sc2.obstime
assert sc1.name == 'fk4'
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 10])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 20])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [20, 10])
def test_setitem_exceptions():
obstime = 'B1950'
sc0 = FK4([1, 2]*u.deg, [3, 4]*u.deg)
sc2 = FK4([10, 20]*u.deg, [30, 40]*u.deg, obstime=obstime)
sc1 = Galactic(sc0.ra, sc0.dec)
with pytest.raises(TypeError, match='can only set from object of same class: '
'Galactic vs. FK4'):
sc1[0] = sc2[0]
sc1 = FK4(sc0.ra, sc0.dec, obstime='B2001')
with pytest.raises(ValueError, match='can only set frame item from an equivalent frame'):
sc1[0] = sc2[0]
sc1 = FK4(sc0.ra[0], sc0.dec[0], obstime=obstime)
with pytest.raises(TypeError, match="scalar 'FK4' frame object does not support "
'item assignment'):
sc1[0] = sc2[0]
sc1 = FK4(obstime=obstime)
with pytest.raises(ValueError, match='cannot set frame which has no data'):
sc1[0] = sc2[0]
sc1 = FK4(sc0.ra, sc0.dec, obstime=[obstime, 'B1980'])
with pytest.raises(ValueError, match='can only set frame item from an equivalent frame'):
sc1[0] = sc2[0]
# Wrong shape
sc1 = FK4([sc0.ra], [sc0.dec], obstime=[obstime, 'B1980'])
with pytest.raises(ValueError, match='can only set frame item from an equivalent frame'):
sc1[0] = sc2[0]
def test_sep():
i1 = ICRS(ra=0*u.deg, dec=1*u.deg)
i2 = ICRS(ra=0*u.deg, dec=2*u.deg)
sep = i1.separation(i2)
assert_allclose(sep.deg, 1.)
i3 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc)
i4 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[4, 5]*u.kpc)
sep3d = i3.separation_3d(i4)
assert_allclose(sep3d.to(u.kpc), np.array([1, 1])*u.kpc)
# check that it works even with velocities
i5 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc,
pm_ra_cosdec=[1, 2]*u.mas/u.yr, pm_dec=[3, 4]*u.mas/u.yr,
radial_velocity=[5, 6]*u.km/u.s)
i6 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[7, 8]*u.kpc,
pm_ra_cosdec=[1, 2]*u.mas/u.yr, pm_dec=[3, 4]*u.mas/u.yr,
radial_velocity=[5, 6]*u.km/u.s)
sep3d = i5.separation_3d(i6)
assert_allclose(sep3d.to(u.kpc), np.array([2, 2])*u.kpc)
# 3d separations of dimensionless distances should still work
i7 = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.one)
i8 = ICRS(ra=1*u.deg, dec=2*u.deg, distance=4*u.one)
sep3d = i7.separation_3d(i8)
assert_allclose(sep3d, 1*u.one)
# but should fail with non-dimensionless
with pytest.raises(ValueError):
i7.separation_3d(i3)
def test_time_inputs():
"""
Test validation and conversion of inputs for equinox and obstime attributes.
"""
c = FK4(1 * u.deg, 2 * u.deg, equinox='J2001.5', obstime='2000-01-01 12:00:00')
assert c.equinox == Time('J2001.5')
assert c.obstime == Time('2000-01-01 12:00:00')
with pytest.raises(ValueError) as err:
c = FK4(1 * u.deg, 2 * u.deg, equinox=1.5)
assert 'Invalid time input' in str(err.value)
with pytest.raises(ValueError) as err:
c = FK4(1 * u.deg, 2 * u.deg, obstime='hello')
assert 'Invalid time input' in str(err.value)
# A vector time should work if the shapes match, but we don't automatically
# broadcast the basic data (just like time).
FK4([1, 2] * u.deg, [2, 3] * u.deg, obstime=['J2000', 'J2001'])
with pytest.raises(ValueError) as err:
FK4(1 * u.deg, 2 * u.deg, obstime=['J2000', 'J2001'])
assert 'shape' in str(err.value)
def test_is_frame_attr_default():
"""
Check that the `is_frame_attr_default` machinery works as expected
"""
c1 = FK5(ra=1*u.deg, dec=1*u.deg)
c2 = FK5(ra=1*u.deg, dec=1*u.deg, equinox=FK5.get_frame_attr_names()['equinox'])
c3 = FK5(ra=1*u.deg, dec=1*u.deg, equinox=Time('J2001.5'))
assert c1.equinox == c2.equinox
assert c1.equinox != c3.equinox
assert c1.is_frame_attr_default('equinox')
assert not c2.is_frame_attr_default('equinox')
assert not c3.is_frame_attr_default('equinox')
c4 = c1.realize_frame(r.UnitSphericalRepresentation(3*u.deg, 4*u.deg))
c5 = c2.realize_frame(r.UnitSphericalRepresentation(3*u.deg, 4*u.deg))
assert c4.is_frame_attr_default('equinox')
assert not c5.is_frame_attr_default('equinox')
def test_altaz_attributes():
aa = AltAz(1*u.deg, 2*u.deg)
assert aa.obstime is None
assert aa.location is None
aa2 = AltAz(1*u.deg, 2*u.deg, obstime='J2000')
assert aa2.obstime == Time('J2000')
aa3 = AltAz(1*u.deg, 2*u.deg, location=EarthLocation(0*u.deg, 0*u.deg, 0*u.m))
assert isinstance(aa3.location, EarthLocation)
def test_hadec_attributes():
hd = HADec(1*u.hourangle, 2*u.deg)
assert hd.ha == 1.*u.hourangle
assert hd.dec == 2*u.deg
assert hd.obstime is None
assert hd.location is None
hd2 = HADec(23*u.hourangle, -2*u.deg, obstime='J2000',
location=EarthLocation(0*u.deg, 0*u.deg, 0*u.m))
assert_allclose(hd2.ha, -1*u.hourangle)
assert hd2.dec == -2*u.deg
assert hd2.obstime == Time('J2000')
assert isinstance(hd2.location, EarthLocation)
sr = hd2.represent_as(r.SphericalRepresentation)
assert_allclose(sr.lon, -1*u.hourangle)
def test_representation():
"""
Test the getter and setter properties for `representation`
"""
# Create the frame object.
icrs = ICRS(ra=1*u.deg, dec=1*u.deg)
data = icrs.data
# Create some representation objects.
icrs_cart = icrs.cartesian
icrs_spher = icrs.spherical
icrs_cyl = icrs.cylindrical
# Testing when `_representation` set to `CartesianRepresentation`.
icrs.representation_type = r.CartesianRepresentation
assert icrs.representation_type == r.CartesianRepresentation
assert icrs_cart.x == icrs.x
assert icrs_cart.y == icrs.y
assert icrs_cart.z == icrs.z
assert icrs.data == data
# Testing that an ICRS object in CartesianRepresentation must not have spherical attributes.
for attr in ('ra', 'dec', 'distance'):
with pytest.raises(AttributeError) as err:
getattr(icrs, attr)
assert 'object has no attribute' in str(err.value)
# Testing when `_representation` set to `CylindricalRepresentation`.
icrs.representation_type = r.CylindricalRepresentation
assert icrs.representation_type == r.CylindricalRepresentation
assert icrs.data == data
# Testing setter input using text argument for spherical.
icrs.representation_type = 'spherical'
assert icrs.representation_type is r.SphericalRepresentation
assert icrs_spher.lat == icrs.dec
assert icrs_spher.lon == icrs.ra
assert icrs_spher.distance == icrs.distance
assert icrs.data == data
# Testing that an ICRS object in SphericalRepresentation must not have cartesian attributes.
for attr in ('x', 'y', 'z'):
with pytest.raises(AttributeError) as err:
getattr(icrs, attr)
assert 'object has no attribute' in str(err.value)
# Testing setter input using text argument for cylindrical.
icrs.representation_type = 'cylindrical'
assert icrs.representation_type is r.CylindricalRepresentation
assert icrs_cyl.rho == icrs.rho
assert icrs_cyl.phi == icrs.phi
assert icrs_cyl.z == icrs.z
assert icrs.data == data
# Testing that an ICRS object in CylindricalRepresentation must not have spherical attributes.
for attr in ('ra', 'dec', 'distance'):
with pytest.raises(AttributeError) as err:
getattr(icrs, attr)
assert 'object has no attribute' in str(err.value)
with pytest.raises(ValueError) as err:
icrs.representation_type = 'WRONG'
assert 'but must be a BaseRepresentation class' in str(err.value)
with pytest.raises(ValueError) as err:
icrs.representation_type = ICRS
assert 'but must be a BaseRepresentation class' in str(err.value)
def test_represent_as():
icrs = ICRS(ra=1*u.deg, dec=1*u.deg)
cart1 = icrs.represent_as('cartesian')
cart2 = icrs.represent_as(r.CartesianRepresentation)
assert cart1.x == cart2.x
assert cart1.y == cart2.y
assert cart1.z == cart2.z
# now try with velocities
icrs = ICRS(ra=0*u.deg, dec=0*u.deg, distance=10*u.kpc,
pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
radial_velocity=1*u.km/u.s)
# single string
rep2 = icrs.represent_as('cylindrical')
assert isinstance(rep2, r.CylindricalRepresentation)
assert isinstance(rep2.differentials['s'], r.CylindricalDifferential)
# single class with positional in_frame_units, verify that warning raised
with pytest.warns(AstropyWarning, match='argument position') as w:
icrs.represent_as(r.CylindricalRepresentation, False)
assert len(w) == 1
# TODO: this should probably fail in the future once we figure out a better
# workaround for dealing with UnitSphericalRepresentation's with
# RadialDifferential's
# two classes
# rep2 = icrs.represent_as(r.CartesianRepresentation,
# r.SphericalCosLatDifferential)
# assert isinstance(rep2, r.CartesianRepresentation)
# assert isinstance(rep2.differentials['s'], r.SphericalCosLatDifferential)
with pytest.raises(ValueError):
icrs.represent_as('odaigahara')
def test_shorthand_representations():
rep = r.CartesianRepresentation([1, 2, 3]*u.pc)
dif = r.CartesianDifferential([1, 2, 3]*u.km/u.s)
rep = rep.with_differentials(dif)
icrs = ICRS(rep)
cyl = icrs.cylindrical
assert isinstance(cyl, r.CylindricalRepresentation)
assert isinstance(cyl.differentials['s'], r.CylindricalDifferential)
sph = icrs.spherical
assert isinstance(sph, r.SphericalRepresentation)
assert isinstance(sph.differentials['s'], r.SphericalDifferential)
sph = icrs.sphericalcoslat
assert isinstance(sph, r.SphericalRepresentation)
assert isinstance(sph.differentials['s'], r.SphericalCosLatDifferential)
def test_equal():
obstime = 'B1955'
sc1 = FK4([1, 2]*u.deg, [3, 4]*u.deg, obstime=obstime)
sc2 = FK4([1, 20]*u.deg, [3, 4]*u.deg, obstime=obstime)
# Compare arrays and scalars
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert (sc1[0] == sc2[0]) == True # noqa (numpy True not Python True)
assert (sc1[0] != sc2[0]) == False # noqa
# Broadcasting
eq = sc1[0] == sc2
ne = sc1[0] != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
# With diff only in velocity
sc1 = FK4([1, 2]*u.deg, [3, 4]*u.deg, radial_velocity=[1, 2]*u.km/u.s)
sc2 = FK4([1, 2]*u.deg, [3, 4]*u.deg, radial_velocity=[1, 20]*u.km/u.s)
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert (sc1[0] == sc2[0]) == True # noqa
assert (sc1[0] != sc2[0]) == False # noqa
assert (FK4() == ICRS()) is False
assert (FK4() == FK4(obstime='J1999')) is False
def test_equal_exceptions():
# Shape mismatch
sc1 = FK4([1, 2, 3]*u.deg, [3, 4, 5]*u.deg)
with pytest.raises(ValueError, match='cannot compare: shape mismatch'):
sc1 == sc1[:2]
# Different representation_type
sc1 = FK4(1, 2, 3, representation_type='cartesian')
sc2 = FK4(1*u.deg, 2*u.deg, 2, representation_type='spherical')
with pytest.raises(TypeError, match='cannot compare: objects must have same '
'class: CartesianRepresentation vs. SphericalRepresentation'):
sc1 == sc2
# Different differential type
sc1 = FK4(1*u.deg, 2*u.deg, radial_velocity=1*u.km/u.s)
sc2 = FK4(1*u.deg, 2*u.deg, pm_ra_cosdec=1*u.mas/u.yr, pm_dec=1*u.mas/u.yr)
with pytest.raises(TypeError, match='cannot compare: objects must have same '
'class: RadialDifferential vs. UnitSphericalCosLatDifferential'):
sc1 == sc2
# Different frame attribute
sc1 = FK5(1*u.deg, 2*u.deg)
sc2 = FK5(1*u.deg, 2*u.deg, equinox='J1999')
with pytest.raises(TypeError, match=r'cannot compare: objects must have equivalent '
r'frames: <FK5 Frame \(equinox=J2000.000\)> '
r'vs. <FK5 Frame \(equinox=J1999.000\)>'):
sc1 == sc2
# Different frame
sc1 = FK4(1*u.deg, 2*u.deg)
sc2 = FK5(1*u.deg, 2*u.deg, equinox='J2000')
with pytest.raises(TypeError, match='cannot compare: objects must have equivalent '
r'frames: <FK4 Frame \(equinox=B1950.000, obstime=B1950.000\)> '
r'vs. <FK5 Frame \(equinox=J2000.000\)>'):
sc1 == sc2
sc1 = FK4(1*u.deg, 2*u.deg)
sc2 = FK4()
with pytest.raises(ValueError, match='cannot compare: one frame has data and '
'the other does not'):
sc1 == sc2
with pytest.raises(ValueError, match='cannot compare: one frame has data and '
'the other does not'):
sc2 == sc1
def test_dynamic_attrs():
c = ICRS(1*u.deg, 2*u.deg)
assert 'ra' in dir(c)
assert 'dec' in dir(c)
with pytest.raises(AttributeError) as err:
c.blahblah
assert "object has no attribute 'blahblah'" in str(err.value)
with pytest.raises(AttributeError) as err:
c.ra = 1
assert "Cannot set any frame attribute" in str(err.value)
c.blahblah = 1
assert c.blahblah == 1
def test_nodata_error():
i = ICRS()
with pytest.raises(ValueError) as excinfo:
i.data
assert 'does not have associated data' in str(excinfo.value)
def test_len0_data():
i = ICRS([]*u.deg, []*u.deg)
assert i.has_data
repr(i)
def test_quantity_attributes():
# make sure we can create a GCRS frame with valid inputs
GCRS(obstime='J2002', obsgeoloc=[1, 2, 3]*u.km, obsgeovel=[4, 5, 6]*u.km/u.s)
    # make sure it fails for invalid locs or vels
with pytest.raises(TypeError):
GCRS(obsgeoloc=[1, 2, 3]) # no unit
with pytest.raises(u.UnitsError):
GCRS(obsgeoloc=[1, 2, 3]*u.km/u.s) # incorrect unit
with pytest.raises(ValueError):
GCRS(obsgeoloc=[1, 3]*u.km) # incorrect shape
def test_quantity_attribute_default():
# The default default (yes) is None:
class MyCoord(BaseCoordinateFrame):
someval = QuantityAttribute(unit=u.deg)
frame = MyCoord()
assert frame.someval is None
frame = MyCoord(someval=15*u.deg)
assert u.isclose(frame.someval, 15*u.deg)
# This should work if we don't explicitly pass in a unit, but we pass in a
# default value with a unit
class MyCoord2(BaseCoordinateFrame):
someval = QuantityAttribute(15*u.deg)
frame = MyCoord2()
assert u.isclose(frame.someval, 15*u.deg)
    # Since no shape was given here, we can set it to any shape we like.
frame = MyCoord2(someval=np.ones(3)*u.deg)
assert frame.someval.shape == (3,)
assert np.all(frame.someval == 1*u.deg)
# We should also be able to insist on a given shape.
class MyCoord3(BaseCoordinateFrame):
someval = QuantityAttribute(unit=u.arcsec, shape=(3,))
frame = MyCoord3(someval=np.ones(3)*u.deg)
assert frame.someval.shape == (3,)
assert frame.someval.unit == u.arcsec
assert u.allclose(frame.someval.value, 3600.)
# The wrong shape raises.
with pytest.raises(ValueError, match='shape'):
MyCoord3(someval=1.*u.deg)
# As does the wrong unit.
with pytest.raises(u.UnitsError):
MyCoord3(someval=np.ones(3)*u.m)
# We are allowed a short-cut for zero.
frame0 = MyCoord3(someval=0)
assert frame0.someval.shape == (3,)
assert frame0.someval.unit == u.arcsec
assert np.all(frame0.someval.value == 0.)
# But not if it has the wrong shape.
with pytest.raises(ValueError, match='shape'):
MyCoord3(someval=np.zeros(2))
    # This should fail if we don't pass in a default or a unit
with pytest.raises(ValueError):
class MyCoord(BaseCoordinateFrame):
someval = QuantityAttribute()
def test_eloc_attributes():
el = EarthLocation(lon=12.3*u.deg, lat=45.6*u.deg, height=1*u.km)
it = ITRS(r.SphericalRepresentation(lon=12.3*u.deg, lat=45.6*u.deg, distance=1*u.km))
gc = GCRS(ra=12.3*u.deg, dec=45.6*u.deg, distance=6375*u.km)
el1 = AltAz(location=el).location
assert isinstance(el1, EarthLocation)
    # these should match *exactly* because the EarthLocation is stored unchanged
assert el1.lat == el.lat
assert el1.lon == el.lon
assert el1.height == el.height
el2 = AltAz(location=it).location
assert isinstance(el2, EarthLocation)
# these should *not* match because giving something in Spherical ITRS is
# *not* the same as giving it as an EarthLocation: EarthLocation is on an
# elliptical geoid. So the longitude should match (because flattening is
# only along the z-axis), but latitude should not. Also, height is relative
# to the *surface* in EarthLocation, but the ITRS distance is relative to
# the center of the Earth
assert not allclose(el2.lat, it.spherical.lat)
assert allclose(el2.lon, it.spherical.lon)
assert el2.height < -6000*u.km
el3 = AltAz(location=gc).location
# GCRS inputs implicitly get transformed to ITRS and then onto
# EarthLocation's elliptical geoid. So both lat and lon shouldn't match
assert isinstance(el3, EarthLocation)
assert not allclose(el3.lat, gc.dec)
assert not allclose(el3.lon, gc.ra)
assert np.abs(el3.height) < 500*u.km
def test_equivalent_frames():
i = ICRS()
i2 = ICRS(1*u.deg, 2*u.deg)
assert i.is_equivalent_frame(i)
assert i.is_equivalent_frame(i2)
with pytest.raises(TypeError):
assert i.is_equivalent_frame(10)
with pytest.raises(TypeError):
assert i2.is_equivalent_frame(SkyCoord(i2))
f0 = FK5() # this J2000 is TT
f1 = FK5(equinox='J2000')
f2 = FK5(1*u.deg, 2*u.deg, equinox='J2000')
f3 = FK5(equinox='J2010')
f4 = FK4(equinox='J2010')
assert f1.is_equivalent_frame(f1)
assert not i.is_equivalent_frame(f1)
assert f0.is_equivalent_frame(f1)
assert f1.is_equivalent_frame(f2)
assert not f1.is_equivalent_frame(f3)
assert not f3.is_equivalent_frame(f4)
aa1 = AltAz()
aa2 = AltAz(obstime='J2010')
assert aa2.is_equivalent_frame(aa2)
assert not aa1.is_equivalent_frame(i)
assert not aa1.is_equivalent_frame(aa2)
def test_equivalent_frame_coordinateattribute():
class FrameWithCoordinateAttribute(BaseCoordinateFrame):
coord_attr = CoordinateAttribute(HCRS)
# These frames should not be considered equivalent
f0 = FrameWithCoordinateAttribute()
f1 = FrameWithCoordinateAttribute(coord_attr=HCRS(1*u.deg, 2*u.deg, obstime='J2000'))
f2 = FrameWithCoordinateAttribute(coord_attr=HCRS(3*u.deg, 4*u.deg, obstime='J2000'))
f3 = FrameWithCoordinateAttribute(coord_attr=HCRS(1*u.deg, 2*u.deg, obstime='J2001'))
assert not f0.is_equivalent_frame(f1)
assert not f1.is_equivalent_frame(f0)
assert not f1.is_equivalent_frame(f2)
assert not f1.is_equivalent_frame(f3)
assert not f2.is_equivalent_frame(f3)
# They each should still be equivalent to a deep copy of themselves
assert f0.is_equivalent_frame(deepcopy(f0))
assert f1.is_equivalent_frame(deepcopy(f1))
assert f2.is_equivalent_frame(deepcopy(f2))
assert f3.is_equivalent_frame(deepcopy(f3))
def test_equivalent_frame_locationattribute():
class FrameWithLocationAttribute(BaseCoordinateFrame):
loc_attr = EarthLocationAttribute()
# These frames should not be considered equivalent
f0 = FrameWithLocationAttribute()
location = EarthLocation(lat=-34, lon=19, height=300)
f1 = FrameWithLocationAttribute(loc_attr=location)
assert not f0.is_equivalent_frame(f1)
assert not f1.is_equivalent_frame(f0)
# They each should still be equivalent to a deep copy of themselves
assert f0.is_equivalent_frame(deepcopy(f0))
assert f1.is_equivalent_frame(deepcopy(f1))
def test_representation_subclass():
# Regression test for #3354
    # Normally, when instantiating a frame without a distance, the frame will
    # use UnitSphericalRepresentation internally instead of
    # SphericalRepresentation.
frame = FK5(representation_type=r.SphericalRepresentation, ra=32 * u.deg, dec=20 * u.deg)
assert type(frame._data) == r.UnitSphericalRepresentation
assert frame.representation_type == r.SphericalRepresentation
# If using a SphericalRepresentation class this used to not work, so we
# test here that this is now fixed.
class NewSphericalRepresentation(r.SphericalRepresentation):
attr_classes = r.SphericalRepresentation.attr_classes
frame = FK5(representation_type=NewSphericalRepresentation, lon=32 * u.deg, lat=20 * u.deg)
assert type(frame._data) == r.UnitSphericalRepresentation
assert frame.representation_type == NewSphericalRepresentation
# A similar issue then happened in __repr__ with subclasses of
# SphericalRepresentation.
assert repr(frame) == ("<FK5 Coordinate (equinox=J2000.000): (lon, lat) in deg\n"
" (32., 20.)>")
# A more subtle issue is when specifying a custom
# UnitSphericalRepresentation subclass for the data and
# SphericalRepresentation or a subclass for the representation.
class NewUnitSphericalRepresentation(r.UnitSphericalRepresentation):
attr_classes = r.UnitSphericalRepresentation.attr_classes
def __repr__(self):
return "<NewUnitSphericalRepresentation: spam spam spam>"
frame = FK5(NewUnitSphericalRepresentation(lon=32 * u.deg, lat=20 * u.deg),
representation_type=NewSphericalRepresentation)
assert repr(frame) == "<FK5 Coordinate (equinox=J2000.000): spam spam spam>"
def test_getitem_representation():
"""
Make sure current representation survives __getitem__ even if different
from data representation.
"""
c = ICRS([1, 1] * u.deg, [2, 2] * u.deg)
c.representation_type = 'cartesian'
assert c[0].representation_type is r.CartesianRepresentation
def test_component_error_useful():
"""
Check that a data-less frame gives useful error messages about not having
data when the attributes asked for are possible coordinate components
"""
i = ICRS()
with pytest.raises(ValueError) as excinfo:
i.ra
assert 'does not have associated data' in str(excinfo.value)
with pytest.raises(AttributeError) as excinfo1:
i.foobar
with pytest.raises(AttributeError) as excinfo2:
i.lon # lon is *not* the component name despite being the underlying representation's name
assert "object has no attribute 'foobar'" in str(excinfo1.value)
assert "object has no attribute 'lon'" in str(excinfo2.value)
def test_cache_clear():
i = ICRS(1*u.deg, 2*u.deg)
    # Add an in-frame-units version of the rep to the cache.
repr(i)
assert len(i.cache['representation']) == 2
i.cache.clear()
assert len(i.cache['representation']) == 0
def test_inplace_array():
i = ICRS([[1, 2], [3, 4]]*u.deg, [[10, 20], [30, 40]]*u.deg)
    # Add an in-frame-units version of the rep to the cache.
repr(i)
# Check that repr() has added a rep to the cache
assert len(i.cache['representation']) == 2
# Modify the data
i.data.lon[:, 0] = [100, 200]*u.deg
# Clear the cache
i.cache.clear()
    # This will compute a fresh rep since the cache was cleared
assert_allclose(i.ra, [[100, 2], [200, 4]]*u.deg)
assert_allclose(i.dec, [[10, 20], [30, 40]]*u.deg)
def test_inplace_change():
i = ICRS(1*u.deg, 2*u.deg)
    # Add an in-frame-units version of the rep to the cache.
repr(i)
# Check that repr() has added a rep to the cache
assert len(i.cache['representation']) == 2
# Modify the data
i.data.lon[()] = 10*u.deg
# Clear the cache
i.cache.clear()
    # This will compute a fresh rep since the cache was cleared
assert i.ra == 10 * u.deg
assert i.dec == 2 * u.deg
def test_representation_with_multiple_differentials():
dif1 = r.CartesianDifferential([1, 2, 3]*u.km/u.s)
dif2 = r.CartesianDifferential([1, 2, 3]*u.km/u.s**2)
rep = r.CartesianRepresentation([1, 2, 3]*u.pc,
differentials={'s': dif1, 's2': dif2})
    # check that an error is raised for a representation with multiple differentials
with pytest.raises(ValueError):
ICRS(rep)
def test_missing_component_error_names():
"""
This test checks that the component names are frame component names, not
representation or differential names, when referenced in an exception raised
when not passing in enough data. For example:
ICRS(ra=10*u.deg)
should state:
TypeError: __init__() missing 1 required positional argument: 'dec'
"""
with pytest.raises(TypeError) as e:
ICRS(ra=150 * u.deg)
assert "missing 1 required positional argument: 'dec'" in str(e.value)
with pytest.raises(TypeError) as e:
ICRS(ra=150*u.deg, dec=-11*u.deg,
pm_ra=100*u.mas/u.yr, pm_dec=10*u.mas/u.yr)
assert "pm_ra_cosdec" in str(e.value)
def test_non_spherical_representation_unit_creation(unitphysics):
class PhysicsICRS(ICRS):
default_representation = r.PhysicsSphericalRepresentation
pic = PhysicsICRS(phi=1*u.deg, theta=25*u.deg, r=1*u.kpc)
assert isinstance(pic.data, r.PhysicsSphericalRepresentation)
picu = PhysicsICRS(phi=1*u.deg, theta=25*u.deg)
assert isinstance(picu.data, unitphysics)
def test_attribute_repr():
class Spam:
def _astropy_repr_in_frame(self):
return "TEST REPR"
class TestFrame(BaseCoordinateFrame):
attrtest = Attribute(default=Spam())
assert "TEST REPR" in repr(TestFrame())
def test_component_names_repr():
# Frame class with new component names that includes a name swap
class NameChangeFrame(BaseCoordinateFrame):
default_representation = r.PhysicsSphericalRepresentation
frame_specific_representation_info = {
r.PhysicsSphericalRepresentation: [
RepresentationMapping('phi', 'theta', u.deg),
RepresentationMapping('theta', 'phi', u.arcsec),
RepresentationMapping('r', 'JUSTONCE', u.AU)]
}
frame = NameChangeFrame(0*u.deg, 0*u.arcsec, 0*u.AU)
# Check for the new names in the Frame repr
assert "(theta, phi, JUSTONCE)" in repr(frame)
# Check that the letter "r" has not been replaced more than once in the Frame repr
assert repr(frame).count("JUSTONCE") == 1
def test_galactocentric_defaults():
with galactocentric_frame_defaults.set('pre-v4.0'):
galcen_pre40 = Galactocentric()
with galactocentric_frame_defaults.set('v4.0'):
galcen_40 = Galactocentric()
with galactocentric_frame_defaults.set('latest'):
galcen_latest = Galactocentric()
# parameters that changed
assert not u.allclose(galcen_pre40.galcen_distance,
galcen_40.galcen_distance)
assert not u.allclose(galcen_pre40.z_sun, galcen_40.z_sun)
for k in galcen_40.get_frame_attr_names():
if isinstance(getattr(galcen_40, k), BaseCoordinateFrame):
continue # skip coordinate comparison...
elif isinstance(getattr(galcen_40, k), CartesianDifferential):
assert u.allclose(getattr(galcen_40, k).d_xyz,
getattr(galcen_latest, k).d_xyz)
else:
assert getattr(galcen_40, k) == getattr(galcen_latest, k)
# test validate Galactocentric
with galactocentric_frame_defaults.set('latest'):
params = galactocentric_frame_defaults.validate(galcen_latest)
references = galcen_latest.frame_attribute_references
state = dict(parameters=params, references=references)
assert galactocentric_frame_defaults.parameters == params
assert galactocentric_frame_defaults.references == references
assert galactocentric_frame_defaults._state == state
# Test not one of accepted parameter types
with pytest.raises(ValueError):
galactocentric_frame_defaults.validate(ValueError)
# test parameters property
assert (
galactocentric_frame_defaults.parameters
== galactocentric_frame_defaults.parameters
)
def test_galactocentric_references():
# references in the "scientific paper"-sense
with galactocentric_frame_defaults.set('pre-v4.0'):
galcen_pre40 = Galactocentric()
for k in galcen_pre40.get_frame_attr_names():
if k == 'roll': # no reference for this parameter
continue
assert k in galcen_pre40.frame_attribute_references
with galactocentric_frame_defaults.set('v4.0'):
galcen_40 = Galactocentric()
for k in galcen_40.get_frame_attr_names():
if k == 'roll': # no reference for this parameter
continue
assert k in galcen_40.frame_attribute_references
with galactocentric_frame_defaults.set('v4.0'):
galcen_custom = Galactocentric(z_sun=15*u.pc)
for k in galcen_custom.get_frame_attr_names():
if k == 'roll': # no reference for this parameter
continue
if k == 'z_sun':
assert k not in galcen_custom.frame_attribute_references
else:
assert k in galcen_custom.frame_attribute_references
def test_coordinateattribute_transformation():
class FrameWithCoordinateAttribute(BaseCoordinateFrame):
coord_attr = CoordinateAttribute(HCRS)
hcrs = HCRS(1*u.deg, 2*u.deg, 3*u.AU, obstime='2001-02-03')
f1_frame = FrameWithCoordinateAttribute(coord_attr=hcrs)
f1_skycoord = FrameWithCoordinateAttribute(coord_attr=SkyCoord(hcrs))
# The input is already HCRS, so the frame attribute should not change it
assert f1_frame.coord_attr == hcrs
# The output should not be different if a SkyCoord is provided
assert f1_skycoord.coord_attr == f1_frame.coord_attr
gcrs = GCRS(4*u.deg, 5*u.deg, 6*u.AU, obstime='2004-05-06')
f2_frame = FrameWithCoordinateAttribute(coord_attr=gcrs)
f2_skycoord = FrameWithCoordinateAttribute(coord_attr=SkyCoord(gcrs))
# The input needs to be converted from GCRS to HCRS
assert isinstance(f2_frame.coord_attr, HCRS)
# The `obstime` frame attribute should have been "merged" in a SkyCoord-style transformation
assert f2_frame.coord_attr.obstime == gcrs.obstime
# The output should not be different if a SkyCoord is provided
assert f2_skycoord.coord_attr == f2_frame.coord_attr
def test_realize_frame_accepts_kwargs():
c1 = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc,
representation_type=r.CartesianRepresentation)
new_data = r.CartesianRepresentation(x=11*u.pc, y=12*u.pc, z=13*u.pc)
c2 = c1.realize_frame(new_data, representation_type="cartesian")
c3 = c1.realize_frame(new_data, representation_type="cylindrical")
assert c2.representation_type == r.CartesianRepresentation
assert c3.representation_type == r.CylindricalRepresentation
def test_nameless_frame_subclass():
"""Note: this is a regression test for #11096"""
class Test:
pass
# Subclass from a frame class and a non-frame class.
# This subclassing is the test!
class NewFrame(ICRS, Test):
pass
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import transformations as t
from astropy.coordinates.builtin_frames import ICRS, FK5, FK4, FK4NoETerms, Galactic, AltAz, HCRS
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.matrix_utilities import rotation_matrix
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.utils.exceptions import AstropyWarning
# Coordinates just for these tests.
class TCoo1(ICRS):
pass
class TCoo2(ICRS):
pass
class TCoo3(ICRS):
pass
def test_transform_classes():
"""
Tests the class-based/OO syntax for creating transforms
"""
def tfun(c, f):
return f.__class__(ra=c.ra, dec=c.dec)
_ = t.FunctionTransform(tfun, TCoo1, TCoo2,
register_graph=frame_transform_graph)
c1 = TCoo1(ra=1*u.radian, dec=0.5*u.radian)
c2 = c1.transform_to(TCoo2())
assert_allclose(c2.ra.radian, 1)
assert_allclose(c2.dec.radian, 0.5)
def matfunc(coo, fr):
return [[1, 0, 0],
[0, coo.ra.degree, 0],
[0, 0, 1]]
trans2 = t.DynamicMatrixTransform(matfunc, TCoo1, TCoo2)
trans2.register(frame_transform_graph)
c3 = TCoo1(ra=1*u.deg, dec=2*u.deg)
c4 = c3.transform_to(TCoo2())
assert_allclose(c4.ra.degree, 1)
    assert_allclose(c4.dec.degree, 2)
# be sure to unregister the second one - no need for trans1 because it
# already got unregistered when trans2 was created.
trans2.unregister(frame_transform_graph)
def test_transform_decos():
"""
Tests the decorator syntax for creating transforms
"""
c1 = TCoo1(ra=1*u.deg, dec=2*u.deg)
@frame_transform_graph.transform(t.FunctionTransform, TCoo1, TCoo2)
def trans(coo1, f):
return TCoo2(ra=coo1.ra, dec=coo1.dec * 2)
c2 = c1.transform_to(TCoo2())
assert_allclose(c2.ra.degree, 1)
assert_allclose(c2.dec.degree, 4)
c3 = TCoo1(r.CartesianRepresentation(x=1*u.pc, y=1*u.pc, z=2*u.pc))
@frame_transform_graph.transform(t.StaticMatrixTransform, TCoo1, TCoo2)
def matrix():
return [[2, 0, 0],
[0, 1, 0],
[0, 0, 1]]
c4 = c3.transform_to(TCoo2())
assert_allclose(c4.cartesian.x, 2*u.pc)
assert_allclose(c4.cartesian.y, 1*u.pc)
assert_allclose(c4.cartesian.z, 2*u.pc)
def test_shortest_path():
class FakeTransform:
def __init__(self, pri):
self.priority = pri
g = t.TransformGraph()
# cheating by adding graph elements directly that are not classes - the
# graphing algorithm still works fine with integers - it just isn't a valid
# TransformGraph
    # the graph is a down-going diamond with the lower-right edge slightly
    # heavier and a cycle from the bottom back to the top
    # there is also a pair of nodes isolated from node 1
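    # A comments-only sketch of the graph built below (an editor's
    # illustration; edge priorities shown as weights):
    #
    #            1
    #         (1)/ \(1)
    #           2   3
    #         (1)\ /(2)
    #             4 --(5)--> 1   (cycle back to the top)
    #
    #     5 --(1)--> 6           (pair isolated from node 1)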
g._graph[1][2] = FakeTransform(1)
g._graph[1][3] = FakeTransform(1)
g._graph[2][4] = FakeTransform(1)
g._graph[3][4] = FakeTransform(2)
g._graph[4][1] = FakeTransform(5)
g._graph[5][6] = FakeTransform(1)
path, d = g.find_shortest_path(1, 2)
assert path == [1, 2]
assert d == 1
path, d = g.find_shortest_path(1, 3)
assert path == [1, 3]
assert d == 1
path, d = g.find_shortest_path(1, 4)
print('Cached paths:', g._shortestpaths)
assert path == [1, 2, 4]
assert d == 2
# unreachable
path, d = g.find_shortest_path(1, 5)
assert path is None
assert d == float('inf')
path, d = g.find_shortest_path(5, 6)
assert path == [5, 6]
assert d == 1
def test_sphere_cart():
"""
Tests the spherical <-> cartesian transform functions
"""
from astropy.utils import NumpyRNGContext
from astropy.coordinates import spherical_to_cartesian, cartesian_to_spherical
x, y, z = spherical_to_cartesian(1, 0, 0)
assert_allclose(x, 1)
assert_allclose(y, 0)
assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(0, 1, 1)
assert_allclose(x, 0)
assert_allclose(y, 0)
assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(5, 0, np.arcsin(4. / 5.))
assert_allclose(x, 3)
assert_allclose(y, 4)
assert_allclose(z, 0)
r, lat, lon = cartesian_to_spherical(0, 1, 0)
assert_allclose(r, 1)
assert_allclose(lat, 0 * u.deg)
assert_allclose(lon, np.pi / 2 * u.rad)
# test round-tripping
with NumpyRNGContext(13579):
x, y, z = np.random.randn(3, 5)
r, lat, lon = cartesian_to_spherical(x, y, z)
x2, y2, z2 = spherical_to_cartesian(r, lat, lon)
assert_allclose(x, x2)
assert_allclose(y, y2)
assert_allclose(z, z2)
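# A minimal sketch of the angle convention the checks above rely on
# (latitude measured from the x-y plane, not colatitude); this mirrors
# spherical_to_cartesian for plain floats and is illustrative only:
def _sph_to_cart_sketch(r_, lat, lon):
    # x = r cos(lat) cos(lon), y = r cos(lat) sin(lon), z = r sin(lat)
    return (r_ * np.cos(lat) * np.cos(lon),
            r_ * np.cos(lat) * np.sin(lon),
            r_ * np.sin(lat))
# e.g. _sph_to_cart_sketch(5, 0, np.arcsin(4 / 5)) -> (3.0, 4.0, 0.0),
# matching the (5, 0, arcsin(4/5)) case asserted above.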
def test_transform_path_pri():
"""
This checks that the transformation path prioritization works by
making sure the ICRS -> Gal transformation always goes through FK5
and not FK4.
"""
frame_transform_graph.invalidate_cache()
tpath, td = frame_transform_graph.find_shortest_path(ICRS, Galactic)
assert tpath == [ICRS, FK5, Galactic]
assert td == 2
    # the path from FK4 to Galactic should go through FK4NoETerms
tpath, td = frame_transform_graph.find_shortest_path(FK4, Galactic)
assert tpath == [FK4, FK4NoETerms, Galactic]
assert td == 2
def test_obstime():
"""
Checks to make sure observation time is
accounted for at least in FK4 <-> ICRS transformations
"""
b1950 = Time('B1950')
j1975 = Time('J1975')
fk4_50 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=b1950)
fk4_75 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=j1975)
icrs_50 = fk4_50.transform_to(ICRS())
icrs_75 = fk4_75.transform_to(ICRS())
# now check that the resulting coordinates are *different* - they should be,
# because the obstime is different
assert icrs_50.ra.degree != icrs_75.ra.degree
assert icrs_50.dec.degree != icrs_75.dec.degree
# ------------------------------------------------------------------------------
# Affine transform tests and helpers:
# just acting as a namespace
class transfunc:
rep = r.CartesianRepresentation(np.arange(3)*u.pc)
dif = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr)
rep0 = r.CartesianRepresentation(np.zeros(3)*u.pc)
@classmethod
def both(cls, coo, fr):
# exchange x <-> z and offset
M = np.array([[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.]])
return M, cls.rep.with_differentials(cls.dif)
@classmethod
def just_matrix(cls, coo, fr):
        # exchange x <-> z, no offset
M = np.array([[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.]])
return M, None
@classmethod
def no_matrix(cls, coo, fr):
return None, cls.rep.with_differentials(cls.dif)
@classmethod
def no_pos(cls, coo, fr):
return None, cls.rep0.with_differentials(cls.dif)
@classmethod
def no_vel(cls, coo, fr):
return None, cls.rep
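# A hedged sketch of how the (matrix, offset) pairs above are applied,
# mirroring the expected-output computation in test_affine_transform_succeed
# below (not a definitive statement of the AffineTransform API): positions
# transform as p' = M @ p + offset and velocities as v' = M @ v + v_offset,
# with None meaning "identity" for M and "zero" for the offset.
def _apply_affine_sketch(M, vec, offset_vec):
    """Illustrative helper only: apply an affine pair to a plain 3-vector."""
    out = vec if M is None else M @ vec
    return out if offset_vec is None else out + offset_vec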
@pytest.mark.parametrize('transfunc', [transfunc.both, transfunc.no_matrix,
transfunc.no_pos, transfunc.no_vel,
transfunc.just_matrix])
@pytest.mark.parametrize('rep', [
r.CartesianRepresentation(5, 6, 7, unit=u.pc),
r.CartesianRepresentation(5, 6, 7, unit=u.pc,
differentials=r.CartesianDifferential(8, 9, 10,
unit=u.pc/u.Myr)),
r.CartesianRepresentation(5, 6, 7, unit=u.pc,
differentials=r.CartesianDifferential(8, 9, 10,
unit=u.pc/u.Myr))
.represent_as(r.CylindricalRepresentation, r.CylindricalDifferential)
])
def test_affine_transform_succeed(transfunc, rep):
c = TCoo1(rep)
# compute expected output
M, offset = transfunc(c, TCoo2)
_rep = rep.to_cartesian()
diffs = {k: diff.represent_as(r.CartesianDifferential, rep)
for k, diff in rep.differentials.items()}
expected_rep = _rep.with_differentials(diffs)
if M is not None:
expected_rep = expected_rep.transform(M)
expected_pos = expected_rep.without_differentials()
if offset is not None:
expected_pos = expected_pos + offset.without_differentials()
expected_vel = None
if c.data.differentials:
expected_vel = expected_rep.differentials['s']
if offset and offset.differentials:
expected_vel = (expected_vel + offset.differentials['s'])
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc, TCoo1, TCoo2)
trans.register(frame_transform_graph)
c2 = c.transform_to(TCoo2())
assert quantity_allclose(c2.data.to_cartesian().xyz,
expected_pos.to_cartesian().xyz)
if expected_vel is not None:
diff = c2.data.differentials['s'].to_cartesian(base=c2.data)
assert quantity_allclose(diff.xyz, expected_vel.d_xyz)
trans.unregister(frame_transform_graph)
# these should fail
def transfunc_invalid_matrix(coo, fr):
return np.eye(4), None
# Leaving this open in case we want to add more functions to check for failures
@pytest.mark.parametrize('transfunc', [transfunc_invalid_matrix])
def test_affine_transform_fail(transfunc):
diff = r.CartesianDifferential(8, 9, 10, unit=u.pc/u.Myr)
rep = r.CartesianRepresentation(5, 6, 7, unit=u.pc, differentials=diff)
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc, TCoo1, TCoo2)
trans.register(frame_transform_graph)
with pytest.raises(ValueError):
c.transform_to(TCoo2())
trans.unregister(frame_transform_graph)
def test_too_many_differentials():
dif1 = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr)
dif2 = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr**2)
rep = r.CartesianRepresentation(np.arange(3)*u.pc,
differentials={'s': dif1, 's2': dif2})
with pytest.raises(ValueError):
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc.both, TCoo1, TCoo2)
trans.register(frame_transform_graph)
# Check that if frame somehow gets through to transformation, multiple
# differentials are caught
c = TCoo1(rep.without_differentials())
c._data = c._data.with_differentials({'s': dif1, 's2': dif2})
with pytest.raises(ValueError):
c.transform_to(TCoo2())
trans.unregister(frame_transform_graph)
# A matrix transform of a unit spherical with differentials should work
@pytest.mark.parametrize('rep', [
r.UnitSphericalRepresentation(lon=15*u.degree, lat=-11*u.degree,
differentials=r.SphericalDifferential(d_lon=15*u.mas/u.yr,
d_lat=11*u.mas/u.yr,
d_distance=-110*u.km/u.s)),
r.UnitSphericalRepresentation(lon=15*u.degree, lat=-11*u.degree,
differentials={'s': r.RadialDifferential(d_distance=-110*u.km/u.s)}),
r.SphericalRepresentation(lon=15*u.degree, lat=-11*u.degree,
distance=150*u.pc,
differentials={'s': r.RadialDifferential(d_distance=-110*u.km/u.s)})
])
def test_unit_spherical_with_differentials(rep):
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc.just_matrix, TCoo1, TCoo2)
trans.register(frame_transform_graph)
c2 = c.transform_to(TCoo2())
assert 's' in rep.differentials
assert isinstance(c2.data.differentials['s'],
rep.differentials['s'].__class__)
if isinstance(rep.differentials['s'], r.RadialDifferential):
assert c2.data.differentials['s'] is rep.differentials['s']
trans.unregister(frame_transform_graph)
# should fail if we have to do offsets
trans = t.AffineTransform(transfunc.both, TCoo1, TCoo2)
trans.register(frame_transform_graph)
with pytest.raises(TypeError):
c.transform_to(TCoo2())
trans.unregister(frame_transform_graph)
def test_vel_transformation_obstime_err():
# TODO: replace after a final decision on PR #6280
from astropy.coordinates.sites import get_builtin_sites
diff = r.CartesianDifferential([.1, .2, .3]*u.km/u.s)
rep = r.CartesianRepresentation([1, 2, 3]*u.au, differentials=diff)
loc = get_builtin_sites()['example_site']
aaf = AltAz(obstime='J2010', location=loc)
aaf2 = AltAz(obstime=aaf.obstime + 3*u.day, location=loc)
aaf3 = AltAz(obstime=aaf.obstime + np.arange(3)*u.day, location=loc)
aaf4 = AltAz(obstime=aaf.obstime, location=loc)
aa = aaf.realize_frame(rep)
with pytest.raises(NotImplementedError) as exc:
aa.transform_to(aaf2)
assert 'cannot transform' in exc.value.args[0]
with pytest.raises(NotImplementedError) as exc:
aa.transform_to(aaf3)
assert 'cannot transform' in exc.value.args[0]
aa.transform_to(aaf4)
aa.transform_to(ICRS())
def test_function_transform_with_differentials():
def tfun(c, f):
return f.__class__(ra=c.ra, dec=c.dec)
_ = t.FunctionTransform(tfun, TCoo3, TCoo2,
register_graph=frame_transform_graph)
t3 = TCoo3(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=1*u.marcsec/u.yr,
pm_dec=1*u.marcsec/u.yr,)
with pytest.warns(AstropyWarning, match=r'.*they have been dropped.*') as w:
t3.transform_to(TCoo2())
assert len(w) == 1
def test_frame_override_component_with_attribute():
"""
It was previously possible to define a frame with an attribute with the
same name as a component. We don't want to allow this!
"""
from astropy.coordinates.baseframe import BaseCoordinateFrame
from astropy.coordinates.attributes import Attribute
class BorkedFrame(BaseCoordinateFrame):
ra = Attribute(default=150)
dec = Attribute(default=150)
def trans_func(coo1, f):
pass
trans = t.FunctionTransform(trans_func, BorkedFrame, ICRS)
with pytest.raises(ValueError) as exc:
trans.register(frame_transform_graph)
assert ('BorkedFrame' in exc.value.args[0] and
"'ra'" in exc.value.args[0] and
"'dec'" in exc.value.args[0])
def test_static_matrix_combine_paths():
"""
Check that combined staticmatrixtransform matrices provide the same
transformation as using an intermediate transformation.
This is somewhat of a regression test for #7706
"""
from astropy.coordinates.baseframe import BaseCoordinateFrame
from astropy.coordinates.matrix_utilities import rotation_matrix
class AFrame(BaseCoordinateFrame):
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
t1 = t.StaticMatrixTransform(rotation_matrix(30.*u.deg, 'z'),
ICRS, AFrame)
t1.register(frame_transform_graph)
t2 = t.StaticMatrixTransform(rotation_matrix(30.*u.deg, 'z').T,
AFrame, ICRS)
t2.register(frame_transform_graph)
class BFrame(BaseCoordinateFrame):
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
t3 = t.StaticMatrixTransform(rotation_matrix(30.*u.deg, 'x'),
ICRS, BFrame)
t3.register(frame_transform_graph)
t4 = t.StaticMatrixTransform(rotation_matrix(30.*u.deg, 'x').T,
BFrame, ICRS)
t4.register(frame_transform_graph)
c = Galactic(123*u.deg, 45*u.deg)
c1 = c.transform_to(BFrame()) # direct
c2 = c.transform_to(AFrame()).transform_to(BFrame()) # thru A
c3 = c.transform_to(ICRS()).transform_to(BFrame()) # thru ICRS
assert quantity_allclose(c1.lon, c2.lon)
assert quantity_allclose(c1.lat, c2.lat)
assert quantity_allclose(c1.lon, c3.lon)
assert quantity_allclose(c1.lat, c3.lat)
for t_ in [t1, t2, t3, t4]:
t_.unregister(frame_transform_graph)
def test_multiple_aliases():
from astropy.coordinates.baseframe import BaseCoordinateFrame
# Define a frame with multiple aliases
class MultipleAliasesFrame(BaseCoordinateFrame):
name = ['alias_1', 'alias_2']
default_representation = r.SphericalRepresentation
def tfun(c, f):
return f.__class__(lon=c.lon, lat=c.lat)
# Register a transform
graph = t.TransformGraph()
_ = t.FunctionTransform(tfun, MultipleAliasesFrame, MultipleAliasesFrame,
register_graph=graph)
# Test that both aliases have been added to the transform graph
assert graph.lookup_name('alias_1') == MultipleAliasesFrame
assert graph.lookup_name('alias_2') == MultipleAliasesFrame
# Test that both aliases appear in the graphviz DOT format output
dotstr = graph.to_dot_graph()
assert '`alias_1`\\n`alias_2`' in dotstr
def test_remove_transform_and_unregister():
def tfun(c, f):
        return f.__class__(ra=c.ra, dec=c.dec)
# Register transforms
graph = t.TransformGraph()
ftrans1 = t.FunctionTransform(tfun, TCoo1, TCoo1, register_graph=graph)
ftrans2 = t.FunctionTransform(tfun, TCoo2, TCoo2, register_graph=graph)
_ = t.FunctionTransform(tfun, TCoo1, TCoo2, register_graph=graph)
# Confirm that the frames are part of the graph
assert TCoo1 in graph.frame_set
assert TCoo2 in graph.frame_set
# Use all three ways to remove a transform
# Remove the only transform with TCoo2 as the "from" frame
ftrans2.unregister(graph)
# TCoo2 should still be part of the graph because it is the "to" frame of a transform
assert TCoo2 in graph.frame_set
# Remove the remaining transform that involves TCoo2
graph.remove_transform(TCoo1, TCoo2, None)
# Now TCoo2 should not be part of the graph
assert TCoo2 not in graph.frame_set
# Remove the remaining transform that involves TCoo1
graph.remove_transform(None, None, ftrans1)
# Now TCoo1 should not be part of the graph
assert TCoo1 not in graph.frame_set
def test_remove_transform_errors():
def tfun(c, f):
return f.__class__(ra=c.ra, dec=c.dec)
graph = t.TransformGraph()
_ = t.FunctionTransform(tfun, TCoo1, TCoo1, register_graph=graph)
# Test bad calls to remove_transform
with pytest.raises(ValueError):
graph.remove_transform(None, TCoo1, None)
with pytest.raises(ValueError):
graph.remove_transform(TCoo1, None, None)
with pytest.raises(ValueError):
graph.remove_transform(None, None, None)
with pytest.raises(ValueError):
graph.remove_transform(None, None, 1)
with pytest.raises(ValueError):
graph.remove_transform(TCoo1, TCoo1, 1)
def test_impose_finite_difference_dt():
class H1(HCRS):
pass
class H2(HCRS):
pass
class H3(HCRS):
pass
graph = t.TransformGraph()
tfun = lambda c, f: f.__class__(ra=c.ra, dec=c.dec)
# Set up a number of transforms with different time steps
old_dt = 1*u.min
transform1 = t.FunctionTransformWithFiniteDifference(tfun, H1, H1, register_graph=graph,
finite_difference_dt=old_dt)
transform2 = t.FunctionTransformWithFiniteDifference(tfun, H2, H2, register_graph=graph,
finite_difference_dt=old_dt * 2)
transform3 = t.FunctionTransformWithFiniteDifference(tfun, H2, H3, register_graph=graph,
finite_difference_dt=old_dt * 3)
# Check that all of the transforms have the same new time step
new_dt = 1*u.yr
with graph.impose_finite_difference_dt(new_dt):
assert transform1.finite_difference_dt == new_dt
assert transform2.finite_difference_dt == new_dt
assert transform3.finite_difference_dt == new_dt
# Check that all of the original time steps have been restored
assert transform1.finite_difference_dt == old_dt
assert transform2.finite_difference_dt == old_dt * 2
assert transform3.finite_difference_dt == old_dt * 3
@pytest.mark.parametrize("first, second, check",
[((rotation_matrix(30*u.deg), None),
(rotation_matrix(45*u.deg), None),
(rotation_matrix(75*u.deg), None)),
((rotation_matrix(30*u.deg), r.CartesianRepresentation([1, 0, 0])),
(rotation_matrix(45*u.deg), None),
(rotation_matrix(75*u.deg), r.CartesianRepresentation([1/np.sqrt(2), -1/np.sqrt(2), 0]))),
((rotation_matrix(30*u.deg), None),
(rotation_matrix(45*u.deg), r.CartesianRepresentation([0, 0, 1])),
(rotation_matrix(75*u.deg), r.CartesianRepresentation([0, 0, 1]))),
((rotation_matrix(30*u.deg), r.CartesianRepresentation([1, 0, 0])),
(rotation_matrix(45*u.deg), r.CartesianRepresentation([0, 0, 1])),
(rotation_matrix(75*u.deg), r.CartesianRepresentation([1/np.sqrt(2), -1/np.sqrt(2), 1]))),
                          ((rotation_matrix(30*u.deg), r.CartesianRepresentation([1, 2, 3])),
(None, r.CartesianRepresentation([4, 5, 6])),
(rotation_matrix(30*u.deg), r.CartesianRepresentation([5, 7, 9]))),
((None, r.CartesianRepresentation([1, 2, 3])),
(rotation_matrix(45*u.deg), r.CartesianRepresentation([4, 5, 6])),
(rotation_matrix(45*u.deg), r.CartesianRepresentation([3/np.sqrt(2)+4, 1/np.sqrt(2)+5, 9]))),
((None, r.CartesianRepresentation([1, 2, 3])),
(None, r.CartesianRepresentation([4, 5, 6])),
(None, r.CartesianRepresentation([5, 7, 9]))),
((rotation_matrix(30*u.deg), r.CartesianRepresentation([1, 0, 0])),
(None, None),
(rotation_matrix(30*u.deg), r.CartesianRepresentation([1, 0, 0]))),
((None, None),
(rotation_matrix(45*u.deg), r.CartesianRepresentation([0, 0, 1])),
(rotation_matrix(45*u.deg), r.CartesianRepresentation([0, 0, 1]))),
((None, None),
(None, None),
(None, None))])
def test_combine_affine_params(first, second, check):
result = t._combine_affine_params(first, second)
if check[0] is None:
assert result[0] is None
else:
assert_allclose(result[0], check[0])
if check[1] is None:
assert result[1] is None
else:
assert_allclose(result[1].xyz, check[1].xyz)
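# The cases above encode the composition rule (an inference from the expected
# values, not a documented API guarantee): applying (M1, o1) and then
# (M2, o2) collapses to the single affine pair
#     (M2 @ M1,  M2 @ o1 + o2)
# where None stands in for an identity matrix or a zero offset.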
import pytest
import numpy as np
from astropy.tests.helper import assert_quantity_allclose
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import EarthLocation, SkyCoord, Angle, Distance
from astropy.coordinates.sites import get_builtin_sites
from astropy.utils.data import get_pkg_data_filename
from astropy.constants import c as speed_of_light
from astropy.table import Table
@pytest.mark.parametrize('kind', ['heliocentric', 'barycentric'])
def test_basic(kind):
t0 = Time('2015-1-1')
loc = get_builtin_sites()['example_site']
sc = SkyCoord(0, 0, unit=u.deg, obstime=t0, location=loc)
rvc0 = sc.radial_velocity_correction(kind)
assert rvc0.shape == ()
assert rvc0.unit.is_equivalent(u.km/u.s)
scs = SkyCoord(0, 0, unit=u.deg, obstime=t0 + np.arange(10)*u.day,
location=loc)
rvcs = scs.radial_velocity_correction(kind)
assert rvcs.shape == (10,)
assert rvcs.unit.is_equivalent(u.km/u.s)
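# Usage sketch (hedged, following the recipe in the astropy documentation on
# radial-velocity corrections rather than anything asserted here): the
# returned correction is added to a measured radial velocity, with a small
# relativistic cross term:
#     rv_corrected = rv_measured + rvc + rv_measured * rvc / c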
test_input_time = Time(2457244.5, format='jd')
# test_input_loc = EarthLocation.of_site('Cerro Paranal')
# to avoid the network hit we just copy here what that yields
test_input_loc = EarthLocation.from_geodetic(lon=-70.403*u.deg,
lat=-24.6252*u.deg,
height=2635*u.m)
def test_helio_iraf():
"""
Compare the heliocentric correction to the IRAF rvcorrect.
    The `generate_IRAF_input` function below shows how the comparison data
    was produced.
"""
# this is based on running IRAF with the output of `generate_IRAF_input` below
rvcorr_result = """
# RVCORRECT: Observatory parameters for European Southern Observatory: Paranal
# latitude = -24:37.5
# longitude = 70:24.2
# altitude = 2635
## HJD VOBS VHELIO VLSR VDIURNAL VLUNAR VANNUAL VSOLAR
2457244.50120 0.00 -10.36 -20.35 -0.034 -0.001 -10.325 -9.993
2457244.50025 0.00 -14.20 -23.86 -0.115 -0.004 -14.085 -9.656
2457244.50278 0.00 -2.29 -11.75 0.115 0.004 -2.413 -9.459
2457244.50025 0.00 -14.20 -23.86 -0.115 -0.004 -14.085 -9.656
2457244.49929 0.00 -17.41 -26.30 -0.192 -0.006 -17.214 -8.888
2457244.50317 0.00 -17.19 -17.44 0.078 0.001 -17.269 -0.253
2457244.50348 0.00 2.35 -6.21 0.192 0.006 2.156 -8.560
2457244.49959 0.00 2.13 -15.06 -0.078 -0.000 2.211 -17.194
2457244.49929 0.00 -17.41 -26.30 -0.192 -0.006 -17.214 -8.888
2457244.49835 0.00 -19.84 -27.56 -0.259 -0.008 -19.573 -7.721
2457244.50186 0.00 -24.47 -22.16 -0.038 -0.004 -24.433 2.313
2457244.50470 0.00 -11.11 -8.57 0.221 0.005 -11.332 2.534
2457244.50402 0.00 6.90 -0.38 0.259 0.008 6.629 -7.277
2457244.50051 0.00 11.53 -5.78 0.038 0.004 11.489 -17.311
2457244.49768 0.00 -1.84 -19.37 -0.221 -0.004 -1.612 -17.533
2457244.49835 0.00 -19.84 -27.56 -0.259 -0.008 -19.573 -7.721
2457244.49749 0.00 -21.38 -27.59 -0.315 -0.010 -21.056 -6.209
2457244.50109 0.00 -27.69 -22.90 -0.096 -0.006 -27.584 4.785
2457244.50457 0.00 -17.00 -9.30 0.196 0.003 -17.201 7.704
2457244.50532 0.00 2.62 2.97 0.340 0.009 2.276 0.349
2457244.50277 0.00 16.42 4.67 0.228 0.009 16.178 -11.741
2457244.49884 0.00 13.98 -5.48 -0.056 0.002 14.039 -19.463
2457244.49649 0.00 -2.84 -19.84 -0.297 -0.007 -2.533 -17.000
2457244.49749 0.00 -21.38 -27.59 -0.315 -0.010 -21.056 -6.209
2457244.49675 0.00 -21.97 -26.39 -0.357 -0.011 -21.598 -4.419
2457244.50025 0.00 -29.30 -22.47 -0.149 -0.008 -29.146 6.831
2457244.50398 0.00 -21.55 -9.88 0.146 0.001 -21.700 11.670
2457244.50577 0.00 -3.26 4.00 0.356 0.009 -3.623 7.263
2457244.50456 0.00 14.87 11.06 0.357 0.011 14.497 -3.808
2457244.50106 0.00 22.20 7.14 0.149 0.008 22.045 -15.058
2457244.49732 0.00 14.45 -5.44 -0.146 -0.001 14.600 -19.897
2457244.49554 0.00 -3.84 -19.33 -0.356 -0.008 -3.478 -15.491
2457244.49675 0.00 -21.97 -26.39 -0.357 -0.011 -21.598 -4.419
2457244.49615 0.00 -21.57 -24.00 -0.383 -0.012 -21.172 -2.432
2457244.49942 0.00 -29.36 -20.83 -0.193 -0.009 -29.157 8.527
2457244.50312 0.00 -24.26 -9.75 0.088 -0.001 -24.348 14.511
2457244.50552 0.00 -8.66 4.06 0.327 0.007 -8.996 12.721
2457244.50549 0.00 10.14 14.13 0.413 0.012 9.715 3.994
2457244.50305 0.00 23.35 15.76 0.306 0.011 23.031 -7.586
2457244.49933 0.00 24.78 8.18 0.056 0.006 24.721 -16.601
2457244.49609 0.00 13.77 -5.06 -0.221 -0.003 13.994 -18.832
2457244.49483 0.00 -4.53 -17.77 -0.394 -0.010 -4.131 -13.237
2457244.49615 0.00 -21.57 -24.00 -0.383 -0.012 -21.172 -2.432
2457244.49572 0.00 -20.20 -20.54 -0.392 -0.013 -19.799 -0.335
2457244.49907 0.00 -28.17 -17.30 -0.197 -0.009 -27.966 10.874
2457244.50285 0.00 -22.96 -5.96 0.090 -0.001 -23.048 16.995
2457244.50531 0.00 -7.00 8.16 0.335 0.007 -7.345 15.164
2457244.50528 0.00 12.23 18.47 0.423 0.012 11.795 6.238
2457244.50278 0.00 25.74 20.13 0.313 0.012 25.416 -5.607
2457244.49898 0.00 27.21 12.38 0.057 0.006 27.144 -14.829
2457244.49566 0.00 15.94 -1.17 -0.226 -0.003 16.172 -17.111
2457244.49437 0.00 -2.78 -14.17 -0.403 -0.010 -2.368 -11.387
2457244.49572 0.00 -20.20 -20.54 -0.392 -0.013 -19.799 -0.335
2457244.49548 0.00 -17.94 -16.16 -0.383 -0.012 -17.541 1.776
2457244.49875 0.00 -25.73 -12.99 -0.193 -0.009 -25.525 12.734
2457244.50246 0.00 -20.63 -1.91 0.088 -0.001 -20.716 18.719
2457244.50485 0.00 -5.03 11.90 0.327 0.007 -5.365 16.928
2457244.50482 0.00 13.77 21.97 0.413 0.012 13.347 8.202
2457244.50238 0.00 26.98 23.60 0.306 0.011 26.663 -3.378
2457244.49867 0.00 28.41 16.02 0.056 0.005 28.353 -12.393
2457244.49542 0.00 17.40 2.78 -0.221 -0.003 17.625 -14.625
2457244.49416 0.00 -0.90 -9.93 -0.394 -0.010 -0.499 -9.029
2457244.49548 0.00 -17.94 -16.16 -0.383 -0.012 -17.541 1.776
2457244.49544 0.00 -14.87 -11.06 -0.357 -0.011 -14.497 3.808
2457244.49894 0.00 -22.20 -7.14 -0.149 -0.008 -22.045 15.058
2457244.50268 0.00 -14.45 5.44 0.146 0.001 -14.600 19.897
2457244.50446 0.00 3.84 19.33 0.356 0.008 3.478 15.491
2457244.50325 0.00 21.97 26.39 0.357 0.011 21.598 4.419
2457244.49975 0.00 29.30 22.47 0.149 0.008 29.146 -6.831
2457244.49602 0.00 21.55 9.88 -0.146 -0.001 21.700 -11.670
2457244.49423 0.00 3.26 -4.00 -0.356 -0.009 3.623 -7.263
2457244.49544 0.00 -14.87 -11.06 -0.357 -0.011 -14.497 3.808
2457244.49561 0.00 -11.13 -5.46 -0.315 -0.010 -10.805 5.670
2457244.49921 0.00 -17.43 -0.77 -0.096 -0.006 -17.333 16.664
2457244.50269 0.00 -6.75 12.83 0.196 0.003 -6.949 19.583
2457244.50344 0.00 12.88 25.10 0.340 0.009 12.527 12.227
2457244.50089 0.00 26.67 26.80 0.228 0.009 26.430 0.137
2457244.49696 0.00 24.24 16.65 -0.056 0.002 24.290 -7.584
2457244.49461 0.00 7.42 2.29 -0.297 -0.007 7.719 -5.122
2457244.49561 0.00 -11.13 -5.46 -0.315 -0.010 -10.805 5.670
2457244.49598 0.00 -6.90 0.38 -0.259 -0.008 -6.629 7.277
2457244.49949 0.00 -11.53 5.78 -0.038 -0.004 -11.489 17.311
2457244.50232 0.00 1.84 19.37 0.221 0.004 1.612 17.533
2457244.50165 0.00 19.84 27.56 0.259 0.008 19.573 7.721
2457244.49814 0.00 24.47 22.16 0.038 0.004 24.433 -2.313
2457244.49530 0.00 11.11 8.57 -0.221 -0.005 11.332 -2.534
2457244.49598 0.00 -6.90 0.38 -0.259 -0.008 -6.629 7.277
2457244.49652 0.00 -2.35 6.21 -0.192 -0.006 -2.156 8.560
2457244.50041 0.00 -2.13 15.06 0.078 0.000 -2.211 17.194
2457244.50071 0.00 17.41 26.30 0.192 0.006 17.214 8.888
2457244.49683 0.00 17.19 17.44 -0.078 -0.001 17.269 0.253
2457244.49652 0.00 -2.35 6.21 -0.192 -0.006 -2.156 8.560
2457244.49722 0.00 2.29 11.75 -0.115 -0.004 2.413 9.459
2457244.49975 0.00 14.20 23.86 0.115 0.004 14.085 9.656
2457244.49722 0.00 2.29 11.75 -0.115 -0.004 2.413 9.459
2457244.49805 0.00 6.84 16.77 -0.034 -0.001 6.874 9.935
"""
vhs_iraf = []
for line in rvcorr_result.strip().split('\n'):
if not line.strip().startswith('#'):
vhs_iraf.append(float(line.split()[2]))
vhs_iraf = vhs_iraf*u.km/u.s
targets = SkyCoord(_get_test_input_radecs(), obstime=test_input_time,
location=test_input_loc)
vhs_astropy = targets.radial_velocity_correction('heliocentric')
assert_quantity_allclose(vhs_astropy, vhs_iraf, atol=150*u.m/u.s)
    return vhs_astropy, vhs_iraf  # for interactive examination
def generate_IRAF_input(writefn=None):
dt = test_input_time.utc.datetime
coos = _get_test_input_radecs()
lines = []
for ra, dec in zip(coos.ra, coos.dec):
rastr = Angle(ra).to_string(u.hour, sep=':')
decstr = Angle(dec).to_string(u.deg, sep=':')
msg = '{yr} {mo} {day} {uth}:{utmin} {ra} {dec}'
lines.append(msg.format(yr=dt.year, mo=dt.month, day=dt.day,
uth=dt.hour, utmin=dt.minute,
ra=rastr, dec=decstr))
if writefn:
with open(writefn, 'w') as f:
for l in lines:
f.write(l)
else:
for l in lines:
print(l)
print('Run IRAF as:\nastutil\nrvcorrect f=<filename> observatory=Paranal')
def _get_test_input_radecs():
ras = []
decs = []
for dec in np.linspace(-85, 85, 15):
nra = int(np.round(10*np.cos(dec*u.deg)).value)
ras1 = np.linspace(-180, 180-1e-6, nra)
ras.extend(ras1)
decs.extend([dec]*len(ras1))
return SkyCoord(ra=ras, dec=decs, unit=u.deg)
def test_barycorr():
# this is the result of calling _get_barycorr_bvcs
barycorr_bvcs = u.Quantity([
-10335.93326096, -14198.47605491, -2237.60012494, -14198.47595363,
-17425.46512587, -17131.70901174, 2424.37095076, 2130.61519166,
-17425.46495779, -19872.50026998, -24442.37091097, -11017.08975893,
6978.0622355, 11547.93333743, -1877.34772637, -19872.50004258,
-21430.08240017, -27669.14280689, -16917.08506807, 2729.57222968,
16476.49569232, 13971.97171764, -2898.04250914, -21430.08212368,
-22028.51337105, -29301.92349394, -21481.13036199, -3147.44828909,
14959.50065514, 22232.91155425, 14412.11903105, -3921.56359768,
-22028.51305781, -21641.01479409, -29373.0512649, -24205.90521765,
-8557.34138828, 10250.50350732, 23417.2299926, 24781.98057941,
13706.17339044, -4627.70005932, -21641.01445812, -20284.92627505,
-28193.91696959, -22908.51624166, -6901.82132125, 12336.45758056,
25804.51614607, 27200.50029664, 15871.21385688, -2882.24738355,
-20284.9259314, -18020.92947805, -25752.96564978, -20585.81957567,
-4937.25573801, 13870.58916957, 27037.31568441, 28402.06636994,
17326.25977035, -1007.62209045, -18020.92914212, -14950.33284575,
-22223.74260839, -14402.94943965, 3930.73265119, 22037.68163353,
29311.09265126, 21490.30070307, 3156.62229843, -14950.33253252,
-11210.53846867, -17449.59867676, -6697.54090389, 12949.11642965,
26696.03999586, 24191.5164355, 7321.50355488, -11210.53819218,
-6968.89359681, -11538.76423011, 1886.51695238, 19881.66902396,
24451.54039956, 11026.26000765, -6968.89336945, -2415.20201758,
-2121.44599781, 17434.63406085, 17140.87871753, -2415.2018495,
2246.76923076, 14207.64513054, 2246.76933194, 6808.40787728],
u.m/u.s)
# this tries the *other* way of calling radial_velocity_correction relative
# to the IRAF tests
targets = _get_test_input_radecs()
bvcs_astropy = targets.radial_velocity_correction(obstime=test_input_time,
location=test_input_loc,
kind='barycentric')
assert_quantity_allclose(bvcs_astropy, barycorr_bvcs, atol=10*u.mm/u.s)
    return bvcs_astropy, barycorr_bvcs  # for interactive examination
def _get_barycorr_bvcs(coos, loc, injupyter=False):
"""
Gets the barycentric correction of the test data from the
http://astroutils.astronomy.ohio-state.edu/exofast/barycorr.html web site.
Requires the https://github.com/tronsgaard/barycorr python interface to that
site.
Provided to reproduce the test data above, but not required to actually run
the tests.
"""
import barycorr
from astropy.utils.console import ProgressBar
bvcs = []
for ra, dec in ProgressBar(list(zip(coos.ra.deg, coos.dec.deg)),
ipython_widget=injupyter):
res = barycorr.bvc(test_input_time.utc.jd, ra, dec,
lat=loc.geodetic[1].deg,
lon=loc.geodetic[0].deg,
elevation=loc.geodetic[2].to(u.m).value)
bvcs.append(res)
return bvcs*u.m/u.s
def test_rvcorr_multiple_obstimes_onskycoord():
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
arrtime = Time('2005-03-21 00:00:00') + np.linspace(-1, 1, 10)*u.day
sc = SkyCoord(1*u.deg, 2*u.deg, 100*u.kpc, obstime=arrtime, location=loc)
rvcbary_sc2 = sc.radial_velocity_correction(kind='barycentric')
assert len(rvcbary_sc2) == 10
    # check the multiple-obstime and multiple-coordinate mode
sc = SkyCoord(([1]*10)*u.deg, 2*u.deg, 100*u.kpc,
obstime=arrtime, location=loc)
rvcbary_sc3 = sc.radial_velocity_correction(kind='barycentric')
assert len(rvcbary_sc3) == 10
def test_invalid_argument_combos():
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
time = Time('2005-03-21 00:00:00')
timel = Time('2005-03-21 00:00:00', location=loc)
scwattrs = SkyCoord(1*u.deg, 2*u.deg, obstime=time, location=loc)
scwoattrs = SkyCoord(1*u.deg, 2*u.deg)
scwattrs.radial_velocity_correction()
with pytest.raises(ValueError):
scwattrs.radial_velocity_correction(obstime=time, location=loc)
with pytest.raises(TypeError):
scwoattrs.radial_velocity_correction(obstime=time)
scwoattrs.radial_velocity_correction(obstime=time, location=loc)
with pytest.raises(TypeError):
scwoattrs.radial_velocity_correction()
with pytest.raises(ValueError):
scwattrs.radial_velocity_correction(timel)
def test_regression_9645():
sc = SkyCoord(10*u.deg, 20*u.deg, distance=5*u.pc, obstime=test_input_time,
pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr, radial_velocity=0*u.km/u.s)
sc_novel = SkyCoord(10*u.deg, 20*u.deg, distance=5*u.pc, obstime=test_input_time)
corr = sc.radial_velocity_correction(obstime=test_input_time, location=test_input_loc)
corr_novel = sc_novel.radial_velocity_correction(obstime=test_input_time, location=test_input_loc)
assert_quantity_allclose(corr, corr_novel)
def test_barycorr_withvels():
# this is the result of calling _get_barycorr_bvcs_withvels
barycorr_bvcs = u.Quantity(
[-10335.94926581, -14198.49117304, -2237.58656335,
-14198.49078575, -17425.47883864, -17131.72711182,
2424.38466675, 2130.62819093, -17425.47834604,
-19872.51254565, -24442.39064348, -11017.0964353,
6978.07515501, 11547.94831175, -1877.34560543,
-19872.51188308, -21430.0931411, -27669.15919972,
-16917.09482078, 2729.57757823, 16476.5087925,
13971.97955641, -2898.04451551, -21430.09220144,
-22028.52224227, -29301.93613248, -21481.14015151,
-3147.44852058, 14959.50849997, 22232.91906264,
14412.12044201, -3921.56783473, -22028.52088749,
-21641.02117064, -29373.05982792, -24205.91319258,
-8557.34473049, 10250.50560918, 23417.23357219,
24781.98113432, 13706.17025059, -4627.70468688,
-21641.01928189, -20284.92926795, -28193.92117514,
-22908.52127321, -6901.82512637, 12336.45557256,
25804.5137786, 27200.49576347, 15871.20847332,
-2882.25080211, -20284.92696256, -18020.92824383,
-25752.96528309, -20585.82211189, -4937.26088706,
13870.58217495, 27037.30698639, 28402.0571686,
17326.25314311, -1007.62313006, -18020.92552769,
-14950.32653444, -22223.73793506, -14402.95155047,
3930.72325162, 22037.66749783, 29311.07826101,
21490.29193529, 3156.62360741, -14950.32373745,
-11210.52665171, -17449.59068509, -6697.54579192,
12949.09948082, 26696.01956077, 24191.50403015,
7321.50684816, -11210.52389393, -6968.87610888,
-11538.7547047, 1886.50525065, 19881.64366561,
24451.52197666, 11026.26396455, -6968.87351156,
-2415.17899385, -2121.44598968, 17434.60465075,
17140.87204017, -2415.1771038, 2246.79688215,
14207.61339552, 2246.79790276, 6808.43888253], u.m/u.s)
coos = _get_test_input_radecvels()
bvcs_astropy = coos.radial_velocity_correction(obstime=test_input_time,
location=test_input_loc)
assert_quantity_allclose(bvcs_astropy, barycorr_bvcs, atol=10*u.mm/u.s)
    return bvcs_astropy, barycorr_bvcs  # for interactive examination
def _get_test_input_radecvels():
coos = _get_test_input_radecs()
ras = coos.ra
decs = coos.dec
pmra = np.linspace(-1000, 1000, coos.size)*u.mas/u.yr
pmdec = np.linspace(0, 1000, coos.size)*u.mas/u.yr
rvs = np.linspace(0, 100, coos.size)*u.km/u.s
distance = np.linspace(10, 100, coos.size)*u.pc
return SkyCoord(ras, decs, pm_ra_cosdec=pmra, pm_dec=pmdec,
radial_velocity=rvs, distance=distance,
obstime=test_input_time)
def _get_barycorr_bvcs_withvels(coos, loc, injupyter=False):
"""
Gets the barycentric correction of the test data from the
http://astroutils.astronomy.ohio-state.edu/exofast/barycorr.html web site.
Requires the https://github.com/tronsgaard/barycorr python interface to that
site.
Provided to reproduce the test data above, but not required to actually run
the tests.
"""
import barycorr
from astropy.utils.console import ProgressBar
bvcs = []
for coo in ProgressBar(coos, ipython_widget=injupyter):
res = barycorr.bvc(test_input_time.utc.jd,
coo.ra.deg, coo.dec.deg,
lat=loc.geodetic[1].deg,
lon=loc.geodetic[0].deg,
pmra=coo.pm_ra_cosdec.to_value(u.mas/u.yr),
pmdec=coo.pm_dec.to_value(u.mas/u.yr),
parallax=coo.distance.to_value(u.mas, equivalencies=u.parallax()),
rv=coo.radial_velocity.to_value(u.m/u.s),
epoch=test_input_time.utc.jd,
elevation=loc.geodetic[2].to(u.m).value)
bvcs.append(res)
return bvcs*u.m/u.s
def test_warning_no_obstime_on_skycoord():
c = SkyCoord(l=10*u.degree, b=45*u.degree,
pm_l_cosb=34*u.mas/u.yr, pm_b=-117*u.mas/u.yr,
distance=50*u.pc, frame='galactic')
with pytest.warns(Warning):
c.radial_velocity_correction('barycentric', test_input_time,
test_input_loc)
@pytest.mark.remote_data
def test_regression_10094():
"""
Make sure that when we include the proper motion and radial velocity of
a SkyCoord, our velocity corrections remain close to TEMPO2.
    We check that tau Ceti is within 5 mm/s
"""
    # Wright & Eastman (2014) Table 2
# Corrections for tau Ceti
wright_table = Table.read(
get_pkg_data_filename('coordinates/wright_eastmann_2014_tau_ceti.fits')
)
reduced_jds = wright_table['JD-2400000']
tempo2 = wright_table['TEMPO2']
barycorr = wright_table['BARYCORR']
    # tau Ceti Hipparcos data
tauCet = SkyCoord('01 44 05.1275 -15 56 22.4006',
unit=(u.hour, u.deg),
pm_ra_cosdec=-1721.05*u.mas/u.yr,
pm_dec=854.16*u.mas/u.yr,
distance=Distance(parallax=273.96*u.mas),
radial_velocity=-16.597*u.km/u.s,
obstime=Time(48348.5625, format='mjd'))
    # CTIO location as used in Wright & Eastman
xyz = u.Quantity([1814985.3, -5213916.8, -3187738.1], u.m)
obs = EarthLocation(*xyz)
times = Time(2400000, reduced_jds, format='jd')
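    # The TEMPO2 and BARYCORR columns are stored as dimensionless v/c (an
    # assumption inferred from the scaling below), so convert them to
    # velocities before comparing.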
tempo2 = tempo2 * speed_of_light
barycorr = barycorr * speed_of_light
astropy = tauCet.radial_velocity_correction(location=obs, obstime=times)
assert_quantity_allclose(astropy, tempo2, atol=5*u.mm/u.s)
assert_quantity_allclose(astropy, barycorr, atol=5*u.mm/u.s)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy import units as u
from astropy.coordinates.builtin_frames import CIRS, ICRS, Galactic, Galactocentric
from astropy.coordinates import builtin_frames as bf
from astropy.coordinates import galactocentric_frame_defaults
from astropy.units import allclose as quantity_allclose
from astropy.coordinates.errors import ConvertError
from astropy.coordinates import representation as r
def test_api():
# transform observed Barycentric velocities to full-space Galactocentric
with galactocentric_frame_defaults.set('latest'):
gc_frame = Galactocentric()
icrs = ICRS(ra=151.*u.deg, dec=-16*u.deg, distance=101*u.pc,
pm_ra_cosdec=21*u.mas/u.yr, pm_dec=-71*u.mas/u.yr,
radial_velocity=71*u.km/u.s)
icrs.transform_to(gc_frame)
# transform a set of ICRS proper motions to Galactic
icrs = ICRS(ra=151.*u.deg, dec=-16*u.deg,
pm_ra_cosdec=21*u.mas/u.yr, pm_dec=-71*u.mas/u.yr)
icrs.transform_to(Galactic())
# transform a Barycentric RV to a GSR RV
icrs = ICRS(ra=151.*u.deg, dec=-16*u.deg, distance=1.*u.pc,
pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
radial_velocity=71*u.km/u.s)
icrs.transform_to(Galactocentric())
all_kwargs = [
dict(ra=37.4*u.deg, dec=-55.8*u.deg),
dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc),
dict(ra=37.4*u.deg, dec=-55.8*u.deg,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
dict(ra=37.4*u.deg, dec=-55.8*u.deg,
radial_velocity=105.7*u.km/u.s),
dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
radial_velocity=105.7*u.km/u.s),
dict(ra=37.4*u.deg, dec=-55.8*u.deg,
radial_velocity=105.7*u.km/u.s,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=105.7*u.km/u.s),
# Now test other representation/differential types:
dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
representation_type='cartesian'),
dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
representation_type=r.CartesianRepresentation),
dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
v_x=100.*u.km/u.s, v_y=200*u.km/u.s, v_z=300*u.km/u.s,
representation_type=r.CartesianRepresentation,
differential_type=r.CartesianDifferential),
dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
v_x=100.*u.km/u.s, v_y=200*u.km/u.s, v_z=300*u.km/u.s,
representation_type=r.CartesianRepresentation,
differential_type='cartesian'),
]
@pytest.mark.parametrize('kwargs', all_kwargs)
def test_all_arg_options(kwargs):
# Above is a list of all possible valid combinations of arguments.
# Here we do a simple thing and just verify that passing them in, we have
# access to the relevant attributes from the resulting object
icrs = ICRS(**kwargs)
gal = icrs.transform_to(Galactic())
repr_gal = repr(gal)
for k in kwargs:
if k == 'differential_type':
continue
getattr(icrs, k)
if 'pm_ra_cosdec' in kwargs: # should have both
assert 'pm_l_cosb' in repr_gal
assert 'pm_b' in repr_gal
assert 'mas / yr' in repr_gal
if 'radial_velocity' not in kwargs:
assert 'radial_velocity' not in repr_gal
if 'radial_velocity' in kwargs:
assert 'radial_velocity' in repr_gal
assert 'km / s' in repr_gal
if 'pm_ra_cosdec' not in kwargs:
assert 'pm_l_cosb' not in repr_gal
assert 'pm_b' not in repr_gal
@pytest.mark.parametrize('cls,lon,lat', [
[bf.ICRS, 'ra', 'dec'], [bf.FK4, 'ra', 'dec'], [bf.FK4NoETerms, 'ra', 'dec'],
[bf.FK5, 'ra', 'dec'], [bf.GCRS, 'ra', 'dec'], [bf.HCRS, 'ra', 'dec'],
[bf.LSR, 'ra', 'dec'], [bf.CIRS, 'ra', 'dec'], [bf.Galactic, 'l', 'b'],
[bf.AltAz, 'az', 'alt'], [bf.Supergalactic, 'sgl', 'sgb'],
[bf.GalacticLSR, 'l', 'b'], [bf.HeliocentricMeanEcliptic, 'lon', 'lat'],
[bf.GeocentricMeanEcliptic, 'lon', 'lat'],
[bf.BarycentricMeanEcliptic, 'lon', 'lat'],
[bf.PrecessedGeocentric, 'ra', 'dec']
])
def test_expected_arg_names(cls, lon, lat):
kwargs = {lon: 37.4*u.deg, lat: -55.8*u.deg, 'distance': 150*u.pc,
f'pm_{lon}_cos{lat}': -21.2*u.mas/u.yr,
f'pm_{lat}': 17.1*u.mas/u.yr,
'radial_velocity': 105.7*u.km/u.s}
frame = cls(**kwargs)
# these data are extracted from the vizier copy of XHIP:
# http://vizier.u-strasbg.fr/viz-bin/VizieR-3?-source=+V/137A/XHIP
_xhip_head = """
------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
R D pmRA pmDE Di pmGLon pmGLat RV U V W
HIP AJ2000 (deg) EJ2000 (deg) (mas/yr) (mas/yr) GLon (deg) GLat (deg) st (pc) (mas/yr) (mas/yr) (km/s) (km/s) (km/s) (km/s)
------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
"""[1:-1]
_xhip_data = """
19 000.05331690 +38.30408633 -3.17 -15.37 112.00026470 -23.47789171 247.12 -6.40 -14.33 6.30 7.3 2.0 -17.9
20 000.06295067 +23.52928427 36.11 -22.48 108.02779304 -37.85659811 95.90 29.35 -30.78 37.80 -19.3 16.1 -34.2
21 000.06623581 +08.00723430 61.48 -0.23 101.69697120 -52.74179515 183.68 58.06 -20.23 -11.72 -45.2 -30.9 -1.3
24917 080.09698238 -33.39874984 -4.30 13.40 236.92324669 -32.58047131 107.38 -14.03 -1.15 36.10 -22.4 -21.3 -19.9
59207 182.13915108 +65.34963517 18.17 5.49 130.04157185 51.18258601 56.00 -18.98 -0.49 5.70 1.5 6.1 4.4
87992 269.60730667 +36.87462906 -89.58 72.46 62.98053142 25.90148234 129.60 45.64 105.79 -4.00 -39.5 -15.8 56.7
115110 349.72322473 -28.74087144 48.86 -9.25 23.00447250 -69.52799804 116.87 -8.37 -49.02 15.00 -16.8 -12.2 -23.6
"""[1:-1]
# in principle we could parse the above as a table, but doing it "manually"
# makes this test less tied to Table working correctly
@pytest.mark.parametrize('hip,ra,dec,pmra,pmdec,glon,glat,dist,pmglon,pmglat,rv,U,V,W',
[[float(val) for val in row.split()] for row in _xhip_data.split('\n')])
def test_xhip_galactic(hip, ra, dec, pmra, pmdec, glon, glat, dist, pmglon, pmglat, rv, U, V, W):
i = ICRS(ra*u.deg, dec*u.deg, dist*u.pc,
pm_ra_cosdec=pmra*u.marcsec/u.yr, pm_dec=pmdec*u.marcsec/u.yr,
radial_velocity=rv*u.km/u.s)
g = i.transform_to(Galactic())
# precision is limited by 2-decimal digit string representation of pms
assert quantity_allclose(g.pm_l_cosb, pmglon*u.marcsec/u.yr, atol=.01*u.marcsec/u.yr)
assert quantity_allclose(g.pm_b, pmglat*u.marcsec/u.yr, atol=.01*u.marcsec/u.yr)
# make sure UVW also makes sense
uvwg = g.cartesian.differentials['s']
# precision is limited by 1-decimal digit string representation of vels
assert quantity_allclose(uvwg.d_x, U*u.km/u.s, atol=.1*u.km/u.s)
assert quantity_allclose(uvwg.d_y, V*u.km/u.s, atol=.1*u.km/u.s)
assert quantity_allclose(uvwg.d_z, W*u.km/u.s, atol=.1*u.km/u.s)
@pytest.mark.parametrize('kwargs,expect_success', [
[dict(ra=37.4*u.deg, dec=-55.8*u.deg), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc), True],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg, radial_velocity=105.7*u.km/u.s), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
radial_velocity=105.7*u.km/u.s), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg,
radial_velocity=105.7*u.km/u.s,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=105.7*u.km/u.s), True]
])
def test_frame_affinetransform(kwargs, expect_success):
"""There are already tests in test_transformations.py that check that
an AffineTransform fails without full-space data, but this just checks that
things work as expected at the frame level as well.
"""
with galactocentric_frame_defaults.set('latest'):
icrs = ICRS(**kwargs)
if expect_success:
_ = icrs.transform_to(Galactocentric())
else:
with pytest.raises(ConvertError):
icrs.transform_to(Galactocentric())
def test_differential_type_arg():
"""
Test passing in an explicit differential class to the initializer or
changing the differential class via set_representation_cls
"""
from astropy.coordinates.builtin_frames import ICRS
icrs = ICRS(ra=1*u.deg, dec=60*u.deg,
pm_ra=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr,
differential_type=r.UnitSphericalDifferential)
assert icrs.pm_ra == 10*u.mas/u.yr
icrs = ICRS(ra=1*u.deg, dec=60*u.deg,
pm_ra=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr,
differential_type={'s': r.UnitSphericalDifferential})
assert icrs.pm_ra == 10*u.mas/u.yr
icrs = ICRS(ra=1*u.deg, dec=60*u.deg,
pm_ra_cosdec=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr)
icrs.set_representation_cls(s=r.UnitSphericalDifferential)
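# pm_ra = pm_ra_cosdec / cos(dec) = 10 / cos(60 deg) = 20 mas/yr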
assert quantity_allclose(icrs.pm_ra, 20*u.mas/u.yr)
# incompatible representation and differential
with pytest.raises(TypeError):
ICRS(ra=1*u.deg, dec=60*u.deg,
v_x=1*u.km/u.s, v_y=-2*u.km/u.s, v_z=-2*u.km/u.s,
differential_type=r.CartesianDifferential)
# specify both
icrs = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc,
v_x=1*u.km/u.s, v_y=2*u.km/u.s, v_z=3*u.km/u.s,
representation_type=r.CartesianRepresentation,
differential_type=r.CartesianDifferential)
assert icrs.x == 1*u.pc
assert icrs.y == 2*u.pc
assert icrs.z == 3*u.pc
assert icrs.v_x == 1*u.km/u.s
assert icrs.v_y == 2*u.km/u.s
assert icrs.v_z == 3*u.km/u.s
def test_slicing_preserves_differential():
icrs = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=105.7*u.km/u.s)
icrs2 = icrs.reshape(1, 1)[:1, 0]
for name in icrs.representation_component_names.keys():
assert getattr(icrs, name) == getattr(icrs2, name)[0]
for name in icrs.get_representation_component_names('s').keys():
assert getattr(icrs, name) == getattr(icrs2, name)[0]
def test_shorthand_attributes():
# Check that attribute access works
# for array data:
n = 4
icrs1 = ICRS(ra=np.random.uniform(0, 360, n)*u.deg,
dec=np.random.uniform(-90, 90, n)*u.deg,
distance=100*u.pc,
pm_ra_cosdec=np.random.normal(0, 100, n)*u.mas/u.yr,
pm_dec=np.random.normal(0, 100, n)*u.mas/u.yr,
radial_velocity=np.random.normal(0, 100, n)*u.km/u.s)
v = icrs1.velocity
pm = icrs1.proper_motion
assert quantity_allclose(pm[0], icrs1.pm_ra_cosdec)
assert quantity_allclose(pm[1], icrs1.pm_dec)
# for scalar data:
icrs2 = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=105.7*u.km/u.s)
v = icrs2.velocity
pm = icrs2.proper_motion
assert quantity_allclose(pm[0], icrs2.pm_ra_cosdec)
assert quantity_allclose(pm[1], icrs2.pm_dec)
# check that it fails where we expect:
# no distance
rv = 105.7*u.km/u.s
icrs3 = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=rv)
with pytest.raises(ValueError):
icrs3.velocity
icrs3.set_representation_cls('cartesian')
assert hasattr(icrs3, 'radial_velocity')
assert quantity_allclose(icrs3.radial_velocity, rv)
icrs4 = ICRS(x=30*u.pc, y=20*u.pc, z=11*u.pc,
v_x=10*u.km/u.s, v_y=10*u.km/u.s, v_z=10*u.km/u.s,
representation_type=r.CartesianRepresentation,
differential_type=r.CartesianDifferential)
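# smoke test: with full 3D (Cartesian) velocity data, radial_velocity
# should be accessible without raising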
icrs4.radial_velocity
def test_negative_distance():
""" Regression test: #7408
Make sure that negative parallaxes turned into distances are handled right
"""
RA = 150 * u.deg
DEC = -11*u.deg
c = ICRS(ra=RA, dec=DEC,
distance=(-10*u.mas).to(u.pc, u.parallax()),
pm_ra_cosdec=10*u.mas/u.yr,
pm_dec=10*u.mas/u.yr)
assert quantity_allclose(c.ra, RA)
assert quantity_allclose(c.dec, DEC)
c = ICRS(ra=RA, dec=DEC,
distance=(-10*u.mas).to(u.pc, u.parallax()))
assert quantity_allclose(c.ra, RA)
assert quantity_allclose(c.dec, DEC)
def test_velocity_units():
"""Check that the differential data given has compatible units
with the time-derivative of representation data"""
msg = ('x has unit "" with physical type "dimensionless", but v_x has '
'incompatible unit "" with physical type "dimensionless" instead '
r'of the expected "frequency"\.')
with pytest.raises(ValueError, match=msg):
c = ICRS(
x=1, y=2, z=3,
v_x=1, v_y=2, v_z=3,
representation_type=r.CartesianRepresentation,
differential_type=r.CartesianDifferential)
def test_frame_with_velocity_without_distance_can_be_transformed():
frame = CIRS(1*u.deg, 2*u.deg, pm_dec=1*u.mas/u.yr, pm_ra_cosdec=2*u.mas/u.yr)
rep = frame.transform_to(ICRS())
assert "<ICRS Coordinate: (ra, dec, distance) in" in repr(rep)
|
0c16a38ecc3f62725f65f1cade6ef79292b079b2bfd284db5d47b2f4a7d822d7 | """
Tests the Angle string formatting capabilities. SkyCoord formatting is in
test_sky_coord
"""
from astropy.coordinates.angles import Angle
from astropy import units as u
def test_to_string_precision():
# There are already some tests in test_api.py, but this is a regression
# test for the bug in issue #1319 which caused incorrect formatting of the
# seconds for precision=0
angle = Angle(-1.23456789, unit=u.degree)
assert angle.to_string(precision=3) == '-1d14m04.444s'
assert angle.to_string(precision=1) == '-1d14m04.4s'
assert angle.to_string(precision=0) == '-1d14m04s'
angle2 = Angle(-1.23456789, unit=u.hourangle)
assert angle2.to_string(precision=3, unit=u.hour) == '-1h14m04.444s'
assert angle2.to_string(precision=1, unit=u.hour) == '-1h14m04.4s'
assert angle2.to_string(precision=0, unit=u.hour) == '-1h14m04s'
# Regression test for #7141
angle3 = Angle(-0.5, unit=u.degree)
assert angle3.to_string(precision=0, fields=3) == '-0d30m00s'
assert angle3.to_string(precision=0, fields=2) == '-0d30m'
assert angle3.to_string(precision=0, fields=1) == '-1d'
def test_to_string_decimal():
# There are already some tests in test_api.py, but this is a regression
# test for the bug in issue #1323 which caused decimal formatting to not
# work
angle1 = Angle(2., unit=u.degree)
assert angle1.to_string(decimal=True, precision=3) == '2.000'
assert angle1.to_string(decimal=True, precision=1) == '2.0'
assert angle1.to_string(decimal=True, precision=0) == '2'
angle2 = Angle(3., unit=u.hourangle)
assert angle2.to_string(decimal=True, precision=3) == '3.000'
assert angle2.to_string(decimal=True, precision=1) == '3.0'
assert angle2.to_string(decimal=True, precision=0) == '3'
angle3 = Angle(4., unit=u.radian)
assert angle3.to_string(decimal=True, precision=3) == '4.000'
assert angle3.to_string(decimal=True, precision=1) == '4.0'
assert angle3.to_string(decimal=True, precision=0) == '4'
def test_to_string_formats():
a = Angle(1.113355, unit=u.deg)
latex_str = r'$1^\circ06{}^\prime48.078{}^{\prime\prime}$'
assert a.to_string(format='latex') == latex_str
assert a.to_string(format='latex_inline') == latex_str
assert a.to_string(format='unicode') == '1°06′48.078″'
a = Angle(1.113355, unit=u.hour)
latex_str = r'$1^{\mathrm{h}}06^{\mathrm{m}}48.078^{\mathrm{s}}$'
assert a.to_string(format='latex') == latex_str
assert a.to_string(format='latex_inline') == latex_str
assert a.to_string(format='unicode') == '1ʰ06ᵐ48.078ˢ'
a = Angle(1.113355, unit=u.radian)
assert a.to_string(format='latex') == r'$1.11336\mathrm{rad}$'
assert a.to_string(format='latex_inline') == r'$1.11336\mathrm{rad}$'
assert a.to_string(format='unicode') == '1.11336rad'
def test_to_string_fields():
a = Angle(1.113355, unit=u.deg)
assert a.to_string(fields=1) == r'1d'
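# with the seconds field dropped, 06m48.078s rounds up to 07m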
assert a.to_string(fields=2) == r'1d07m'
assert a.to_string(fields=3) == r'1d06m48.078s'
def test_to_string_padding():
a = Angle(0.5653, unit=u.deg)
assert a.to_string(unit='deg', sep=':', pad=True) == r'00:33:55.08'
# Test to make sure negative angles are padded correctly
a = Angle(-0.5653, unit=u.deg)
assert a.to_string(unit='deg', sep=':', pad=True) == r'-00:33:55.08'
def test_sexagesimal_rounding_up():
a = Angle(359.999999999999, unit=u.deg)
assert a.to_string(precision=None) == '360d00m00s'
assert a.to_string(precision=4) == '360d00m00.0000s'
assert a.to_string(precision=5) == '360d00m00.00000s'
assert a.to_string(precision=6) == '360d00m00.000000s'
assert a.to_string(precision=7) == '360d00m00.0000000s'
assert a.to_string(precision=8) == '360d00m00.00000000s'
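# with 9 decimal places the deficit below 360d (3.6e-9 arcsec here) survives
# rounding, so the string no longer wraps up to 360d00m00s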
assert a.to_string(precision=9) == '359d59m59.999999996s'
a = Angle(3.999999, unit=u.deg)
assert a.to_string(fields=2, precision=None) == '4d00m'
assert a.to_string(fields=2, precision=1) == '4d00m'
assert a.to_string(fields=2, precision=5) == '4d00m'
assert a.to_string(fields=1, precision=1) == '4d'
assert a.to_string(fields=1, precision=5) == '4d'
def test_to_string_scalar():
a = Angle(1.113355, unit=u.deg)
assert isinstance(a.to_string(), str)
def test_to_string_radian_with_precision():
"""
Regression test for a bug that caused ``to_string`` to crash for angles in
radians when specifying the precision.
"""
# Check that specifying the precision works
a = Angle(3., unit=u.rad)
assert a.to_string(precision=3, sep='fromunit') == '3.000rad'
def test_sexagesimal_round_down():
a1 = Angle(1, u.deg).to(u.hourangle)
a2 = Angle(2, u.deg)
assert a1.to_string() == '0h04m00s'
assert a2.to_string() == '2d00m00s'
def test_to_string_fields_colon():
a = Angle(1.113355, unit=u.deg)
assert a.to_string(fields=2, sep=':') == '1:07'
assert a.to_string(fields=3, sep=':') == '1:06:48.078'
assert a.to_string(fields=1, sep=':') == '1'
|
90347bdb4932246b79c7bc42b63dfed3fc42d87bbc81edf905a200c373993887 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy import units as u
from astropy.coordinates.distances import Distance
from astropy.coordinates.builtin_frames import ICRS, FK5, Galactic, AltAz, SkyOffsetFrame
from astropy.coordinates import SkyCoord, EarthLocation
from astropy.time import Time
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
def test_altaz_attribute_transforms():
"""Test transforms between AltAz frames with different attributes."""
el1 = EarthLocation(0*u.deg, 0*u.deg, 0*u.m)
origin1 = AltAz(0 * u.deg, 0*u.deg, obstime=Time("2000-01-01T12:00:00"),
location=el1)
frame1 = SkyOffsetFrame(origin=origin1)
coo1 = SkyCoord(1 * u.deg, 1 * u.deg, frame=frame1)
el2 = EarthLocation(0*u.deg, 0*u.deg, 0*u.m)
origin2 = AltAz(0 * u.deg, 0*u.deg, obstime=Time("2000-01-01T11:00:00"),
location=el2)
frame2 = SkyOffsetFrame(origin=origin2)
coo2 = coo1.transform_to(frame2)
coo2_expected = [1.22522446, 0.70624298] * u.deg
assert_allclose([coo2.lon.wrap_at(180*u.deg), coo2.lat],
coo2_expected, atol=convert_precision)
el3 = EarthLocation(0*u.deg, 90*u.deg, 0*u.m)
origin3 = AltAz(0 * u.deg, 90*u.deg, obstime=Time("2000-01-01T12:00:00"),
location=el3)
frame3 = SkyOffsetFrame(origin=origin3)
coo3 = coo2.transform_to(frame3)
assert_allclose([coo3.lon.wrap_at(180*u.deg), coo3.lat],
[1*u.deg, 1*u.deg], atol=convert_precision)
@pytest.mark.parametrize("inradec,expectedlatlon, tolsep", [
((45, 45)*u.deg, (0, 0)*u.deg, .001*u.arcsec),
((45, 0)*u.deg, (0, -45)*u.deg, .001*u.arcsec),
((45, 90)*u.deg, (0, 45)*u.deg, .001*u.arcsec),
((46, 45)*u.deg, (1*np.cos(45*u.deg), 0)*u.deg, 16*u.arcsec),
])
def test_skyoffset(inradec, expectedlatlon, tolsep, originradec=(45, 45)*u.deg):
origin = ICRS(*originradec)
skyoffset_frame = SkyOffsetFrame(origin=origin)
skycoord = SkyCoord(*inradec, frame=ICRS)
skycoord_inaf = skycoord.transform_to(skyoffset_frame)
assert hasattr(skycoord_inaf, 'lon')
assert hasattr(skycoord_inaf, 'lat')
expected = SkyCoord(*expectedlatlon, frame=skyoffset_frame)
assert skycoord_inaf.separation(expected) < tolsep
# Check we can also transform back (regression test for gh-11254).
roundtrip = skycoord_inaf.transform_to(ICRS())
assert roundtrip.separation(skycoord) < 1*u.uas
def test_skyoffset_functional_ra():
# we do the 12)[1:-1] business because sometimes machine precision issues
# lead to results that are either ~0 or ~360, which mucks up the final
# comparison and leads to spurious failures. So this just avoids that by
# staying away from the edges
input_ra = np.linspace(0, 360, 12)[1:-1]
input_dec = np.linspace(-90, 90, 12)[1:-1]
icrs_coord = ICRS(ra=input_ra*u.deg,
dec=input_dec*u.deg,
distance=1.*u.kpc)
for ra in np.linspace(0, 360, 24):
# expected rotation
expected = ICRS(ra=np.linspace(0-ra, 360-ra, 12)[1:-1]*u.deg,
dec=np.linspace(-90, 90, 12)[1:-1]*u.deg,
distance=1.*u.kpc)
expected_xyz = expected.cartesian.xyz
# actual transformation to the frame
skyoffset_frame = SkyOffsetFrame(origin=ICRS(ra*u.deg, 0*u.deg))
actual = icrs_coord.transform_to(skyoffset_frame)
actual_xyz = actual.cartesian.xyz
# back to ICRS
roundtrip = actual.transform_to(ICRS())
roundtrip_xyz = roundtrip.cartesian.xyz
# Verify
assert_allclose(actual_xyz, expected_xyz, atol=1E-5*u.kpc)
assert_allclose(icrs_coord.ra, roundtrip.ra, atol=1E-5*u.deg)
assert_allclose(icrs_coord.dec, roundtrip.dec, atol=1E-5*u.deg)
assert_allclose(icrs_coord.distance, roundtrip.distance, atol=1E-5*u.kpc)
def test_skyoffset_functional_dec():
# we do the 12)[1:-1] business because sometimes machine precision issues
# lead to results that are either ~0 or ~360, which mucks up the final
# comparison and leads to spurious failures. So this just avoids that by
# staying away from the edges
input_ra = np.linspace(0, 360, 12)[1:-1]
input_dec = np.linspace(-90, 90, 12)[1:-1]
input_ra_rad = np.deg2rad(input_ra)
input_dec_rad = np.deg2rad(input_dec)
icrs_coord = ICRS(ra=input_ra*u.deg,
dec=input_dec*u.deg,
distance=1.*u.kpc)
# Dec rotations
# Done in xyz space because dec must be [-90,90]
for dec in np.linspace(-90, 90, 13):
# expected rotation
dec_rad = -np.deg2rad(dec)
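# The component expressions below are a rotation of the ICRS unit vectors
# (cos(ra)cos(dec), sin(ra)cos(dec), sin(dec)) about the y-axis by the
# origin declination, written out explicitly.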
expected_x = (-np.sin(input_dec_rad) * np.sin(dec_rad) +
np.cos(input_ra_rad) * np.cos(input_dec_rad) * np.cos(dec_rad))
expected_y = (np.sin(input_ra_rad) * np.cos(input_dec_rad))
expected_z = (np.sin(input_dec_rad) * np.cos(dec_rad) +
np.sin(dec_rad) * np.cos(input_ra_rad) * np.cos(input_dec_rad))
expected = SkyCoord(x=expected_x,
y=expected_y,
z=expected_z, unit='kpc', representation_type='cartesian')
expected_xyz = expected.cartesian.xyz
# actual transformation to the frame
skyoffset_frame = SkyOffsetFrame(origin=ICRS(0*u.deg, dec*u.deg))
actual = icrs_coord.transform_to(skyoffset_frame)
actual_xyz = actual.cartesian.xyz
# back to ICRS
roundtrip = actual.transform_to(ICRS())
# Verify
assert_allclose(actual_xyz, expected_xyz, atol=1E-5*u.kpc)
assert_allclose(icrs_coord.ra, roundtrip.ra, atol=1E-5*u.deg)
assert_allclose(icrs_coord.dec, roundtrip.dec, atol=1E-5*u.deg)
assert_allclose(icrs_coord.distance, roundtrip.distance, atol=1E-5*u.kpc)
def test_skyoffset_functional_ra_dec():
# we do the 12)[1:-1] business because sometimes machine precision issues
# lead to results that are either ~0 or ~360, which mucks up the final
# comparison and leads to spurious failures. So this just avoids that by
# staying away from the edges
input_ra = np.linspace(0, 360, 12)[1:-1]
input_dec = np.linspace(-90, 90, 12)[1:-1]
input_ra_rad = np.deg2rad(input_ra)
input_dec_rad = np.deg2rad(input_dec)
icrs_coord = ICRS(ra=input_ra*u.deg,
dec=input_dec*u.deg,
distance=1.*u.kpc)
for ra in np.linspace(0, 360, 10):
for dec in np.linspace(-90, 90, 5):
# expected rotation
dec_rad = -np.deg2rad(dec)
ra_rad = np.deg2rad(ra)
expected_x = (-np.sin(input_dec_rad) * np.sin(dec_rad) +
np.cos(input_ra_rad) * np.cos(input_dec_rad) * np.cos(dec_rad) * np.cos(ra_rad) +
np.sin(input_ra_rad) * np.cos(input_dec_rad) * np.cos(dec_rad) * np.sin(ra_rad))
expected_y = (np.sin(input_ra_rad) * np.cos(input_dec_rad) * np.cos(ra_rad) -
np.cos(input_ra_rad) * np.cos(input_dec_rad) * np.sin(ra_rad))
expected_z = (np.sin(input_dec_rad) * np.cos(dec_rad) +
np.sin(dec_rad) * np.cos(ra_rad) * np.cos(input_ra_rad) * np.cos(input_dec_rad) +
np.sin(dec_rad) * np.sin(ra_rad) * np.sin(input_ra_rad) * np.cos(input_dec_rad))
expected = SkyCoord(x=expected_x,
y=expected_y,
z=expected_z, unit='kpc', representation_type='cartesian')
expected_xyz = expected.cartesian.xyz
# actual transformation to the frame
skyoffset_frame = SkyOffsetFrame(origin=ICRS(ra*u.deg, dec*u.deg))
actual = icrs_coord.transform_to(skyoffset_frame)
actual_xyz = actual.cartesian.xyz
# back to ICRS
roundtrip = actual.transform_to(ICRS())
# Verify
assert_allclose(actual_xyz, expected_xyz, atol=1E-5*u.kpc)
assert_allclose(icrs_coord.ra, roundtrip.ra, atol=1E-4*u.deg)
assert_allclose(icrs_coord.dec, roundtrip.dec, atol=1E-5*u.deg)
assert_allclose(icrs_coord.distance, roundtrip.distance, atol=1E-5*u.kpc)
def test_skycoord_skyoffset_frame():
m31 = SkyCoord(10.6847083, 41.26875, frame='icrs', unit=u.deg)
m33 = SkyCoord(23.4621, 30.6599417, frame='icrs', unit=u.deg)
m31_astro = m31.skyoffset_frame()
m31_in_m31 = m31.transform_to(m31_astro)
m33_in_m31 = m33.transform_to(m31_astro)
assert_allclose([m31_in_m31.lon, m31_in_m31.lat], [0, 0]*u.deg, atol=1e-10*u.deg)
assert_allclose([m33_in_m31.lon, m33_in_m31.lat], [11.13135175, -9.79084759]*u.deg)
assert_allclose(m33.separation(m31),
np.hypot(m33_in_m31.lon, m33_in_m31.lat),
atol=.1*u.deg)
# used below in the next parametrized test
m31_sys = [ICRS, FK5, Galactic]
m31_coo = [(10.6847929, 41.2690650), (10.6847929, 41.2690650), (121.1744050, -21.5729360)]
m31_dist = Distance(770, u.kpc)
convert_precision = 1 * u.arcsec
roundtrip_precision = 1e-4 * u.degree
dist_precision = 1e-9 * u.kpc
m31_params = []
for i in range(len(m31_sys)):
for j in range(len(m31_sys)):
if i < j:
m31_params.append((m31_sys[i], m31_sys[j], m31_coo[i], m31_coo[j]))
@pytest.mark.parametrize(('fromsys', 'tosys', 'fromcoo', 'tocoo'), m31_params)
def test_m31_coord_transforms(fromsys, tosys, fromcoo, tocoo):
"""
This tests a variety of coordinate conversions for the Chandra point-source
catalog location of M31 from NED, via SkyOffsetFrames
"""
from_origin = fromsys(fromcoo[0]*u.deg, fromcoo[1]*u.deg,
distance=m31_dist)
from_pos = SkyOffsetFrame(1*u.deg, 1*u.deg, origin=from_origin)
to_origin = tosys(tocoo[0]*u.deg, tocoo[1]*u.deg, distance=m31_dist)
to_astroframe = SkyOffsetFrame(origin=to_origin)
target_pos = from_pos.transform_to(to_astroframe)
assert_allclose(to_origin.separation(target_pos),
np.hypot(from_pos.lon, from_pos.lat),
atol=convert_precision)
roundtrip_pos = target_pos.transform_to(from_pos)
assert_allclose([roundtrip_pos.lon.wrap_at(180*u.deg), roundtrip_pos.lat],
[1.0*u.deg, 1.0*u.deg], atol=convert_precision)
@pytest.mark.parametrize("rotation, expectedlatlon", [
(0*u.deg, [0, 1]*u.deg),
(180*u.deg, [0, -1]*u.deg),
(90*u.deg, [-1, 0]*u.deg),
(-90*u.deg, [1, 0]*u.deg)
])
def test_rotation(rotation, expectedlatlon):
origin = ICRS(45*u.deg, 45*u.deg)
target = ICRS(45*u.deg, 46*u.deg)
aframe = SkyOffsetFrame(origin=origin, rotation=rotation)
trans = target.transform_to(aframe)
assert_allclose([trans.lon.wrap_at(180*u.deg), trans.lat],
expectedlatlon, atol=1e-10*u.deg)
@pytest.mark.parametrize("rotation, expectedlatlon", [
(0*u.deg, [0, 1]*u.deg),
(180*u.deg, [0, -1]*u.deg),
(90*u.deg, [-1, 0]*u.deg),
(-90*u.deg, [1, 0]*u.deg)
])
def test_skycoord_skyoffset_frame_rotation(rotation, expectedlatlon):
"""Test if passing a rotation argument via SkyCoord works"""
origin = SkyCoord(45*u.deg, 45*u.deg)
target = SkyCoord(45*u.deg, 46*u.deg)
aframe = origin.skyoffset_frame(rotation=rotation)
trans = target.transform_to(aframe)
assert_allclose([trans.lon.wrap_at(180*u.deg), trans.lat],
expectedlatlon, atol=1e-10*u.deg)
def test_skyoffset_names():
origin1 = ICRS(45*u.deg, 45*u.deg)
aframe1 = SkyOffsetFrame(origin=origin1)
assert type(aframe1).__name__ == 'SkyOffsetICRS'
origin2 = Galactic(45*u.deg, 45*u.deg)
aframe2 = SkyOffsetFrame(origin=origin2)
assert type(aframe2).__name__ == 'SkyOffsetGalactic'
def test_skyoffset_origindata():
origin = ICRS()
with pytest.raises(ValueError):
SkyOffsetFrame(origin=origin)
def test_skyoffset_lonwrap():
origin = ICRS(45*u.deg, 45*u.deg)
sc = SkyCoord(190*u.deg, -45*u.deg, frame=SkyOffsetFrame(origin=origin))
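# the offset frame wraps longitude at 180 deg, so 190 deg comes back as -170 deg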
assert sc.lon < 180 * u.deg
sc2 = SkyCoord(-10*u.deg, -45*u.deg, frame=SkyOffsetFrame(origin=origin))
assert sc2.lon < 180 * u.deg
sc3 = sc.realize_frame(sc.represent_as('cartesian'))
assert sc3.lon < 180 * u.deg
sc4 = sc2.realize_frame(sc2.represent_as('cartesian'))
assert sc4.lon < 180 * u.deg
def test_skyoffset_velocity():
c = ICRS(ra=170.9*u.deg, dec=-78.4*u.deg,
pm_ra_cosdec=74.4134*u.mas/u.yr,
pm_dec=-93.2342*u.mas/u.yr)
skyoffset_frame = SkyOffsetFrame(origin=c)
c_skyoffset = c.transform_to(skyoffset_frame)
assert_allclose(c_skyoffset.pm_lon_coslat, c.pm_ra_cosdec)
assert_allclose(c_skyoffset.pm_lat, c.pm_dec)
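# The expected proper motions below are the input (pm_lon_coslat, pm_lat) =
# (1, 2) mas/yr vector rotated through the rotation angle; e.g. rotation by
# 90 deg maps (1, 2) -> (-2, 1).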
@pytest.mark.parametrize("rotation, expectedpmlonlat", [
(0*u.deg, [1, 2]*u.mas/u.yr),
(45*u.deg, [-2**-0.5, 3*2**-0.5]*u.mas/u.yr),
(90*u.deg, [-2, 1]*u.mas/u.yr),
(180*u.deg, [-1, -2]*u.mas/u.yr),
(-90*u.deg, [2, -1]*u.mas/u.yr)
])
def test_skyoffset_velocity_rotation(rotation, expectedpmlonlat):
sc = SkyCoord(ra=170.9*u.deg, dec=-78.4*u.deg,
pm_ra_cosdec=1*u.mas/u.yr,
pm_dec=2*u.mas/u.yr)
c_skyoffset0 = sc.transform_to(sc.skyoffset_frame(rotation=rotation))
assert_allclose(c_skyoffset0.pm_lon_coslat, expectedpmlonlat[0])
assert_allclose(c_skyoffset0.pm_lat, expectedpmlonlat[1])
def test_skyoffset_two_frames_interfering():
"""Regression test for gh-11277, where it turned out that the
origin argument validation from one SkyOffsetFrame could interfere
with that of another.
Note that this example brought out a different bug than that at the
top of gh-11277, viz., that an attempt was made to set origin on a SkyCoord
when it should just stay part of the SkyOffsetFrame.
"""
# Example adapted from @bmerry's minimal example at
# https://github.com/astropy/astropy/issues/11277#issuecomment-825492335
altaz_frame = AltAz(obstime=Time('2020-04-22T13:00:00Z'),
location=EarthLocation(18, -30))
target = SkyCoord(alt=70*u.deg, az=150*u.deg, frame=altaz_frame)
dirs_altaz_offset = SkyCoord(lon=[-0.02, 0.01, 0.0, 0.0, 0.0] * u.rad,
lat=[0.0, 0.2, 0.0, -0.3, 0.1] * u.rad,
frame=target.skyoffset_frame())
dirs_altaz = dirs_altaz_offset.transform_to(altaz_frame)
dirs_icrs = dirs_altaz.transform_to(ICRS())
target_icrs = target.transform_to(ICRS())
# The line below was almost guaranteed to fail.
dirs_icrs.transform_to(target_icrs.skyoffset_frame())
|
6eb27b210c55610a502625daefa78b7c0cea4d8cd44eb7a64f089bb48f48f27b | import pickle
import pytest
import numpy as np
import astropy.units as u
from astropy.coordinates import Longitude, representation
from astropy import coordinates as coord
from astropy.tests.helper import pickle_protocol, check_pickling_recovery # noqa
# Can't test distances without scipy due to cosmology deps
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
def test_basic():
lon1 = Longitude(1.23, "radian", wrap_angle='180d')
s = pickle.dumps(lon1)
lon2 = pickle.loads(s)
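# the round trip itself not raising is the point here; value equality is
# checked in the next test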
def test_pickle_longitude_wrap_angle():
a = Longitude(1.23, "radian", wrap_angle='180d')
s = pickle.dumps(a)
b = pickle.loads(s)
assert a.rad == b.rad
assert a.wrap_angle == b.wrap_angle
_names = [coord.Angle,
coord.Distance,
coord.DynamicMatrixTransform,
coord.ICRS,
coord.Latitude,
coord.Longitude,
coord.StaticMatrixTransform,
]
_xfail = [False,
not HAS_SCIPY,
True,
True,
False,
True,
False]
_args = [[0.0],
[],
[lambda *args: np.identity(3), coord.ICRS, coord.ICRS],
[0, 0],
[0],
[0],
[np.identity(3), coord.ICRS, coord.ICRS],
]
_kwargs = [{'unit': 'radian'},
{'z': 0.23},
{},
{'unit': ['radian', 'radian']},
{'unit': 'radian'},
{'unit': 'radian'},
{},
]
@pytest.mark.parametrize(("name", "args", "kwargs", "xfail"),
tuple(zip(_names, _args, _kwargs, _xfail)))
def test_simple_object(pickle_protocol, name, args, kwargs, xfail):
# Tests easily instantiated objects
if xfail:
pytest.xfail()
original = name(*args, **kwargs)
check_pickling_recovery(original, pickle_protocol)
class _CustomICRS(coord.ICRS):
default_representation = coord.PhysicsSphericalRepresentation
@pytest.mark.parametrize(
"frame",
[
coord.SkyOffsetFrame(origin=coord.ICRS(0*u.deg, 0*u.deg)),
coord.SkyOffsetFrame(5*u.deg, 10*u.deg, origin=coord.Galactic(2*u.deg, -3*u.deg)),
coord.SkyOffsetFrame(5*u.deg, 10*u.deg, 10*u.pc,
origin=coord.Galactic(2*u.deg, -3*u.deg),
representation_type=coord.PhysicsSphericalRepresentation),
coord.SkyOffsetFrame(5*u.deg, 10*u.deg, 0*u.pc,
origin=_CustomICRS(2*u.deg, 3*u.deg, 1*u.pc)),
]
)
def test_skyoffset_pickle(pickle_protocol, frame):
"""
This is a regression test for issue #9249:
https://github.com/astropy/astropy/issues/9249
"""
check_pickling_recovery(frame, pickle_protocol)
|
379d5e27675f34546ccf59976779837920a44073e70d5b4a878deadb33022963 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy import units as u
from astropy.coordinates import galactocentric_frame_defaults
from astropy.coordinates.distances import Distance
from astropy.coordinates.builtin_frames import (
ICRS, FK5, FK4, FK4NoETerms, Galactic, CIRS,
Supergalactic, Galactocentric, HCRS, GCRS, LSR, GalacticLSR)
from astropy.coordinates import SkyCoord
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.coordinates import EarthLocation, CartesianRepresentation, CartesianDifferential
from astropy.time import Time
from astropy.units import allclose
# used below in the next parametrized test
m31_sys = [ICRS, FK5, FK4, Galactic]
m31_coo = [(10.6847929, 41.2690650), (10.6847929, 41.2690650),
(10.0004738, 40.9952444), (121.1744050, -21.5729360)]
m31_dist = Distance(770, u.kpc)
convert_precision = 1 * u.arcsec
roundtrip_precision = 1e-4 * u.degree
dist_precision = 1e-9 * u.kpc
m31_params = []
for i in range(len(m31_sys)):
for j in range(len(m31_sys)):
if i < j:
m31_params.append((m31_sys[i], m31_sys[j], m31_coo[i], m31_coo[j]))
@pytest.mark.parametrize(('fromsys', 'tosys', 'fromcoo', 'tocoo'), m31_params)
def test_m31_coord_transforms(fromsys, tosys, fromcoo, tocoo):
"""
This tests a variety of coordinate conversions for the Chandra point-source
catalog location of M31 from NED.
"""
coo1 = fromsys(ra=fromcoo[0]*u.deg, dec=fromcoo[1]*u.deg, distance=m31_dist)
coo2 = coo1.transform_to(tosys())
if tosys is FK4:
coo2_prec = coo2.transform_to(FK4(equinox=Time('B1950')))
assert (coo2_prec.spherical.lon - tocoo[0]*u.deg) < convert_precision # <1 arcsec
assert (coo2_prec.spherical.lat - tocoo[1]*u.deg) < convert_precision
else:
assert (coo2.spherical.lon - tocoo[0]*u.deg) < convert_precision # <1 arcsec
assert (coo2.spherical.lat - tocoo[1]*u.deg) < convert_precision
assert coo1.distance.unit == u.kpc
assert coo2.distance.unit == u.kpc
assert m31_dist.unit == u.kpc
assert (coo2.distance - m31_dist) < dist_precision
# check round-tripping
coo1_2 = coo2.transform_to(fromsys())
assert (coo1_2.spherical.lon - fromcoo[0]*u.deg) < roundtrip_precision
assert (coo1_2.spherical.lat - fromcoo[1]*u.deg) < roundtrip_precision
assert (coo1_2.distance - m31_dist) < dist_precision
def test_precession():
"""
Ensures that FK4 and FK5 coordinates precess their equinoxes
"""
j2000 = Time('J2000')
b1950 = Time('B1950')
j1975 = Time('J1975')
b1975 = Time('B1975')
fk4 = FK4(ra=1*u.radian, dec=0.5*u.radian)
assert fk4.equinox.byear == b1950.byear
fk4_2 = fk4.transform_to(FK4(equinox=b1975))
assert fk4_2.equinox.byear == b1975.byear
fk5 = FK5(ra=1*u.radian, dec=0.5*u.radian)
assert fk5.equinox.jyear == j2000.jyear
fk5_2 = fk5.transform_to(FK4(equinox=j1975))
assert fk5_2.equinox.jyear == j1975.jyear
def test_fk5_galactic():
"""
Check that FK5 -> Galactic gives the same as FK5 -> FK4 -> Galactic.
"""
fk5 = FK5(ra=1*u.deg, dec=2*u.deg)
direct = fk5.transform_to(Galactic())
indirect = fk5.transform_to(FK4()).transform_to(Galactic())
assert direct.separation(indirect).degree < 1.e-10
direct = fk5.transform_to(Galactic())
indirect = fk5.transform_to(FK4NoETerms()).transform_to(Galactic())
assert direct.separation(indirect).degree < 1.e-10
def test_galactocentric():
# when z_sun=0, transformation should be very similar to Galactic
icrs_coord = ICRS(ra=np.linspace(0, 360, 10)*u.deg,
dec=np.linspace(-90, 90, 10)*u.deg,
distance=1.*u.kpc)
g_xyz = icrs_coord.transform_to(Galactic()).cartesian.xyz
with galactocentric_frame_defaults.set('pre-v4.0'):
gc_xyz = icrs_coord.transform_to(Galactocentric(z_sun=0*u.kpc)).cartesian.xyz
diff = np.abs(g_xyz - gc_xyz)
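# the x-offset is just the pre-v4.0 default galcen_distance of 8.3 kpc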
assert allclose(diff[0], 8.3*u.kpc, atol=1E-5*u.kpc)
assert allclose(diff[1:], 0*u.kpc, atol=1E-5*u.kpc)
# generate some test coordinates
g = Galactic(l=[0, 0, 45, 315]*u.deg, b=[-45, 45, 0, 0]*u.deg,
distance=[np.sqrt(2)]*4*u.kpc)
with galactocentric_frame_defaults.set('pre-v4.0'):
xyz = g.transform_to(Galactocentric(galcen_distance=1.*u.kpc, z_sun=0.*u.pc)).cartesian.xyz
true_xyz = np.array([[0, 0, -1.], [0, 0, 1], [0, 1, 0], [0, -1, 0]]).T*u.kpc
assert allclose(xyz.to(u.kpc), true_xyz.to(u.kpc), atol=1E-5*u.kpc)
# check that ND arrays work
# from Galactocentric to Galactic
x = np.linspace(-10., 10., 100) * u.kpc
y = np.linspace(-10., 10., 100) * u.kpc
z = np.zeros_like(x)
# from Galactic to Galactocentric
l = np.linspace(15, 30., 100) * u.deg
b = np.linspace(-10., 10., 100) * u.deg
d = np.ones_like(l.value) * u.kpc
with galactocentric_frame_defaults.set('latest'):
g1 = Galactocentric(x=x, y=y, z=z)
g2 = Galactocentric(x=x.reshape(100, 1, 1), y=y.reshape(100, 1, 1),
z=z.reshape(100, 1, 1))
g1t = g1.transform_to(Galactic())
g2t = g2.transform_to(Galactic())
assert_allclose(g1t.cartesian.xyz, g2t.cartesian.xyz[:, :, 0, 0])
g1 = Galactic(l=l, b=b, distance=d)
g2 = Galactic(l=l.reshape(100, 1, 1), b=b.reshape(100, 1, 1),
distance=d.reshape(100, 1, 1))
g1t = g1.transform_to(Galactocentric())
g2t = g2.transform_to(Galactocentric())
np.testing.assert_almost_equal(g1t.cartesian.xyz.value,
g2t.cartesian.xyz.value[:, :, 0, 0])
def test_supergalactic():
"""
Check Galactic<->Supergalactic and Galactic<->ICRS conversion.
"""
# Check supergalactic North pole.
npole = Galactic(l=47.37*u.degree, b=+6.32*u.degree)
assert allclose(npole.transform_to(Supergalactic()).sgb.deg, +90, atol=1e-9)
# Check the origin of supergalactic longitude.
lon0 = Supergalactic(sgl=0*u.degree, sgb=0*u.degree)
lon0_gal = lon0.transform_to(Galactic())
assert allclose(lon0_gal.l.deg, 137.37, atol=1e-9)
assert allclose(lon0_gal.b.deg, 0, atol=1e-9)
# Test Galactic<->ICRS with some positions that appear in Foley et al. 2008
# (https://ui.adsabs.harvard.edu/abs/2008A%26A...484..143F)
# GRB 021219
supergalactic = Supergalactic(sgl=29.91*u.degree, sgb=+73.72*u.degree)
icrs = SkyCoord('18h50m27s +31d57m17s')
assert supergalactic.separation(icrs) < 0.005 * u.degree
# GRB 030320
supergalactic = Supergalactic(sgl=-174.44*u.degree, sgb=+46.17*u.degree)
icrs = SkyCoord('17h51m36s -25d18m52s')
assert supergalactic.separation(icrs) < 0.005 * u.degree
class TestHCRS():
"""
Check HCRS<->ICRS coordinate conversions.
Uses ICRS Solar positions predicted by get_body_barycentric; with `t1` and
`tarr` as defined below, the ICRS Solar positions were predicted using, e.g.
coord.ICRS(coord.get_body_barycentric(tarr, 'sun')).
"""
def setup(self):
self.t1 = Time("2013-02-02T23:00")
self.t2 = Time("2013-08-02T23:00")
self.tarr = Time(["2013-02-02T23:00", "2013-08-02T23:00"])
self.sun_icrs_scalar = ICRS(ra=244.52984668*u.deg,
dec=-22.36943723*u.deg,
distance=406615.66347377*u.km)
# array of positions corresponds to times in `tarr`
self.sun_icrs_arr = ICRS(ra=[244.52989062, 271.40976248]*u.deg,
dec=[-22.36943605, -25.07431079]*u.deg,
distance=[406615.66347377, 375484.13558956]*u.km)
# corresponding HCRS positions
self.sun_hcrs_t1 = HCRS(CartesianRepresentation([0.0, 0.0, 0.0] * u.km),
obstime=self.t1)
twod_rep = CartesianRepresentation([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]] * u.km)
self.sun_hcrs_tarr = HCRS(twod_rep, obstime=self.tarr)
self.tolerance = 5*u.km
def test_from_hcrs(self):
# test scalar transform
transformed = self.sun_hcrs_t1.transform_to(ICRS())
separation = transformed.separation_3d(self.sun_icrs_scalar)
assert_allclose(separation, 0*u.km, atol=self.tolerance)
# test non-scalar positions and times
transformed = self.sun_hcrs_tarr.transform_to(ICRS())
separation = transformed.separation_3d(self.sun_icrs_arr)
assert_allclose(separation, 0*u.km, atol=self.tolerance)
def test_from_icrs(self):
# scalar positions
transformed = self.sun_icrs_scalar.transform_to(HCRS(obstime=self.t1))
separation = transformed.separation_3d(self.sun_hcrs_t1)
assert_allclose(separation, 0*u.km, atol=self.tolerance)
# nonscalar positions
transformed = self.sun_icrs_arr.transform_to(HCRS(obstime=self.tarr))
separation = transformed.separation_3d(self.sun_hcrs_tarr)
assert_allclose(separation, 0*u.km, atol=self.tolerance)
class TestHelioBaryCentric():
"""
Check GCRS<->Heliocentric and Barycentric coordinate conversions.
Uses the WHT observing site (information grabbed from data/sites.json).
"""
def setup(self):
wht = EarthLocation(342.12*u.deg, 28.758333333333333*u.deg, 2327*u.m)
self.obstime = Time("2013-02-02T23:00")
self.wht_itrs = wht.get_itrs(obstime=self.obstime)
def test_heliocentric(self):
gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime))
helio = gcrs.transform_to(HCRS(obstime=self.obstime))
# Check it doesn't change from previous times.
previous = [-1.02597256e+11, 9.71725820e+10, 4.21268419e+10] * u.m
assert_allclose(helio.cartesian.xyz, previous)
# And that it agrees with SLALIB to within 14 km
helio_slalib = [-0.685820296, 0.6495585893, 0.2816005464] * u.au
assert np.sqrt(((helio.cartesian.xyz -
helio_slalib)**2).sum()) < 14. * u.km
def test_barycentric(self):
gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime))
bary = gcrs.transform_to(ICRS())
previous = [-1.02758958e+11, 9.68331109e+10, 4.19720938e+10] * u.m
assert_allclose(bary.cartesian.xyz, previous)
# And that it agrees with the SLALIB answer to within 14 km
bary_slalib = [-0.6869012079, 0.6472893646, 0.2805661191] * u.au
assert np.sqrt(((bary.cartesian.xyz -
bary_slalib)**2).sum()) < 14. * u.km
def test_lsr_sanity():
# random numbers, but zero velocity in ICRS frame
icrs = ICRS(ra=15.1241*u.deg, dec=17.5143*u.deg, distance=150.12*u.pc,
pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
radial_velocity=0*u.km/u.s)
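# a source at rest in ICRS should appear to move with exactly +v_bary
# (the solar motion) when expressed in the LSR frame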
lsr = icrs.transform_to(LSR())
lsr_diff = lsr.data.differentials['s']
cart_lsr_vel = lsr_diff.represent_as(CartesianRepresentation, base=lsr.data)
lsr_vel = ICRS(cart_lsr_vel)
gal_lsr = lsr_vel.transform_to(Galactic()).cartesian.xyz
assert allclose(gal_lsr.to(u.km/u.s, u.dimensionless_angles()),
lsr.v_bary.d_xyz)
# moving with LSR velocity
lsr = LSR(ra=15.1241*u.deg, dec=17.5143*u.deg, distance=150.12*u.pc,
pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
radial_velocity=0*u.km/u.s)
icrs = lsr.transform_to(ICRS())
icrs_diff = icrs.data.differentials['s']
cart_vel = icrs_diff.represent_as(CartesianRepresentation, base=icrs.data)
vel = ICRS(cart_vel)
gal_icrs = vel.transform_to(Galactic()).cartesian.xyz
assert allclose(gal_icrs.to(u.km/u.s, u.dimensionless_angles()),
-lsr.v_bary.d_xyz)
def test_hcrs_icrs_differentials():
# Regression to ensure that we can transform velocities from HCRS to LSR.
# Numbers taken from the original issue, gh-6835.
hcrs = HCRS(ra=8.67*u.deg, dec=53.09*u.deg, distance=117*u.pc,
pm_ra_cosdec=4.8*u.mas/u.yr, pm_dec=-15.16*u.mas/u.yr,
radial_velocity=23.42*u.km/u.s)
icrs = hcrs.transform_to(ICRS())
# The position and velocity should not change much
assert allclose(hcrs.cartesian.xyz, icrs.cartesian.xyz, rtol=1e-8)
assert allclose(hcrs.velocity.d_xyz, icrs.velocity.d_xyz, rtol=1e-2)
hcrs2 = icrs.transform_to(HCRS())
# The values should round trip
assert allclose(hcrs.cartesian.xyz, hcrs2.cartesian.xyz, rtol=1e-12)
assert allclose(hcrs.velocity.d_xyz, hcrs2.velocity.d_xyz, rtol=1e-12)
def test_cirs_icrs():
"""
Test CIRS<->ICRS transformations, including self transform
"""
t = Time("J2010")
MOONDIST = 385000*u.km # approximate semi-major axis of the Moon's orbit
MOONDIST_CART = CartesianRepresentation(3**-0.5*MOONDIST, 3**-0.5*MOONDIST, 3**-0.5*MOONDIST)
loc = EarthLocation(lat=0*u.deg, lon=0*u.deg)
cirs_geo_frame = CIRS(obstime=t)
cirs_topo_frame = CIRS(obstime=t, location=loc)
moon_geo = cirs_geo_frame.realize_frame(MOONDIST_CART)
moon_topo = moon_geo.transform_to(cirs_topo_frame)
# now check that the distance change is comparable to the Earth's radius (~6400 km)
assert 1000*u.km < np.abs(moon_topo.distance - moon_geo.distance).to(u.au) < 7000*u.km
# now check that it round-trips
moon2 = moon_topo.transform_to(moon_geo)
assert_allclose(moon_geo.cartesian.xyz, moon2.cartesian.xyz)
# now check ICRS transform gives a decent distance from Barycentre
moon_icrs = moon_geo.transform_to(ICRS())
assert_allclose(moon_icrs.distance - 1*u.au, 0.0*u.R_sun, atol=3*u.R_sun)
@pytest.mark.parametrize('frame', [LSR, GalacticLSR])
def test_lsr_loopback(frame):
xyz = CartesianRepresentation(1, 2, 3)*u.AU
xyz = xyz.with_differentials(CartesianDifferential(4, 5, 6)*u.km/u.s)
v_bary = CartesianDifferential(5, 10, 15)*u.km/u.s
# Test that the loopback properly handles a change in v_bary
from_coo = frame(xyz) # default v_bary
to_frame = frame(v_bary=v_bary)
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the velocity but not the position
assert allclose(explicit_coo.cartesian.xyz, from_coo.cartesian.xyz, rtol=1e-10)
assert not allclose(explicit_coo.velocity.d_xyz, from_coo.velocity.d_xyz, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert allclose(explicit_coo.cartesian.xyz, implicit_coo.cartesian.xyz, rtol=1e-10)
assert allclose(explicit_coo.velocity.d_xyz, implicit_coo.velocity.d_xyz, rtol=1e-10)
@pytest.mark.parametrize('to_frame',
[Galactocentric(galcen_coord=ICRS(300*u.deg, -30*u.deg)),
Galactocentric(galcen_distance=10*u.kpc),
Galactocentric(z_sun=10*u.pc),
Galactocentric(roll=1*u.deg)])
def test_galactocentric_loopback(to_frame):
xyz = CartesianRepresentation(1, 2, 3)*u.pc
from_coo = Galactocentric(xyz)
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the position
assert not allclose(explicit_coo.cartesian.xyz, from_coo.cartesian.xyz, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert allclose(explicit_coo.cartesian.xyz, implicit_coo.cartesian.xyz, rtol=1e-10)
|
9021275860338ba3dfdd8849c5b44b9d90a6ad84488ef6263dccb5b0a5debc88 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test initialization and other aspects of Angle and subclasses"""
import threading
import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
import astropy.units as u
from astropy.coordinates.angles import Longitude, Latitude, Angle
from astropy.coordinates.errors import (
IllegalSecondError, IllegalMinuteError, IllegalHourError,
IllegalSecondWarning, IllegalMinuteWarning)
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_create_angles():
"""
Tests creating and accessing Angle objects
"""
''' The "angle" is a fundamental object. The internal
representation is stored in radians, but this is transparent to the user.
Units *must* be specified rather than a default value be assumed. This is
as much for self-documenting code as anything else.
Angle objects simply represent a single angular coordinate. More specific
angular coordinates (e.g. Longitude, Latitude) are subclasses of Angle.'''
a1 = Angle(54.12412, unit=u.degree)
a2 = Angle("54.12412", unit=u.degree)
a3 = Angle("54:07:26.832", unit=u.degree)
a4 = Angle("54.12412 deg")
a5 = Angle("54.12412 degrees")
a6 = Angle("54.12412°") # because we like Unicode
a8 = Angle("54°07'26.832\"")
a9 = Angle([54, 7, 26.832], unit=u.degree)
assert_allclose(a9.value, [54, 7, 26.832])
assert a9.unit is u.degree
a10 = Angle(3.60827466667, unit=u.hour)
a11 = Angle("3:36:29.7888000120", unit=u.hour)
with pytest.warns(AstropyDeprecationWarning, match='hms_to_hour'):
a12 = Angle((3, 36, 29.7888000120), unit=u.hour) # *must* be a tuple
with pytest.warns(AstropyDeprecationWarning, match='hms_to_hour'):
# Regression test for #5001
a13 = Angle((3, 36, 29.7888000120), unit='hour')
Angle(0.944644098745, unit=u.radian)
with pytest.raises(u.UnitsError):
Angle(54.12412)
# raises an exception because this is ambiguous
with pytest.raises(u.UnitsError):
Angle(54.12412, unit=u.m)
with pytest.raises(ValueError):
Angle(12.34, unit="not a unit")
a14 = Angle("03h36m29.7888000120") # no trailing 's', but unambiguous
a15 = Angle("5h4m3s") # single digits, no decimal
assert a15.unit == u.hourangle
a16 = Angle("1 d")
a17 = Angle("1 degree")
assert a16.degree == 1
assert a17.degree == 1
a18 = Angle("54 07.4472", unit=u.degree)
a19 = Angle("54:07.4472", unit=u.degree)
a20 = Angle("54d07.4472m", unit=u.degree)
a21 = Angle("3h36m", unit=u.hour)
a22 = Angle("3.6h", unit=u.hour)
a23 = Angle("- 3h", unit=u.hour)
a24 = Angle("+ 3h", unit=u.hour)
# ensure the above angles that should match do
assert a1 == a2 == a3 == a4 == a5 == a6 == a8 == a18 == a19 == a20
assert_allclose(a1.radian, a2.radian)
assert_allclose(a2.degree, a3.degree)
assert_allclose(a3.radian, a4.radian)
assert_allclose(a4.radian, a5.radian)
assert_allclose(a5.radian, a6.radian)
assert_allclose(a10.degree, a11.degree)
assert a11 == a12 == a13 == a14
assert a21 == a22
assert a23 == -a24
# check for illegal ranges / values
with pytest.raises(IllegalSecondError):
a = Angle("12 32 99", unit=u.degree)
with pytest.raises(IllegalMinuteError):
a = Angle("12 99 23", unit=u.degree)
with pytest.raises(IllegalSecondError):
a = Angle("12 32 99", unit=u.hour)
with pytest.raises(IllegalMinuteError):
a = Angle("12 99 23", unit=u.hour)
with pytest.raises(IllegalHourError):
a = Angle("99 25 51.0", unit=u.hour)
with pytest.raises(ValueError):
a = Angle("12 25 51.0xxx", unit=u.hour)
with pytest.raises(ValueError):
a = Angle("12h34321m32.2s")
assert a1 is not None
def test_angle_from_view():
q = np.arange(3.) * u.deg
a = q.view(Angle)
assert type(a) is Angle
assert a.unit is q.unit
assert np.all(a == q)
q2 = np.arange(4) * u.m
with pytest.raises(u.UnitTypeError):
q2.view(Angle)
def test_angle_ops():
"""
Tests operations on Angle objects
"""
# Angles can be added and subtracted. Multiplication and division by a
# scalar is also permitted. A negative operator is also valid. All of
# these operate in a single dimension. Attempting to multiply or divide two
# Angle objects will return a quantity. An exception will be raised if it
# is attempted to store output with a non-angular unit in an Angle [#2718].
a1 = Angle(3.60827466667, unit=u.hour)
a2 = Angle("54:07:26.832", unit=u.degree)
a1 + a2 # creates new Angle object
a1 - a2
-a1
assert_allclose((a1 * 2).hour, 2 * 3.6082746666700003)
assert abs((a1 / 3.123456).hour - 3.60827466667 / 3.123456) < 1e-10
# commutativity
assert (2 * a1).hour == (a1 * 2).hour
a3 = Angle(a1) # makes a *copy* of the object, with identical content to a1
assert_allclose(a1.radian, a3.radian)
assert a1 is not a3
a4 = abs(-a1)
assert a4.radian == a1.radian
a5 = Angle(5.0, unit=u.hour)
assert a5 > a1
assert a5 >= a1
assert a1 < a5
assert a1 <= a5
# check operations with non-angular result give Quantity.
a6 = Angle(45., u.degree)
a7 = a6 * a5
assert type(a7) is u.Quantity
# but those with angular result yield Angle.
# (a9 is regression test for #5327)
a8 = a1 + 1.*u.deg
assert type(a8) is Angle
a9 = 1.*u.deg + a1
assert type(a9) is Angle
with pytest.raises(TypeError):
a6 *= a5
with pytest.raises(TypeError):
a6 *= u.m
with pytest.raises(TypeError):
np.sin(a6, out=a6)
def test_angle_methods():
# Most methods tested as part of the Quantity tests.
# A few tests here which caused problems before: #8368
a = Angle([0., 2.], 'deg')
a_mean = a.mean()
assert type(a_mean) is Angle
assert a_mean == 1. * u.degree
a_std = a.std()
assert type(a_std) is Angle
assert a_std == 1. * u.degree
a_var = a.var()
assert type(a_var) is u.Quantity
assert a_var == 1. * u.degree ** 2
a_ptp = a.ptp()
assert type(a_ptp) is Angle
assert a_ptp == 2. * u.degree
a_max = a.max()
assert type(a_max) is Angle
assert a_max == 2. * u.degree
a_min = a.min()
assert type(a_min) is Angle
assert a_min == 0. * u.degree
def test_angle_convert():
"""
Test unit conversion of Angle objects
"""
angle = Angle("54.12412", unit=u.degree)
assert_allclose(angle.hour, 3.60827466667)
assert_allclose(angle.radian, 0.944644098745)
assert_allclose(angle.degree, 54.12412)
assert len(angle.hms) == 3
assert isinstance(angle.hms, tuple)
assert angle.hms[0] == 3
assert angle.hms[1] == 36
assert_allclose(angle.hms[2], 29.78879999999947)
# also check that the namedtuple attribute-style access works:
assert angle.hms.h == 3
assert angle.hms.m == 36
assert_allclose(angle.hms.s, 29.78879999999947)
assert len(angle.dms) == 3
assert isinstance(angle.dms, tuple)
assert angle.dms[0] == 54
assert angle.dms[1] == 7
assert_allclose(angle.dms[2], 26.831999999992036)
# also check that the namedtuple attribute-style access works:
assert angle.dms.d == 54
assert angle.dms.m == 7
assert_allclose(angle.dms.s, 26.831999999992036)
assert isinstance(angle.dms[0], float)
assert isinstance(angle.hms[0], float)
# now make sure dms and signed_dms work right for negative angles
negangle = Angle("-54.12412", unit=u.degree)
assert negangle.dms.d == -54
assert negangle.dms.m == -7
assert_allclose(negangle.dms.s, -26.831999999992036)
assert negangle.signed_dms.sign == -1
assert negangle.signed_dms.d == 54
assert negangle.signed_dms.m == 7
assert_allclose(negangle.signed_dms.s, 26.831999999992036)
def test_angle_formatting():
"""
Tests string formatting for Angle objects
"""
'''
The string method of Angle has this signature:
def string(self, unit=DEGREE, decimal=False, sep=" ", precision=5,
pad=False):
The "decimal" parameter defaults to False since if you need to print the
Angle as a decimal, there's no need to use the "format" method (see
above).
'''
angle = Angle("54.12412", unit=u.degree)
# __str__ is the default `format`
assert str(angle) == angle.to_string()
res = 'Angle as HMS: 3h36m29.7888s'
assert f"Angle as HMS: {angle.to_string(unit=u.hour)}" == res
res = 'Angle as HMS: 3:36:29.7888'
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep=':')}" == res
res = 'Angle as HMS: 3:36:29.79'
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep=':', precision=2)}" == res
# Note that you can provide one, two, or three separators passed as a
# tuple or list
res = 'Angle as HMS: 3h36m29.7888s'
assert "Angle as HMS: {}".format(angle.to_string(unit=u.hour,
sep=("h", "m", "s"),
precision=4)) == res
res = 'Angle as HMS: 3-36|29.7888'
assert "Angle as HMS: {}".format(angle.to_string(unit=u.hour, sep=["-", "|"],
precision=4)) == res
res = 'Angle as HMS: 3-36-29.7888'
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep='-', precision=4)}" == res
res = 'Angle as HMS: 03h36m29.7888s'
assert f"Angle as HMS: {angle.to_string(unit=u.hour, precision=4, pad=True)}" == res
# Same as above, in degrees
angle = Angle("3 36 29.78880", unit=u.degree)
res = 'Angle as DMS: 3d36m29.7888s'
assert f"Angle as DMS: {angle.to_string(unit=u.degree)}" == res
res = 'Angle as DMS: 3:36:29.7888'
assert f"Angle as DMS: {angle.to_string(unit=u.degree, sep=':')}" == res
res = 'Angle as DMS: 3:36:29.79'
assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree, sep=":",
precision=2)) == res
# Note that you can provide one, two, or three separators passed as a
# tuple or list
res = 'Angle as DMS: 3d36m29.7888s'
assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree,
sep=("d", "m", "s"),
precision=4)) == res
res = 'Angle as DMS: 3-36|29.7888'
assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree, sep=["-", "|"],
precision=4)) == res
res = 'Angle as DMS: 3-36-29.7888'
assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree, sep="-",
precision=4)) == res
res = 'Angle as DMS: 03d36m29.7888s'
assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree, precision=4,
pad=True)) == res
res = 'Angle as rad: 0.0629763rad'
assert f"Angle as rad: {angle.to_string(unit=u.radian)}" == res
res = 'Angle as rad decimal: 0.0629763'
assert f"Angle as rad decimal: {angle.to_string(unit=u.radian, decimal=True)}" == res
# check negative angles
angle = Angle(-1.23456789, unit=u.degree)
angle2 = Angle(-1.23456789, unit=u.hour)
assert angle.to_string() == '-1d14m04.444404s'
assert angle.to_string(pad=True) == '-01d14m04.444404s'
assert angle.to_string(unit=u.hour) == '-0h04m56.2962936s'
assert angle2.to_string(unit=u.hour, pad=True) == '-01h14m04.444404s'
assert angle.to_string(unit=u.radian, decimal=True) == '-0.0215473'
def test_to_string_vector():
# Regression test for the fact that vectorize doesn't work with Numpy 1.6
assert Angle([1./7., 1./7.], unit='deg').to_string()[0] == "0d08m34.28571429s"
assert Angle([1./7.], unit='deg').to_string()[0] == "0d08m34.28571429s"
assert Angle(1./7., unit='deg').to_string() == "0d08m34.28571429s"
def test_angle_format_roundtripping():
"""
Ensures that the string representation of an angle can be used to create a
new valid Angle.
"""
a1 = Angle(0, unit=u.radian)
a2 = Angle(10, unit=u.degree)
a3 = Angle(0.543, unit=u.degree)
a4 = Angle('1d2m3.4s')
assert Angle(str(a1)).degree == a1.degree
assert Angle(str(a2)).degree == a2.degree
assert Angle(str(a3)).degree == a3.degree
assert Angle(str(a4)).degree == a4.degree
# also check Longitude/Latitude
ra = Longitude('1h2m3.4s')
dec = Latitude('1d2m3.4s')
assert_allclose(Angle(str(ra)).degree, ra.degree)
assert_allclose(Angle(str(dec)).degree, dec.degree)
def test_radec():
"""
Tests creation/operations of Longitude and Latitude objects
"""
'''
Longitude and Latitude are objects that are subclassed from Angle. As with Angle, Longitude
and Latitude can parse any unambiguous format (tuples, formatted strings, etc.).
The intention is not to create an Angle subclass for every possible
coordinate object (e.g. galactic l, galactic b). However, equatorial Longitude/Latitude
are so prevalent in astronomy that it's worth creating ones for these
units. They will be noted as "special" in the docs and use of the just the
Angle class is to be used for other coordinate systems.
'''
with pytest.raises(u.UnitsError):
ra = Longitude("4:08:15.162342") # error - hours or degrees?
with pytest.raises(u.UnitsError):
ra = Longitude("-4:08:15.162342")
# the "smart" initializer allows >24 to automatically do degrees, but the
# Angle-based one does not
# TODO: adjust in 0.3 for whatever behavior is decided on
# ra = Longitude("26:34:15.345634") # unambiguous b/c hours don't go past 24
# assert_allclose(ra.degree, 26.570929342)
with pytest.raises(u.UnitsError):
ra = Longitude("26:34:15.345634")
# ra = Longitude(68)
with pytest.raises(u.UnitsError):
ra = Longitude(68)
with pytest.raises(u.UnitsError):
ra = Longitude(12)
with pytest.raises(ValueError):
ra = Longitude("garbage containing a d and no units")
ra = Longitude("12h43m23s")
assert_allclose(ra.hour, 12.7230555556)
# TODO: again, fix based on >24 behavior
# ra = Longitude((56,14,52.52))
with pytest.raises(u.UnitsError):
ra = Longitude((56, 14, 52.52))
with pytest.raises(u.UnitsError):
ra = Longitude((12, 14, 52)) # ambiguous w/o units
with pytest.warns(AstropyDeprecationWarning, match='hms_to_hours'):
ra = Longitude((12, 14, 52), unit=u.hour)
# Units can be specified
ra = Longitude("4:08:15.162342", unit=u.hour)
# TODO: this was the "smart" initializer behavior - adjust in 0.3 appropriately
# Where Longitude values are commonly found in hours or degrees, declination is
# nearly always specified in degrees, so this is the default.
# dec = Latitude("-41:08:15.162342")
with pytest.raises(u.UnitsError):
dec = Latitude("-41:08:15.162342")
dec = Latitude("-41:08:15.162342", unit=u.degree) # same as above
def test_negative_zero_dms():
# Test for DMS parser
a = Angle('-00:00:10', u.deg)
assert_allclose(a.degree, -10. / 3600.)
# Unicode minus
a = Angle('−00:00:10', u.deg)
assert_allclose(a.degree, -10. / 3600.)
def test_negative_zero_dm():
# Test for DM parser
a = Angle('-00:10', u.deg)
assert_allclose(a.degree, -10. / 60.)
def test_negative_zero_hms():
# Test for HMS parser
a = Angle('-00:00:10', u.hour)
assert_allclose(a.hour, -10. / 3600.)
def test_negative_zero_hm():
# Test for HM parser
a = Angle('-00:10', u.hour)
assert_allclose(a.hour, -10. / 60.)
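def test_negative_zero_sign_sketch():
    # A minimal sketch (not astropy's actual parser) of why the '-00:...'
    # cases above need explicit sign handling: float('-00') == 0.0 loses the
    # sign, so it must be read off the string before the fields are combined.
    s = '-00:00:10'
    sign = -1. if s.lstrip().startswith('-') else 1.
    d, m, sec = (abs(float(f)) for f in s.split(':'))
    assert sign * (d + m / 60. + sec / 3600.) == -10. / 3600.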
def test_negative_sixty_hm():
# Test for HM parser
with pytest.warns(IllegalMinuteWarning):
a = Angle('-00:60', u.hour)
assert_allclose(a.hour, -1.)
def test_plus_sixty_hm():
# Test for HM parser
with pytest.warns(IllegalMinuteWarning):
a = Angle('00:60', u.hour)
assert_allclose(a.hour, 1.)
def test_negative_fifty_nine_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle('-00:59:60', u.deg)
assert_allclose(a.degree, -1.)
def test_plus_fifty_nine_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle('+00:59:60', u.deg)
assert_allclose(a.degree, 1.)
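    # The overflow arithmetic being exercised: 0d59m60s is 59/60 deg plus
    # 60/3600 deg, i.e. exactly 1 deg, hence a warning rather than an error.
    assert abs(59. / 60. + 60. / 3600. - 1.) < 1e-12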
def test_negative_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle('-00:00:60', u.deg)
assert_allclose(a.degree, -1. / 60.)
def test_plus_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle('+00:00:60', u.deg)
assert_allclose(a.degree, 1. / 60.)
def test_angle_to_is_angle():
with pytest.warns(IllegalSecondWarning):
a = Angle('00:00:60', u.deg)
assert isinstance(a, Angle)
assert isinstance(a.to(u.rad), Angle)
def test_angle_to_quantity():
with pytest.warns(IllegalSecondWarning):
a = Angle('00:00:60', u.deg)
q = u.Quantity(a)
assert isinstance(q, u.Quantity)
assert q.unit is u.deg
def test_quantity_to_angle():
a = Angle(1.0*u.deg)
assert isinstance(a, Angle)
with pytest.raises(u.UnitsError):
Angle(1.0*u.meter)
a = Angle(1.0*u.hour)
assert isinstance(a, Angle)
assert a.unit is u.hourangle
with pytest.raises(u.UnitsError):
Angle(1.0*u.min)
def test_angle_string():
with pytest.warns(IllegalSecondWarning):
a = Angle('00:00:60', u.deg)
assert str(a) == '0d01m00s'
a = Angle('00:00:59S', u.deg)
assert str(a) == '-0d00m59s'
a = Angle('00:00:59N', u.deg)
assert str(a) == '0d00m59s'
a = Angle('00:00:59E', u.deg)
assert str(a) == '0d00m59s'
a = Angle('00:00:59W', u.deg)
assert str(a) == '-0d00m59s'
a = Angle('-00:00:10', u.hour)
assert str(a) == '-0h00m10s'
a = Angle('00:00:59E', u.hour)
assert str(a) == '0h00m59s'
a = Angle('00:00:59W', u.hour)
assert str(a) == '-0h00m59s'
a = Angle(3.2, u.radian)
assert str(a) == '3.2rad'
a = Angle(4.2, u.microarcsecond)
assert str(a) == '4.2uarcsec'
a = Angle('1.0uarcsec')
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle('1.0uarcsecN')
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle('1.0uarcsecS')
assert a.value == -1.0
assert a.unit == u.microarcsecond
a = Angle('1.0uarcsecE')
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle('1.0uarcsecW')
assert a.value == -1.0
assert a.unit == u.microarcsecond
a = Angle("3d")
assert_allclose(a.value, 3.0)
assert a.unit == u.degree
a = Angle("3dN")
assert str(a) == "3d00m00s"
assert a.unit == u.degree
a = Angle("3dS")
assert str(a) == "-3d00m00s"
assert a.unit == u.degree
a = Angle("3dE")
assert str(a) == "3d00m00s"
assert a.unit == u.degree
a = Angle("3dW")
assert str(a) == "-3d00m00s"
assert a.unit == u.degree
a = Angle('10"')
assert_allclose(a.value, 10.0)
assert a.unit == u.arcsecond
a = Angle("10'N")
assert_allclose(a.value, 10.0)
assert a.unit == u.arcminute
a = Angle("10'S")
assert_allclose(a.value, -10.0)
assert a.unit == u.arcminute
a = Angle("10'E")
assert_allclose(a.value, 10.0)
assert a.unit == u.arcminute
a = Angle("10'W")
assert_allclose(a.value, -10.0)
assert a.unit == u.arcminute
a = Angle('45°55′12″N')
assert str(a) == '45d55m12s'
assert_allclose(a.value, 45.92)
assert a.unit == u.deg
a = Angle('45°55′12″S')
assert str(a) == '-45d55m12s'
assert_allclose(a.value, -45.92)
assert a.unit == u.deg
a = Angle('45°55′12″E')
assert str(a) == '45d55m12s'
assert_allclose(a.value, 45.92)
assert a.unit == u.deg
a = Angle('45°55′12″W')
assert str(a) == '-45d55m12s'
assert_allclose(a.value, -45.92)
assert a.unit == u.deg
with pytest.raises(ValueError):
Angle('00h00m10sN')
with pytest.raises(ValueError):
Angle('45°55′12″NS')
def test_angle_repr():
assert 'Angle' in repr(Angle(0, u.deg))
assert 'Longitude' in repr(Longitude(0, u.deg))
assert 'Latitude' in repr(Latitude(0, u.deg))
a = Angle(0, u.deg)
repr(a)
def test_large_angle_representation():
"""Test that angles above 360 degrees can be output as strings,
in repr, str, and to_string. (regression test for #1413)"""
a = Angle(350, u.deg) + Angle(350, u.deg)
a.to_string()
a.to_string(u.hourangle)
repr(a)
repr(a.to(u.hourangle))
str(a)
str(a.to(u.hourangle))
def test_wrap_at_inplace():
a = Angle([-20, 150, 350, 360] * u.deg)
out = a.wrap_at('180d', inplace=True)
assert out is None
assert np.all(a.degree == np.array([-20., 150., -10., 0.]))
def test_latitude():
with pytest.raises(ValueError):
lat = Latitude(['91d', '89d'])
with pytest.raises(ValueError):
lat = Latitude('-91d')
lat = Latitude(['90d', '89d'])
# check that one can get items
assert lat[0] == 90 * u.deg
assert lat[1] == 89 * u.deg
# and that comparison with angles works
assert np.all(lat == Angle(['90d', '89d']))
# check setitem works
lat[1] = 45. * u.deg
assert np.all(lat == Angle(['90d', '45d']))
# but not with values out of range
with pytest.raises(ValueError):
lat[0] = 90.001 * u.deg
with pytest.raises(ValueError):
lat[0] = -90.001 * u.deg
# these should also not destroy input (#1851)
assert np.all(lat == Angle(['90d', '45d']))
# conserve type on unit change (closes #1423)
angle = lat.to('radian')
assert type(angle) is Latitude
# but not on calculations
angle = lat - 190 * u.deg
assert type(angle) is Angle
assert angle[0] == -100 * u.deg
lat = Latitude('80d')
angle = lat / 2.
assert type(angle) is Angle
assert angle == 40 * u.deg
angle = lat * 2.
assert type(angle) is Angle
assert angle == 160 * u.deg
angle = -lat
assert type(angle) is Angle
assert angle == -80 * u.deg
# Test errors when trying to interoperate with longitudes.
with pytest.raises(TypeError) as excinfo:
lon = Longitude(10, 'deg')
lat = Latitude(lon)
assert "A Latitude angle cannot be created from a Longitude angle" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
lon = Longitude(10, 'deg')
lat = Latitude([20], 'deg')
lat[0] = lon
assert "A Longitude angle cannot be assigned to a Latitude angle" in str(excinfo.value)
# Check we can work around the Lat vs Long checks by casting explicitly to Angle.
lon = Longitude(10, 'deg')
lat = Latitude(Angle(lon))
assert lat.value == 10.0
# Check setitem.
lon = Longitude(10, 'deg')
lat = Latitude([20], 'deg')
lat[0] = Angle(lon)
assert lat.value[0] == 10.0
def test_longitude():
# Default wrapping at 360d with an array input
lon = Longitude(['370d', '88d'])
assert np.all(lon == Longitude(['10d', '88d']))
assert np.all(lon == Angle(['10d', '88d']))
# conserve type on unit change and keep wrap_angle (closes #1423)
angle = lon.to('hourangle')
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
angle = lon[0]
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
angle = lon[1:]
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
# but not on calculations
angle = lon / 2.
assert np.all(angle == Angle(['5d', '44d']))
assert type(angle) is Angle
assert not hasattr(angle, 'wrap_angle')
angle = lon * 2. + 400 * u.deg
assert np.all(angle == Angle(['420d', '576d']))
assert type(angle) is Angle
# Test setting a mutable value and having it wrap
lon[1] = -10 * u.deg
assert np.all(lon == Angle(['10d', '350d']))
# Test wrapping and try hitting some edge cases
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian)
assert np.all(lon.degree == np.array([0., 90, 180, 270, 0]))
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian, wrap_angle='180d')
assert np.all(lon.degree == np.array([0., 90, -180, -90, 0]))
# Wrap on setting wrap_angle property (also test auto-conversion of wrap_angle to an Angle)
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian)
lon.wrap_angle = '180d'
assert np.all(lon.degree == np.array([0., 90, -180, -90, 0]))
lon = Longitude('460d')
assert lon == Angle('100d')
lon.wrap_angle = '90d'
assert lon == Angle('-260d')
# check that if we initialize a longitude with another longitude,
# wrap_angle is kept by default
lon2 = Longitude(lon)
assert lon2.wrap_angle == lon.wrap_angle
# but not if we explicitly set it
lon3 = Longitude(lon, wrap_angle='180d')
assert lon3.wrap_angle == 180 * u.deg
# check that wrap_angle is always an Angle
lon = Longitude(lon, wrap_angle=Longitude(180 * u.deg))
assert lon.wrap_angle == 180 * u.deg
assert lon.wrap_angle.__class__ is Angle
# check that wrap_angle is not copied
    wrap_angle = 180 * u.deg
lon = Longitude(lon, wrap_angle=wrap_angle)
assert lon.wrap_angle == 180 * u.deg
assert np.may_share_memory(lon.wrap_angle, wrap_angle)
# check for problem reported in #2037 about Longitude initializing to -0
lon = Longitude(0, u.deg)
lonstr = lon.to_string()
assert not lonstr.startswith('-')
# also make sure dtype is correctly conserved
assert Longitude(0, u.deg, dtype=float).dtype == np.dtype(float)
assert Longitude(0, u.deg, dtype=int).dtype == np.dtype(int)
# Test errors when trying to interoperate with latitudes.
with pytest.raises(TypeError) as excinfo:
lat = Latitude(10, 'deg')
lon = Longitude(lat)
assert "A Longitude angle cannot be created from a Latitude angle" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
lat = Latitude(10, 'deg')
lon = Longitude([20], 'deg')
lon[0] = lat
assert "A Latitude angle cannot be assigned to a Longitude angle" in str(excinfo.value)
# Check we can work around the Lat vs Long checks by casting explicitly to Angle.
lat = Latitude(10, 'deg')
lon = Longitude(Angle(lat))
assert lon.value == 10.0
# Check setitem.
lat = Latitude(10, 'deg')
lon = Longitude([20], 'deg')
lon[0] = Angle(lat)
assert lon.value[0] == 10.0
def test_wrap_at():
a = Angle([-20, 150, 350, 360] * u.deg)
assert np.all(a.wrap_at(360 * u.deg).degree == np.array([340., 150., 350., 0.]))
assert np.all(a.wrap_at(Angle(360, unit=u.deg)).degree == np.array([340., 150., 350., 0.]))
assert np.all(a.wrap_at('360d').degree == np.array([340., 150., 350., 0.]))
assert np.all(a.wrap_at('180d').degree == np.array([-20., 150., -10., 0.]))
assert np.all(a.wrap_at(np.pi * u.rad).degree == np.array([-20., 150., -10., 0.]))
# Test wrapping a scalar Angle
a = Angle('190d')
assert a.wrap_at('180d') == Angle('-170d')
a = Angle(np.arange(-1000.0, 1000.0, 0.125), unit=u.deg)
for wrap_angle in (270, 0.2, 0.0, 360.0, 500, -2000.125):
aw = a.wrap_at(wrap_angle * u.deg)
assert np.all(aw.degree >= wrap_angle - 360.0)
assert np.all(aw.degree < wrap_angle)
aw = a.to(u.rad).wrap_at(wrap_angle * u.deg)
assert np.all(aw.degree >= wrap_angle - 360.0)
assert np.all(aw.degree < wrap_angle)
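    # A minimal numpy sketch of the wrapping rule exercised above (assuming
    # wrap_at maps values into [wrap_angle - 360 deg, wrap_angle)):
    deg = np.array([-20., 150., 350., 360.])
    wrapped = (deg - (180. - 360.)) % 360. + (180. - 360.)
    assert np.all(wrapped == np.array([-20., 150., -10., 0.]))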
def test_is_within_bounds():
a = Angle([-20, 150, 350] * u.deg)
assert a.is_within_bounds('0d', '360d') is False
assert a.is_within_bounds(None, '360d') is True
assert a.is_within_bounds(-30 * u.deg, None) is True
a = Angle('-20d')
assert a.is_within_bounds('0d', '360d') is False
assert a.is_within_bounds(None, '360d') is True
assert a.is_within_bounds(-30 * u.deg, None) is True
def test_angle_mismatched_unit():
a = Angle('+6h7m8s', unit=u.degree)
assert_allclose(a.value, 91.78333333333332)
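    # Worked check: 6h07m08s is (6 + 7/60 + 8/3600) hourangle, and
    # 1 hourangle = 15 deg, which gives the value asserted above.
    assert_allclose((6 + 7 / 60 + 8 / 3600) * 15, 91.78333333333332)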
def test_regression_formatting_negative():
# Regression test for a bug that caused:
#
# >>> Angle(-1., unit='deg').to_string()
# '-1d00m-0s'
assert Angle(-0., unit='deg').to_string() == '-0d00m00s'
assert Angle(-1., unit='deg').to_string() == '-1d00m00s'
assert Angle(-0., unit='hour').to_string() == '-0h00m00s'
assert Angle(-1., unit='hour').to_string() == '-1h00m00s'
def test_regression_formatting_default_precision():
# Regression test for issue #11140
assert Angle('10:20:30.12345678d').to_string() == '10d20m30.12345678s'
assert Angle('10d20m30.123456784564s').to_string() == '10d20m30.12345678s'
assert Angle('10d20m30.123s').to_string() == '10d20m30.123s'
def test_empty_sep():
a = Angle('05h04m31.93830s')
assert a.to_string(sep='', precision=2, pad=True) == '050431.94'
def test_create_tuple():
"""
Tests creation of an angle with an (h,m,s) tuple
(d, m, s) tuples are not tested because of sign ambiguity issues (#13162)
"""
with pytest.warns(AstropyDeprecationWarning, match='hms_to_hours'):
a1 = Angle((1, 30, 0), unit=u.hourangle)
assert a1.value == 1.5
def test_list_of_quantities():
a1 = Angle([1*u.deg, 1*u.hourangle])
assert a1.unit == u.deg
assert_allclose(a1.value, [1, 15])
a2 = Angle([1*u.hourangle, 1*u.deg], u.deg)
assert a2.unit == u.deg
assert_allclose(a2.value, [15, 1])
def test_multiply_divide():
# Issue #2273
a1 = Angle([1, 2, 3], u.deg)
a2 = Angle([4, 5, 6], u.deg)
a3 = a1 * a2
assert_allclose(a3.value, [4, 10, 18])
assert a3.unit == (u.deg * u.deg)
a3 = a1 / a2
assert_allclose(a3.value, [.25, .4, .5])
assert a3.unit == u.dimensionless_unscaled
def test_mixed_string_and_quantity():
a1 = Angle(['1d', 1. * u.deg])
assert_array_equal(a1.value, [1., 1.])
assert a1.unit == u.deg
a2 = Angle(['1d', 1 * u.rad * np.pi, '3d'])
assert_array_equal(a2.value, [1., 180., 3.])
assert a2.unit == u.deg
def test_array_angle_tostring():
aobj = Angle([1, 2], u.deg)
assert aobj.to_string().dtype.kind == 'U'
assert np.all(aobj.to_string() == ['1d00m00s', '2d00m00s'])
def test_wrap_at_without_new():
"""
Regression test for subtle bugs from situations where an Angle is
created via numpy channels that don't do the standard __new__ but instead
    depend on __array_finalize__ to set state. Longitude is used because the
bug was in its _wrap_angle not getting initialized correctly
"""
l1 = Longitude([1]*u.deg)
l2 = Longitude([2]*u.deg)
l = np.concatenate([l1, l2])
assert l._wrap_angle is not None
def test__str__():
"""
Check the __str__ method used in printing the Angle
"""
# scalar angle
scangle = Angle('10.2345d')
strscangle = scangle.__str__()
assert strscangle == '10d14m04.2s'
# non-scalar array angles
arrangle = Angle(['10.2345d', '-20d'])
strarrangle = arrangle.__str__()
assert strarrangle == '[10d14m04.2s -20d00m00s]'
# summarizing for large arrays, ... should appear
bigarrangle = Angle(np.ones(10000), u.deg)
assert '...' in bigarrangle.__str__()
def test_repr_latex():
"""
Check the _repr_latex_ method, used primarily by IPython notebooks
"""
# try with both scalar
scangle = Angle(2.1, u.deg)
rlscangle = scangle._repr_latex_()
# and array angles
arrangle = Angle([1, 2.1], u.deg)
rlarrangle = arrangle._repr_latex_()
assert rlscangle == r'$2^\circ06{}^\prime00{}^{\prime\prime}$'
assert rlscangle.split('$')[1] in rlarrangle
# make sure the ... appears for large arrays
bigarrangle = Angle(np.ones(50000)/50000., u.deg)
assert '...' in bigarrangle._repr_latex_()
def test_angle_with_cds_units_enabled():
"""Regression test for #5350
Especially the example in
https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
"""
from astropy.units import cds
# the problem is with the parser, so remove it temporarily
from astropy.coordinates.angle_formats import _AngleParser
del _AngleParser._thread_local._parser
with cds.enable():
Angle('5d')
del _AngleParser._thread_local._parser
Angle('5d')
def test_longitude_nan():
# Check that passing a NaN to Longitude doesn't raise a warning
Longitude([0, np.nan, 1] * u.deg)
def test_latitude_nan():
# Check that passing a NaN to Latitude doesn't raise a warning
Latitude([0, np.nan, 1] * u.deg)
def test_angle_wrap_at_nan():
# Check that no attempt is made to wrap a NaN angle
angle = Angle([0, np.nan, 1] * u.deg)
angle.flags.writeable = False # to force an error if a write is attempted
angle.wrap_at(180*u.deg, inplace=True)
def test_angle_multithreading():
"""
Regression test for issue #7168
"""
angles = ['00:00:00']*10000
def parse_test(i=0):
Angle(angles, unit='hour')
for i in range(10):
threading.Thread(target=parse_test, args=(i,)).start()
@pytest.mark.parametrize("cls", [Angle, Longitude, Latitude])
@pytest.mark.parametrize("input, expstr, exprepr",
[(np.nan*u.deg,
"nan",
"nan deg"),
([np.nan, 5, 0]*u.deg,
"[nan 5d00m00s 0d00m00s]",
"[nan, 5., 0.] deg"),
([6, np.nan, 0]*u.deg,
"[6d00m00s nan 0d00m00s]",
"[6., nan, 0.] deg"),
([np.nan, np.nan, np.nan]*u.deg,
"[nan nan nan]",
"[nan, nan, nan] deg"),
(np.nan*u.hour,
"nan",
"nan hourangle"),
([np.nan, 5, 0]*u.hour,
"[nan 5h00m00s 0h00m00s]",
"[nan, 5., 0.] hourangle"),
([6, np.nan, 0]*u.hour,
"[6h00m00s nan 0h00m00s]",
"[6., nan, 0.] hourangle"),
([np.nan, np.nan, np.nan]*u.hour,
"[nan nan nan]",
"[nan, nan, nan] hourangle"),
(np.nan*u.rad,
"nan",
"nan rad"),
([np.nan, 1, 0]*u.rad,
"[nan 1rad 0rad]",
"[nan, 1., 0.] rad"),
([1.50, np.nan, 0]*u.rad,
"[1.5rad nan 0rad]",
"[1.5, nan, 0.] rad"),
([np.nan, np.nan, np.nan]*u.rad,
"[nan nan nan]",
"[nan, nan, nan] rad")])
def test_str_repr_angles_nan(cls, input, expstr, exprepr):
"""
Regression test for issue #11473
"""
q = cls(input)
assert str(q) == expstr
# Deleting whitespaces since repr appears to be adding them for some values
# making the test fail.
assert repr(q).replace(" ", "") == f'<{cls.__name__}{exprepr}>'.replace(" ","")
|
f747fd09ce57172fdfe5f215c2ee284bbd08870bae8ee27b2e9ee4218dd0f6d3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the projected separation stuff
"""
import pytest
import numpy as np
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy import units as u
from astropy.coordinates.builtin_frames import ICRS, FK5, Galactic
from astropy.coordinates import Angle, Distance
# lon1, lat1, lon2, lat2 in degrees
coords = [(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
(0, 0, 10, 0),
(0, 0, 90, 0),
(0, 0, 180, 0),
(0, 45, 0, -45),
(0, 60, 0, -30),
(-135, -15, 45, 15),
(100, -89, -80, 89),
(0, 0, 0, 0),
(0, 0, 1. / 60., 1. / 60.)]
correct_seps = [1, 1, 1, 1, 10, 90, 180, 90, 90, 180, 180, 0,
0.023570225877234643]
correctness_margin = 2e-10
def test_angsep():
"""
    Tests that the angular_separation function also behaves correctly.
"""
from astropy.coordinates.angle_utilities import angular_separation
# check it both works with floats in radians, Quantities, or Angles
for conv in (np.deg2rad,
lambda x: u.Quantity(x, "deg"),
lambda x: Angle(x, "deg")):
for (lon1, lat1, lon2, lat2), corrsep in zip(coords, correct_seps):
angsep = angular_separation(conv(lon1), conv(lat1),
conv(lon2), conv(lat2))
assert np.fabs(angsep - conv(corrsep)) < conv(correctness_margin)
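    # A hedged cross-check of one tabulated pair using the Vincenty formula
    # (assumption: angular_separation implements this or an equivalent):
    lon1, lat1, lon2, lat2 = np.deg2rad([0., 60., 0., -30.])
    sdlon, cdlon = np.sin(lon2 - lon1), np.cos(lon2 - lon1)
    num1 = np.cos(lat2) * sdlon
    num2 = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * cdlon
    den = np.sin(lat1) * np.sin(lat2) + np.cos(lat1) * np.cos(lat2) * cdlon
    sep = np.arctan2(np.hypot(num1, num2), den)
    assert abs(np.rad2deg(sep) - 90.) < 2e-10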
def test_fk5_seps():
"""
This tests if `separation` works for FK5 objects.
This is a regression test for github issue #891
"""
a = FK5(1.*u.deg, 1.*u.deg)
b = FK5(2.*u.deg, 2.*u.deg)
a.separation(b)
def test_proj_separations():
"""
Test angular separation functionality
"""
c1 = ICRS(ra=0*u.deg, dec=0*u.deg)
c2 = ICRS(ra=0*u.deg, dec=1*u.deg)
sep = c2.separation(c1)
# returns an Angle object
assert isinstance(sep, Angle)
assert_allclose(sep.degree, 1.)
assert_allclose(sep.arcminute, 60.)
# these operations have ambiguous interpretations for points on a sphere
with pytest.raises(TypeError):
c1 + c2
with pytest.raises(TypeError):
c1 - c2
ngp = Galactic(l=0*u.degree, b=90*u.degree)
ncp = ICRS(ra=0*u.degree, dec=90*u.degree)
# if there is a defined conversion between the relevant coordinate systems,
# it will be automatically performed to get the right angular separation
assert_allclose(ncp.separation(ngp.transform_to(ICRS())).degree,
ncp.separation(ngp).degree)
# distance from the north galactic pole to celestial pole
assert_allclose(ncp.separation(ngp.transform_to(ICRS())).degree,
62.87174758503201)
def test_3d_separations():
"""
Test 3D separation functionality
"""
c1 = ICRS(ra=1*u.deg, dec=1*u.deg, distance=9*u.kpc)
c2 = ICRS(ra=1*u.deg, dec=1*u.deg, distance=10*u.kpc)
sep3d = c2.separation_3d(c1)
assert isinstance(sep3d, Distance)
assert_allclose(sep3d - 1*u.kpc, 0*u.kpc, atol=1e-12*u.kpc)
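    # The general rule is the law of cosines,
    # d = sqrt(d1**2 + d2**2 - 2*d1*d2*cos(theta)); here theta = 0, so it
    # reduces to |d2 - d1| = 1 kpc, as asserted above.
    assert np.sqrt(9.**2 + 10.**2 - 2 * 9. * 10. * np.cos(0.)) == 1.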
|
2a65591630fd9cd8bb528400df6f9c4127f777ce7c8ecfff1ba8c5a7790f0f86 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is the APE5 coordinates API document re-written to work as a series of test
functions.
Note that new tests for coordinates functionality should generally *not* be
added to this file - instead, add them to other appropriate test modules in
this package, like ``test_sky_coord.py``, ``test_frames.py``, or
``test_representation.py``. This file is instead meant mainly to keep track of
deviations from the original APE5 plan.
"""
import pytest
import numpy as np
from numpy import testing as npt
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy import units as u
from astropy import time
from astropy import coordinates as coords
from astropy.units import allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
def test_representations_api():
from astropy.coordinates.representation import SphericalRepresentation, \
UnitSphericalRepresentation, PhysicsSphericalRepresentation, \
CartesianRepresentation
from astropy.coordinates import Angle, Longitude, Latitude, Distance
# <-----------------Classes for representation of coordinate data-------------->
# These classes inherit from a common base class and internally contain Quantity
# objects, which are arrays (although they may act as scalars, like numpy's
# length-0 "arrays")
# They can be initialized with a variety of ways that make intuitive sense.
# Distance is optional.
UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg)
UnitSphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg)
SphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg, distance=10*u.kpc)
# In the initial implementation, the lat/lon/distance arguments to the
# initializer must be in order. A *possible* future change will be to allow
# smarter guessing of the order. E.g. `Latitude` and `Longitude` objects can be
# given in any order.
UnitSphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg))
SphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg), Distance(10, u.kpc))
# Arrays of any of the inputs are fine
UnitSphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg)
# Default is to copy arrays, but optionally, it can be a reference
UnitSphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg, copy=False)
# strings are parsed by `Latitude` and `Longitude` constructors, so no need to
# implement parsing in the Representation classes
UnitSphericalRepresentation(lon=Angle('2h6m3.3s'), lat=Angle('0.1rad'))
# Or, you can give `Quantity`s with keywords, and they will be internally
# converted to Angle/Distance
c1 = SphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg, distance=10*u.kpc)
# Can also give another representation object with the `reprobj` keyword.
c2 = SphericalRepresentation.from_representation(c1)
# distance, lat, and lon typically will just match in shape
SphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg, distance=[10, 11]*u.kpc)
# if the inputs are not the same, if possible they will be broadcast following
# numpy's standard broadcasting rules.
c2 = SphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg, distance=10*u.kpc)
assert len(c2.distance) == 2
# when they can't be broadcast, it is a ValueError (same as Numpy)
with pytest.raises(ValueError):
c2 = UnitSphericalRepresentation(lon=[8, 9, 10]*u.hourangle, lat=[5, 6]*u.deg)
# It's also possible to pass in scalar quantity lists with mixed units. These
# are converted to array quantities following the same rule as `Quantity`: all
# elements are converted to match the first element's units.
c2 = UnitSphericalRepresentation(lon=Angle([8*u.hourangle, 135*u.deg]),
lat=Angle([5*u.deg, (6*np.pi/180)*u.rad]))
assert c2.lat.unit == u.deg and c2.lon.unit == u.hourangle
npt.assert_almost_equal(c2.lon[1].value, 9)
# The Quantity initializer itself can also be used to force the unit even if the
# first element doesn't have the right unit
lon = u.Quantity([120*u.deg, 135*u.deg], u.hourangle)
lat = u.Quantity([(5*np.pi/180)*u.rad, 0.4*u.hourangle], u.deg)
c2 = UnitSphericalRepresentation(lon, lat)
# regardless of how input, the `lat` and `lon` come out as angle/distance
assert isinstance(c1.lat, Angle)
assert isinstance(c1.lat, Latitude) # `Latitude` is an `~astropy.coordinates.Angle` subclass
assert isinstance(c1.distance, Distance)
# but they are read-only, as representations are immutable once created
with pytest.raises(AttributeError):
c1.lat = Latitude(5, u.deg)
# Note that it is still possible to modify the array in-place, but this is not
# sanctioned by the API, as this would prevent things like caching.
c2.lat[:] = [0] * u.deg # possible, but NOT SUPPORTED
# To address the fact that there are various other conventions for how spherical
# coordinates are defined, other conventions can be included as new classes.
# Later there may be other conventions that we implement - for now just the
# physics convention, as it is one of the most common cases.
_ = PhysicsSphericalRepresentation(phi=120*u.deg, theta=85*u.deg, r=3*u.kpc)
# first dimension must be length-3 if a lone `Quantity` is passed in.
c1 = CartesianRepresentation(np.random.randn(3, 100) * u.kpc)
assert c1.xyz.shape[0] == 3
assert c1.xyz.unit == u.kpc
assert c1.x.shape[0] == 100
assert c1.y.shape[0] == 100
assert c1.z.shape[0] == 100
# can also give each as separate keywords
CartesianRepresentation(x=np.random.randn(100)*u.kpc,
y=np.random.randn(100)*u.kpc,
z=np.random.randn(100)*u.kpc)
# if the units don't match but are all distances, they will automatically be
# converted to match `x`
xarr, yarr, zarr = np.random.randn(3, 100)
c1 = CartesianRepresentation(x=xarr*u.kpc, y=yarr*u.kpc, z=zarr*u.kpc)
c2 = CartesianRepresentation(x=xarr*u.kpc, y=yarr*u.kpc, z=zarr*u.pc)
assert c1.xyz.unit == c2.xyz.unit == u.kpc
assert_allclose((c1.z / 1000) - c2.z, 0*u.kpc, atol=1e-10*u.kpc)
# representations convert into other representations via `represent_as`
srep = SphericalRepresentation(lon=90*u.deg, lat=0*u.deg, distance=1*u.pc)
crep = srep.represent_as(CartesianRepresentation)
assert_allclose(crep.x, 0*u.pc, atol=1e-10*u.pc)
assert_allclose(crep.y, 1*u.pc, atol=1e-10*u.pc)
assert_allclose(crep.z, 0*u.pc, atol=1e-10*u.pc)
# The functions that actually do the conversion are defined via methods on the
# representation classes. This may later be expanded into a full registerable
# transform graph like the coordinate frames, but initially it will be a simpler
# method system
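def test_spherical_to_cartesian_worked_example():
    # An illustrative cross-check (not part of APE5 itself) of the conversion
    # exercised above, assuming the standard convention
    # x = r cos(lat) cos(lon), y = r cos(lat) sin(lon), z = r sin(lat):
    lon, lat, r = np.pi / 2, 0., 1.
    xyz = [r * np.cos(lat) * np.cos(lon),
           r * np.cos(lat) * np.sin(lon),
           r * np.sin(lat)]
    npt.assert_allclose(xyz, [0., 1., 0.], atol=1e-10)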
def test_frame_api():
from astropy.coordinates.representation import SphericalRepresentation, \
UnitSphericalRepresentation
from astropy.coordinates.builtin_frames import ICRS, FK5
# <--------------------Reference Frame/"Low-level" classes--------------------->
# The low-level classes have a dual role: they act as specifiers of coordinate
# frames and they *may* also contain data as one of the representation objects,
# in which case they are the actual coordinate objects themselves.
# They can always accept a representation as a first argument
icrs = ICRS(UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg))
# which is stored as the `data` attribute
assert icrs.data.lat == 5*u.deg
assert icrs.data.lon == 8*u.hourangle
    # Frames that require additional information like equinoxes or obstimes get them
# as keyword parameters to the frame constructor. Where sensible, defaults are
# used. E.g., FK5 is almost always J2000 equinox
fk5 = FK5(UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg))
J2000 = time.Time('J2000')
fk5_2000 = FK5(UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg), equinox=J2000)
assert fk5.equinox == fk5_2000.equinox
# the information required to specify the frame is immutable
J2001 = time.Time('J2001')
with pytest.raises(AttributeError):
fk5.equinox = J2001
# Similar for the representation data.
with pytest.raises(AttributeError):
fk5.data = UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg)
# There is also a class-level attribute that lists the attributes needed to
# identify the frame. These include attributes like `equinox` shown above.
assert all(nm in ('equinox', 'obstime') for nm in fk5.get_frame_attr_names())
    # the result of `get_frame_attr_names` is used particularly in the
    # high-level class (discussed below) to allow round-tripping between
    # various frames. It is also part of the public API for developers and
    # other advanced users.
# The actual position information is accessed via the representation objects
assert_allclose(icrs.represent_as(SphericalRepresentation).lat, 5*u.deg)
# shorthand for the above
assert_allclose(icrs.spherical.lat, 5*u.deg)
assert icrs.cartesian.z.value > 0
# Many frames have a "default" representation, the one in which they are
# conventionally described, often with a special name for some of the
# coordinates. E.g., most equatorial coordinate systems are spherical with RA and
# Dec. This works simply as a shorthand for the longer form above
assert_allclose(icrs.dec, 5*u.deg)
assert_allclose(fk5.ra, 8*u.hourangle)
assert icrs.representation_type == SphericalRepresentation
# low-level classes can also be initialized with names valid for that representation
# and frame:
icrs_2 = ICRS(ra=8*u.hour, dec=5*u.deg, distance=1*u.kpc)
assert_allclose(icrs.ra, icrs_2.ra)
# and these are taken as the default if keywords are not given:
# icrs_nokwarg = ICRS(8*u.hour, 5*u.deg, distance=1*u.kpc)
# assert icrs_nokwarg.ra == icrs_2.ra and icrs_nokwarg.dec == icrs_2.dec
# they also are capable of computing on-sky or 3d separations from each other,
# which will be a direct port of the existing methods:
coo1 = ICRS(ra=0*u.hour, dec=0*u.deg)
coo2 = ICRS(ra=0*u.hour, dec=1*u.deg)
# `separation` is the on-sky separation
assert_allclose(coo1.separation(coo2).degree, 1.0)
# while `separation_3d` includes the 3D distance information
coo3 = ICRS(ra=0*u.hour, dec=0*u.deg, distance=1*u.kpc)
coo4 = ICRS(ra=0*u.hour, dec=0*u.deg, distance=2*u.kpc)
assert coo3.separation_3d(coo4).kpc == 1.0
# The next example fails because `coo1` and `coo2` don't have distances
with pytest.raises(ValueError):
assert coo1.separation_3d(coo2).kpc == 1.0
# repr/str also shows info, with frame and data
# assert repr(fk5) == ''
def test_transform_api():
from astropy.coordinates.representation import UnitSphericalRepresentation
from astropy.coordinates.builtin_frames import ICRS, FK5
from astropy.coordinates.baseframe import frame_transform_graph, BaseCoordinateFrame
from astropy.coordinates.transformations import DynamicMatrixTransform
# <------------------------Transformations------------------------------------->
# Transformation functionality is the key to the whole scheme: they transform
# low-level classes from one frame to another.
# (used below but defined above in the API)
fk5 = FK5(ra=8*u.hour, dec=5*u.deg)
# If no data (or `None`) is given, the class acts as a specifier of a frame, but
# without any stored data.
J2001 = time.Time('J2001')
fk5_J2001_frame = FK5(equinox=J2001)
    # if they do not have data, the repr is instead just the frame specification
assert repr(fk5_J2001_frame) == "<FK5 Frame (equinox=J2001.000)>"
# Note that, although a frame object is immutable and can't have data added, it
# can be used to create a new object that does have data by giving the
# `realize_frame` method a representation:
srep = UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg)
fk5_j2001_with_data = fk5_J2001_frame.realize_frame(srep)
assert fk5_j2001_with_data.data is not None
# Now `fk5_j2001_with_data` is in the same frame as `fk5_J2001_frame`, but it
# is an actual low-level coordinate, rather than a frame without data.
# These frames are primarily useful for specifying what a coordinate should be
# transformed *into*, as they are used by the `transform_to` method
# E.g., this snippet precesses the point to the new equinox
newfk5 = fk5.transform_to(fk5_J2001_frame)
assert newfk5.equinox == J2001
# transforming to a new frame necessarily loses framespec information if that
# information is not applicable to the new frame. This means transforms are not
# always round-trippable:
fk5_2 = FK5(ra=8*u.hour, dec=5*u.deg, equinox=J2001)
ic_trans = fk5_2.transform_to(ICRS())
# `ic_trans` does not have an `equinox`, so now when we transform back to FK5,
# it's a *different* RA and Dec
fk5_trans = ic_trans.transform_to(FK5())
assert not allclose(fk5_2.ra, fk5_trans.ra, rtol=0, atol=1e-10*u.deg)
# But if you explicitly give the right equinox, all is fine
fk5_trans_2 = fk5_2.transform_to(FK5(equinox=J2001))
assert_allclose(fk5_2.ra, fk5_trans_2.ra, rtol=0, atol=1e-10*u.deg)
    # Trying to transform a frame with no data is of course an error:
with pytest.raises(ValueError):
FK5(equinox=J2001).transform_to(ICRS())
# To actually define a new transformation, the same scheme as in the
# 0.2/0.3 coordinates framework can be re-used - a graph of transform functions
# connecting various coordinate classes together. The main changes are:
# 1) The transform functions now get the frame object they are transforming the
# current data into.
# 2) Frames with additional information need to have a way to transform between
# objects of the same class, but with different framespecinfo values
# An example transform function:
class SomeNewSystem(BaseCoordinateFrame):
pass
@frame_transform_graph.transform(DynamicMatrixTransform, SomeNewSystem, FK5)
def new_to_fk5(newobj, fk5frame):
_ = newobj.obstime
_ = fk5frame.equinox
        # ... build a *cartesian* transform matrix using the equinox and
        # obstime accessed above that transforms from the `newobj` frame to
        # FK5 at that equinox
matrix = np.eye(3)
return matrix
# Other options for transform functions include one that simply returns the new
# coordinate object, and one that returns a cartesian matrix but does *not*
# require `newobj` or `fk5frame` - this allows optimization of the transform.
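def test_transform_matrix_sketch():
    # A minimal sketch (hypothetical example, not part of APE5) of the kind
    # of cartesian matrix a transform function returns: a proper orthogonal
    # 3x3 rotation, here 30 degrees about z.
    from astropy.coordinates.matrix_utilities import rotation_matrix
    mat = rotation_matrix(30 * u.deg, 'z')
    assert np.allclose(mat @ mat.T, np.eye(3))
    assert np.isclose(np.linalg.det(mat), 1.)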
def test_highlevel_api():
J2001 = time.Time('J2001')
# <--------------------------"High-level" class-------------------------------->
# The "high-level" class is intended to wrap the lower-level classes in such a
# way that they can be round-tripped, as well as providing a variety of
# convenience functionality. This document is not intended to show *all* of the
# possible high-level functionality, rather how the high-level classes are
# initialized and interact with the low-level classes
# this creates an object that contains an `ICRS` low-level class, initialized
# identically to the first ICRS example further up.
sc = coords.SkyCoord(coords.SphericalRepresentation(lon=8 * u.hour,
lat=5 * u.deg, distance=1 * u.kpc), frame='icrs')
# Other representations and `system` keywords delegate to the appropriate
# low-level class. The already-existing registry for user-defined coordinates
    # will be used by `SkyCoordinate` to figure out what the various `system`
    # keyword values actually mean.
sc = coords.SkyCoord(ra=8 * u.hour, dec=5 * u.deg, frame='icrs')
sc = coords.SkyCoord(l=120 * u.deg, b=5 * u.deg, frame='galactic')
# High-level classes can also be initialized directly from low-level objects
sc = coords.SkyCoord(coords.ICRS(ra=8 * u.hour, dec=5 * u.deg))
# The next example raises an error because the high-level class must always
# have position data.
with pytest.raises(ValueError):
sc = coords.SkyCoord(coords.FK5(equinox=J2001)) # raises ValueError
# similarly, the low-level object can always be accessed
# this is how it's supposed to look, but sometimes the numbers get rounded in
# funny ways
# assert repr(sc.frame) == '<ICRS Coordinate: ra=120.0 deg, dec=5.0 deg>'
rscf = repr(sc.frame)
assert rscf.startswith('<ICRS Coordinate: (ra, dec) in deg')
# and the string representation will be inherited from the low-level class.
    # same deal, should look like this, but different architectures / Python
    # versions may round the numbers differently
# assert repr(sc) == '<SkyCoord (ICRS): ra=120.0 deg, dec=5.0 deg>'
rsc = repr(sc)
assert rsc.startswith('<SkyCoord (ICRS): (ra, dec) in deg')
# Supports a variety of possible complex string formats
sc = coords.SkyCoord('8h00m00s +5d00m00.0s', frame='icrs')
# In the next example, the unit is only needed b/c units are ambiguous. In
# general, we *never* accept ambiguity
sc = coords.SkyCoord('8:00:00 +5:00:00.0', unit=(u.hour, u.deg), frame='icrs')
# The next one would yield length-2 array coordinates, because of the comma
sc = coords.SkyCoord(['8h 5d', '2°2′3″ 0.3rad'], frame='icrs')
# It should also interpret common designation styles as a coordinate
# NOT YET
# sc = coords.SkyCoord('SDSS J123456.89-012345.6', frame='icrs')
# but it should also be possible to provide formats for outputting to strings,
# similar to `Time`. This can be added right away or at a later date.
# transformation is done the same as for low-level classes, which it delegates to
sc_fk5_j2001 = sc.transform_to(coords.FK5(equinox=J2001))
assert sc_fk5_j2001.equinox == J2001
# The key difference is that the high-level class remembers frame information
# necessary for round-tripping, unlike the low-level classes:
sc1 = coords.SkyCoord(ra=8 * u.hour, dec=5 * u.deg, equinox=J2001, frame='fk5')
sc2 = sc1.transform_to('icrs')
# The next assertion succeeds, but it doesn't mean anything for ICRS, as ICRS
# isn't defined in terms of an equinox
assert sc2.equinox == J2001
# But it *is* necessary once we transform to FK5
sc3 = sc2.transform_to('fk5')
assert sc3.equinox == J2001
assert_allclose(sc1.ra, sc3.ra)
# `SkyCoord` will also include the attribute-style access that is in the
# v0.2/0.3 coordinate objects. This will *not* be in the low-level classes
sc = coords.SkyCoord(ra=8 * u.hour, dec=5 * u.deg, frame='icrs')
scgal = sc.galactic
assert str(scgal).startswith('<SkyCoord (Galactic): (l, b)')
# the existing `from_name` and `match_to_catalog_*` methods will be moved to the
# high-level class as convenience functionality.
# in remote-data test below!
# m31icrs = coords.SkyCoord.from_name('M31', frame='icrs')
# assert str(m31icrs) == '<SkyCoord (ICRS) RA=10.68471 deg, Dec=41.26875 deg>'
if HAS_SCIPY:
cat1 = coords.SkyCoord(ra=[1, 2]*u.hr, dec=[3, 4.01]*u.deg,
distance=[5, 6]*u.kpc, frame='icrs')
cat2 = coords.SkyCoord(ra=[1, 2, 2.01]*u.hr, dec=[3, 4, 5]*u.deg,
distance=[5, 200, 6]*u.kpc, frame='icrs')
idx1, sep2d1, dist3d1 = cat1.match_to_catalog_sky(cat2)
idx2, sep2d2, dist3d2 = cat1.match_to_catalog_3d(cat2)
assert np.any(idx1 != idx2)
# additional convenience functionality for the future should be added as methods
# on `SkyCoord`, *not* the low-level classes.
@pytest.mark.remote_data
def test_highlevel_api_remote():
m31icrs = coords.SkyCoord.from_name('M31', frame='icrs')
m31str = str(m31icrs)
assert m31str.startswith('<SkyCoord (ICRS): (ra, dec) in deg\n (')
assert m31str.endswith(')>')
assert '10.68' in m31str
assert '41.26' in m31str
# The above is essentially a replacement of the below, but tweaked so that
# small/moderate changes in what `from_name` returns don't cause the tests
# to fail
# assert str(m31icrs) == '<SkyCoord (ICRS): (ra, dec) in deg\n (10.6847083, 41.26875)>'
m31fk4 = coords.SkyCoord.from_name('M31', frame='fk4')
assert not m31icrs.is_equivalent_frame(m31fk4)
assert np.abs(m31icrs.ra - m31fk4.ra) > .5*u.deg
|
959395aa536cb12ca40404b8c066755737321780f4397398015d8439be86381d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for putting velocity differentials into SkyCoord objects.
Note: the skyoffset velocity tests are in a different file, in
test_skyoffset_transformations.py
"""
import pytest
import numpy as np
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.coordinates import (
SkyCoord, ICRS, SphericalRepresentation, SphericalDifferential,
SphericalCosLatDifferential, UnitSphericalRepresentation,
UnitSphericalDifferential, UnitSphericalCosLatDifferential,
RadialDifferential, CartesianRepresentation,
CartesianDifferential, Galactic, PrecessedGeocentric)
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
def test_creation_frameobjs():
i = ICRS(1*u.deg, 2*u.deg, pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr)
sc = SkyCoord(i)
for attrnm in ['ra', 'dec', 'pm_ra_cosdec', 'pm_dec']:
assert_quantity_allclose(getattr(i, attrnm), getattr(sc, attrnm))
sc_nod = SkyCoord(ICRS(1*u.deg, 2*u.deg))
for attrnm in ['ra', 'dec']:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_nod, attrnm))
def test_creation_attrs():
sc1 = SkyCoord(1*u.deg, 2*u.deg,
pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr,
frame='fk5')
assert_quantity_allclose(sc1.ra, 1*u.deg)
assert_quantity_allclose(sc1.dec, 2*u.deg)
assert_quantity_allclose(sc1.pm_ra_cosdec, .2*u.arcsec/u.kyr)
assert_quantity_allclose(sc1.pm_dec, .1*u.arcsec/u.kyr)
sc2 = SkyCoord(1*u.deg, 2*u.deg,
pm_ra=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr,
differential_type=SphericalDifferential)
assert_quantity_allclose(sc2.ra, 1*u.deg)
assert_quantity_allclose(sc2.dec, 2*u.deg)
assert_quantity_allclose(sc2.pm_ra, .2*u.arcsec/u.kyr)
assert_quantity_allclose(sc2.pm_dec, .1*u.arcsec/u.kyr)
sc3 = SkyCoord('1:2:3 4:5:6',
pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr,
unit=(u.hour, u.deg))
assert_quantity_allclose(sc3.ra, 1*u.hourangle + 2*u.arcmin*15 + 3*u.arcsec*15)
assert_quantity_allclose(sc3.dec, 4*u.deg + 5*u.arcmin + 6*u.arcsec)
# might as well check with sillier units?
assert_quantity_allclose(sc3.pm_ra_cosdec, 1.2776637006616473e-07 * u.arcmin / u.fortnight)
assert_quantity_allclose(sc3.pm_dec, 6.388318503308237e-08 * u.arcmin / u.fortnight)
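    # Worked check of the pm_ra_cosdec value above: 0.2 mas/yr is
    # (0.2 / 60000) arcmin/yr, and 1 fortnight is 14 / 365.25 Julian years.
    assert np.isclose(0.2 / 60000. * 14. / 365.25, 1.2776637006616473e-07)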
def test_creation_copy_basic():
i = ICRS(1*u.deg, 2*u.deg, pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr)
sc = SkyCoord(i)
sc_cpy = SkyCoord(sc)
for attrnm in ['ra', 'dec', 'pm_ra_cosdec', 'pm_dec']:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_cpy, attrnm))
def test_creation_copy_rediff():
sc = SkyCoord(1*u.deg, 2*u.deg,
pm_ra=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr,
differential_type=SphericalDifferential)
sc_cpy = SkyCoord(sc)
for attrnm in ['ra', 'dec', 'pm_ra', 'pm_dec']:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_cpy, attrnm))
sc_newdiff = SkyCoord(sc, differential_type=SphericalCosLatDifferential)
reprepr = sc.represent_as(SphericalRepresentation, SphericalCosLatDifferential)
assert_quantity_allclose(sc_newdiff.pm_ra_cosdec,
reprepr.differentials['s'].d_lon_coslat)
def test_creation_cartesian():
rep = CartesianRepresentation([10, 0., 0.]*u.pc)
dif = CartesianDifferential([0, 100, 0.]*u.pc/u.Myr)
rep = rep.with_differentials(dif)
c = SkyCoord(rep)
sdif = dif.represent_as(SphericalCosLatDifferential, rep)
assert_quantity_allclose(c.pm_ra_cosdec, sdif.d_lon_coslat)
def test_useful_error_missing():
sc_nod = SkyCoord(ICRS(1*u.deg, 2*u.deg))
try:
sc_nod.l
except AttributeError as e:
# this is double-checking the *normal* behavior
msg_l = e.args[0]
try:
sc_nod.pm_dec
except Exception as e:
msg_pm_dec = e.args[0]
assert "has no attribute" in msg_l
assert "has no associated differentials" in msg_pm_dec
# ----------------------Operations on SkyCoords w/ velocities-------------------
# define some fixtures to get baseline coordinates to try operations with
@pytest.fixture(scope="module", params=[(False, False),
(True, False),
(False, True),
(True, True)])
def sc(request):
incldist, inclrv = request.param
args = [1*u.deg, 2*u.deg]
kwargs = dict(pm_dec=1*u.mas/u.yr, pm_ra_cosdec=2*u.mas/u.yr)
if incldist:
kwargs['distance'] = 213.4*u.pc
if inclrv:
kwargs['radial_velocity'] = 61*u.km/u.s
return SkyCoord(*args, **kwargs)
@pytest.fixture(scope="module")
def scmany():
return SkyCoord(ICRS(ra=[1]*100*u.deg, dec=[2]*100*u.deg,
pm_ra_cosdec=np.random.randn(100)*u.mas/u.yr,
pm_dec=np.random.randn(100)*u.mas/u.yr,))
@pytest.fixture(scope="module")
def sc_for_sep():
return SkyCoord(1*u.deg, 2*u.deg,
pm_dec=1*u.mas/u.yr, pm_ra_cosdec=2*u.mas/u.yr)
def test_separation(sc, sc_for_sep):
sc.separation(sc_for_sep)
def test_accessors(sc, scmany):
sc.data.differentials['s']
sph = sc.spherical
gal = sc.galactic
if (sc.data.get_name().startswith('unit') and not
sc.data.differentials['s'].get_name().startswith('unit')):
# this xfail can be eliminated when issue #7028 is resolved
pytest.xfail('.velocity fails if there is an RV but not distance')
sc.velocity
assert isinstance(sph, SphericalRepresentation)
assert gal.data.differentials is not None
scmany[0]
sph = scmany.spherical
gal = scmany.galactic
assert isinstance(sph, SphericalRepresentation)
assert gal.data.differentials is not None
def test_transforms(sc):
trans = sc.transform_to('galactic')
assert isinstance(trans.frame, Galactic)
def test_transforms_diff(sc):
# note that arguably this *should* fail for the no-distance cases: 3D
# information is necessary to truly solve this, hence the xfail
if not sc.distance.unit.is_equivalent(u.m):
pytest.xfail('Should fail for no-distance cases')
else:
trans = sc.transform_to(PrecessedGeocentric(equinox='B1975'))
assert isinstance(trans.frame, PrecessedGeocentric)
@pytest.mark.skipif('not HAS_SCIPY')
def test_matching(sc, scmany):
# just check that it works and yields something
idx, d2d, d3d = sc.match_to_catalog_sky(scmany)
def test_position_angle(sc, sc_for_sep):
sc.position_angle(sc_for_sep)
def test_constellations(sc):
const = sc.get_constellation()
assert const == 'Pisces'
def test_separation_3d_with_differentials():
c1 = SkyCoord(ra=138*u.deg, dec=-17*u.deg, distance=100*u.pc,
pm_ra_cosdec=5*u.mas/u.yr,
pm_dec=-7*u.mas/u.yr,
radial_velocity=160*u.km/u.s)
c2 = SkyCoord(ra=138*u.deg, dec=-17*u.deg, distance=105*u.pc,
pm_ra_cosdec=15*u.mas/u.yr,
pm_dec=-74*u.mas/u.yr,
radial_velocity=-60*u.km/u.s)
sep = c1.separation_3d(c2)
assert_quantity_allclose(sep, 5*u.pc)
@pytest.mark.parametrize('sph_type', ['spherical', 'unitspherical'])
def test_cartesian_to_spherical(sph_type):
"""Conversion to unitspherical should work, even if we lose distance."""
c = SkyCoord(x=1*u.kpc, y=0*u.kpc, z=0*u.kpc,
v_x=10*u.km/u.s, v_y=0*u.km/u.s, v_z=4.74*u.km/u.s,
representation_type='cartesian')
c.representation_type = sph_type
assert c.ra == 0
assert c.dec == 0
assert c.pm_ra == 0
assert u.allclose(c.pm_dec, 1*u.mas/u.yr, rtol=1e-3)
assert c.radial_velocity == 10*u.km/u.s
if sph_type == 'spherical':
assert c.distance == 1*u.kpc
else:
assert not hasattr(c, 'distance')
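def test_proper_motion_constant_sketch():
    # The 4.74 km/s used above is the classic proper-motion constant:
    # 1 mas/yr at a distance of 1 kpc corresponds to ~4.74 km/s of
    # tangential velocity, via the dimensionless-angle equivalency.
    v = (1 * u.mas / u.yr * u.kpc).to(u.km / u.s, u.dimensionless_angles())
    assert_quantity_allclose(v, 4.74 * u.km / u.s, rtol=1e-3)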
@pytest.mark.parametrize('diff_info, diff_cls', [
(dict(radial_velocity=[20, 30]*u.km/u.s), RadialDifferential),
(dict(pm_ra=[2, 3]*u.mas/u.yr, pm_dec=[-3, -4]*u.mas/u.yr,
differential_type='unitspherical'), UnitSphericalDifferential),
(dict(pm_ra_cosdec=[2, 3]*u.mas/u.yr, pm_dec=[-3, -4]*u.mas/u.yr),
UnitSphericalCosLatDifferential)], scope='class')
class TestDifferentialClassPropagation:
"""Test that going in between spherical and unit-spherical, we do not
change differential type (since both can handle the same types).
"""
def test_sc_unit_spherical_with_pm_or_rv_only(self, diff_info, diff_cls):
sc = SkyCoord(ra=[10, 20]*u.deg, dec=[-10, 10]*u.deg, **diff_info)
assert isinstance(sc.data, UnitSphericalRepresentation)
assert isinstance(sc.data.differentials['s'], diff_cls)
sr = sc.represent_as('spherical')
assert isinstance(sr, SphericalRepresentation)
assert isinstance(sr.differentials['s'], diff_cls)
def test_sc_spherical_with_pm_or_rv_only(self, diff_info, diff_cls):
sc = SkyCoord(ra=[10, 20]*u.deg, dec=[-10, 10]*u.deg,
distance=1.*u.kpc, **diff_info)
assert isinstance(sc.data, SphericalRepresentation)
assert isinstance(sc.data.differentials['s'], diff_cls)
sr = sc.represent_as('unitspherical')
assert isinstance(sr, UnitSphericalRepresentation)
assert isinstance(sr.differentials['s'], diff_cls)
|
964f3e73393eb068564810ff0fd257a85aa9f832ff839f83e83c11967f08cfce | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import pytest
import numpy as np
from numpy import testing as npt
import erfa
from astropy import units as u
from astropy.time import Time
from astropy.coordinates.builtin_frames import ICRS, AltAz
from astropy.coordinates.builtin_frames.utils import get_jd12
from astropy.coordinates import EarthLocation
from astropy.coordinates import SkyCoord
from astropy.utils import iers
from astropy.coordinates.angle_utilities import golden_spiral_grid
# These fixtures are used in test_iau_fullstack
@pytest.fixture(scope="function")
def fullstack_icrs():
rep = golden_spiral_grid(size=1000)
return ICRS(rep)
@pytest.fixture(scope="function")
def fullstack_fiducial_altaz(fullstack_icrs):
altazframe = AltAz(location=EarthLocation(lat=0*u.deg, lon=0*u.deg, height=0*u.m),
obstime=Time('J2000'))
with warnings.catch_warnings(): # Ignore remote_data warning
warnings.simplefilter('ignore')
result = fullstack_icrs.transform_to(altazframe)
return result
@pytest.fixture(scope="function", params=['J2000.1', 'J2010'])
def fullstack_times(request):
return Time(request.param)
@pytest.fixture(scope="function", params=[(0, 0, 0), (23, 0, 0), (-70, 0, 0), (0, 100, 0), (23, 0, 3000)])
def fullstack_locations(request):
    # the params are (lat, lon, height) tuples; index each field accordingly
    return EarthLocation(lat=request.param[0]*u.deg, lon=request.param[1]*u.deg,
                         height=request.param[2]*u.m)
@pytest.fixture(scope="function",
params=[(0*u.bar, 0*u.deg_C, 0, 1*u.micron),
(1*u.bar, 0*u.deg_C, 0*u.one, 1*u.micron),
(1*u.bar, 10*u.deg_C, 0, 1*u.micron),
(1*u.bar, 0*u.deg_C, 50*u.percent, 1*u.micron),
(1*u.bar, 0*u.deg_C, 0, 21*u.cm)])
def fullstack_obsconditions(request):
return request.param
def _erfa_check(ira, idec, astrom):
"""
This function does the same thing the astropy layer is supposed to do, but
all in erfa
"""
    # ICRS -> CIRS (astrometric to geocentric apparent place)
    cra, cdec = erfa.atciq(ira, idec, 0, 0, 0, 0, astrom)
    # CIRS -> observed (azimuth, zenith distance, hour angle, obs. dec/ra)
    az, zen, ha, odec, ora = erfa.atioq(cra, cdec, astrom)
    alt = np.pi/2-zen
    # and back again, observed -> CIRS -> ICRS, for round-trip checking
    cra2, cdec2 = erfa.atoiq('A', az, zen, astrom)
    ira2, idec2 = erfa.aticq(cra2, cdec2, astrom)
dct = locals()
del dct['astrom']
return dct
def test_iau_fullstack(fullstack_icrs, fullstack_fiducial_altaz,
fullstack_times, fullstack_locations,
fullstack_obsconditions):
"""
Test the full transform from ICRS <-> AltAz
"""
# create the altaz frame
altazframe = AltAz(obstime=fullstack_times, location=fullstack_locations,
pressure=fullstack_obsconditions[0],
temperature=fullstack_obsconditions[1],
relative_humidity=fullstack_obsconditions[2],
obswl=fullstack_obsconditions[3])
aacoo = fullstack_icrs.transform_to(altazframe)
# compare aacoo to the fiducial AltAz - should always be different
assert np.all(np.abs(aacoo.alt - fullstack_fiducial_altaz.alt) > 50*u.milliarcsecond)
assert np.all(np.abs(aacoo.az - fullstack_fiducial_altaz.az) > 50*u.milliarcsecond)
# if the refraction correction is included, we *only* do the comparisons
    # where altitude > 5 degrees. The SOFA guides imply that below 5 is where
    # accuracy gets more problematic, and testing reveals that alt < ~0
# gives garbage round-tripping, and <10 can give ~1 arcsec uncertainty
if fullstack_obsconditions[0].value == 0:
# but if there is no refraction correction, check everything
msk = slice(None)
tol = 5*u.microarcsecond
else:
msk = aacoo.alt > 5*u.deg
# most of them aren't this bad, but some of those at low alt are offset
    # this much. For alt > 10, this is always better than 100 mas
tol = 750*u.milliarcsecond
# now make sure the full stack round-tripping works
icrs2 = aacoo.transform_to(ICRS())
adras = np.abs(fullstack_icrs.ra - icrs2.ra)[msk]
addecs = np.abs(fullstack_icrs.dec - icrs2.dec)[msk]
assert np.all(adras < tol), f'largest RA change is {np.max(adras.arcsec * 1000)} mas, > {tol}'
assert np.all(addecs < tol), f'largest Dec change is {np.max(addecs.arcsec * 1000)} mas, > {tol}'
# check that we're consistent with the ERFA alt/az result
iers_tab = iers.earth_orientation_table.get()
xp, yp = u.Quantity(iers_tab.pm_xy(fullstack_times)).to_value(u.radian)
lon = fullstack_locations.geodetic[0].to_value(u.radian)
lat = fullstack_locations.geodetic[1].to_value(u.radian)
height = fullstack_locations.geodetic[2].to_value(u.m)
jd1, jd2 = get_jd12(fullstack_times, 'utc')
pressure = fullstack_obsconditions[0].to_value(u.hPa)
temperature = fullstack_obsconditions[1].to_value(u.deg_C)
# Relative humidity can be a quantity or a number.
relative_humidity = u.Quantity(fullstack_obsconditions[2], u.one).value
obswl = fullstack_obsconditions[3].to_value(u.micron)
astrom, eo = erfa.apco13(jd1, jd2,
fullstack_times.delta_ut1_utc,
lon, lat, height,
xp, yp,
pressure, temperature, relative_humidity,
obswl)
erfadct = _erfa_check(fullstack_icrs.ra.rad, fullstack_icrs.dec.rad, astrom)
npt.assert_allclose(erfadct['alt'], aacoo.alt.radian, atol=1e-7)
npt.assert_allclose(erfadct['az'], aacoo.az.radian, atol=1e-7)
def test_fiducial_roundtrip(fullstack_icrs, fullstack_fiducial_altaz):
"""
Test the full transform from ICRS <-> AltAz
"""
aacoo = fullstack_icrs.transform_to(fullstack_fiducial_altaz)
# make sure the round-tripping works
icrs2 = aacoo.transform_to(ICRS())
npt.assert_allclose(fullstack_icrs.ra.deg, icrs2.ra.deg)
npt.assert_allclose(fullstack_icrs.dec.deg, icrs2.dec.deg)
def test_future_altaz():
"""
While this does test the full stack, it is mostly meant to check that a
warning is raised when attempting to get to AltAz in the future (beyond
IERS tables)
"""
from astropy.utils.exceptions import AstropyWarning
# this is an ugly hack to get the warning to show up even if it has already
# appeared
from astropy.coordinates.builtin_frames import utils
if hasattr(utils, '__warningregistry__'):
utils.__warningregistry__.clear()
location = EarthLocation(lat=0*u.deg, lon=0*u.deg)
t = Time('J2161')
# check that these message(s) appear among any other warnings. If tests are run with
# --remote-data then the IERS table will be an instance of IERS_Auto which is
# assured of being "fresh". In this case getting times outside the range of the
# table does not raise an exception. Only if using IERS_B (which happens without
# --remote-data, i.e. for all CI testing) do we expect another warning.
with pytest.warns(AstropyWarning, match=r"Tried to get polar motions for "
"times after IERS data is valid.*") as found_warnings:
SkyCoord(1*u.deg, 2*u.deg).transform_to(AltAz(location=location, obstime=t))
if isinstance(iers.earth_orientation_table.get(), iers.IERS_B):
messages_found = ["(some) times are outside of range covered by IERS "
"table." in str(w.message) for w in found_warnings]
assert any(messages_found)
|
33a66018f892069836188d8ea0ed2c77ce1d508ece3ef72758d729f1ced698b6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test replacements for ERFA functions atciqz and aticq."""
import pytest
import erfa
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
import astropy.units as u
from astropy.coordinates.builtin_frames.utils import get_jd12, atciqz, aticq
from astropy.coordinates import SphericalRepresentation
# Hard-coded random values
sph = SphericalRepresentation(lon=[15., 214.] * u.deg,
lat=[-12., 64.] * u.deg,
distance=[1, 1.])
@pytest.mark.parametrize('t', [Time("2014-06-25T00:00"),
Time(["2014-06-25T00:00", "2014-09-24"])])
@pytest.mark.parametrize('pos', [sph[0], sph])
def test_atciqz_aticq(t, pos):
"""Check replacements against erfa versions for consistency."""
jd1, jd2 = get_jd12(t, 'tdb')
astrom, _ = erfa.apci13(jd1, jd2)
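    # apci13 returns the star-independent ICRS<->CIRS astrometry parameters.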
ra = pos.lon.to_value(u.rad)
dec = pos.lat.to_value(u.rad)
assert_allclose(erfa.atciqz(ra, dec, astrom), atciqz(pos, astrom))
assert_allclose(erfa.aticq(ra, dec, astrom), aticq(pos, astrom))
|
0b837868bee30d030b771506d77c24ef38f81f17d0780253ed2bc82aa5276ea0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.coordinates.matrix_utilities import (rotation_matrix, angle_axis,
is_O3, is_rotation)
def test_rotation_matrix():
assert_array_equal(rotation_matrix(0*u.deg, 'x'), np.eye(3))
assert_allclose(rotation_matrix(90*u.deg, 'y'), [[0, 0, -1],
[0, 1, 0],
[1, 0, 0]], atol=1e-12)
assert_allclose(rotation_matrix(-90*u.deg, 'z'), [[0, -1, 0],
[1, 0, 0],
[0, 0, 1]], atol=1e-12)
assert_allclose(rotation_matrix(45*u.deg, 'x'),
rotation_matrix(45*u.deg, [1, 0, 0]))
assert_allclose(rotation_matrix(125*u.deg, 'y'),
rotation_matrix(125*u.deg, [0, 1, 0]))
assert_allclose(rotation_matrix(-30*u.deg, 'z'),
rotation_matrix(-30*u.deg, [0, 0, 1]))
assert_allclose(np.dot(rotation_matrix(180*u.deg, [1, 1, 0]), [1, 0, 0]),
[0, 1, 0], atol=1e-12)
# make sure it also works for very small angles
assert_allclose(rotation_matrix(0.000001*u.deg, 'x'),
rotation_matrix(0.000001*u.deg, [1, 0, 0]))
def test_angle_axis():
m1 = rotation_matrix(35*u.deg, 'x')
an1, ax1 = angle_axis(m1)
    assert np.abs(an1 - 35*u.deg) < 1e-10*u.deg
assert_allclose(ax1, [1, 0, 0])
m2 = rotation_matrix(-89*u.deg, [1, 1, 0])
an2, ax2 = angle_axis(m2)
    assert np.abs(an2 - 89*u.deg) < 1e-10*u.deg
assert_allclose(ax2, [-2**-0.5, -2**-0.5, 0])
def test_is_O3():
"""Test the matrix checker ``is_O3``."""
# Normal rotation matrix
m1 = rotation_matrix(35*u.deg, 'x')
assert is_O3(m1)
# and (M, 3, 3)
n1 = np.tile(m1, (2, 1, 1))
assert tuple(is_O3(n1)) == (True, True) # (show the broadcasting)
# reflection
m2 = m1.copy()
    m2[0, 0] *= -1
assert is_O3(m2)
# and (M, 3, 3)
n2 = np.stack((m1, m2))
assert tuple(is_O3(n2)) == (True, True) # (show the broadcasting)
# Not any sort of O(3)
m3 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert not is_O3(m3)
# and (M, 3, 3)
n3 = np.stack((m1, m3))
assert tuple(is_O3(n3)) == (True, False) # (show the broadcasting)
def test_is_rotation():
"""Test the rotation matrix checker ``is_rotation``."""
# Normal rotation matrix
m1 = rotation_matrix(35*u.deg, 'x')
assert is_rotation(m1)
assert is_rotation(m1, allow_improper=True) # (a less restrictive test)
# and (M, 3, 3)
n1 = np.tile(m1, (2, 1, 1))
assert tuple(is_rotation(n1)) == (True, True) # (show the broadcasting)
# Improper rotation (unit rotation + reflection)
m2 = np.identity(3)
    m2[0, 0] = -1
assert not is_rotation(m2)
assert is_rotation(m2, allow_improper=True)
# and (M, 3, 3)
n2 = np.stack((m1, m2))
assert tuple(is_rotation(n2)) == (True, False) # (show the broadcasting)
# Not any sort of rotation
m3 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert not is_rotation(m3)
assert not is_rotation(m3, allow_improper=True)
# and (M, 3, 3)
n3 = np.stack((m1, m3))
assert tuple(is_rotation(n3)) == (True, False) # (show the broadcasting)
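# Illustrative sketch (an editorial addition, not part of the original suite):
# the determinant is what separates the cases above, so check it directly.
def test_determinant_sketch():
    # Proper rotations have determinant +1 ...
    assert_allclose(np.linalg.det(rotation_matrix(35*u.deg, 'x')), 1.0)
    # ... while a reflection about the yz-plane is orthogonal with det -1.
    assert_allclose(np.linalg.det(np.diag([-1., 1., 1.])), -1.0)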
|
81b8560fb9ceedaa3423ea7ee69724771ea7b0469a2a5c78772f2fc0b0c42cc9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Regression tests for coordinates-related bugs that don't have an obvious other
place to live
"""
import io
import copy
import pytest
import numpy as np
from contextlib import nullcontext
from erfa import ErfaWarning
from astropy import units as u
from astropy.coordinates import (
AltAz, EarthLocation, SkyCoord, get_sun, ICRS,
GeocentricMeanEcliptic, Longitude, Latitude, GCRS, HCRS, CIRS,
get_moon, FK4, FK4NoETerms, BaseCoordinateFrame, ITRS,
QuantityAttribute, UnitSphericalRepresentation,
SphericalRepresentation, CartesianRepresentation,
FunctionTransform, get_body,
CylindricalRepresentation, CylindricalDifferential,
CartesianDifferential)
from astropy.coordinates.sites import get_builtin_sites
from astropy.time import Time
from astropy.utils import iers
from astropy.table import Table
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from astropy.units import allclose as quantity_allclose
def test_regression_5085():
"""
PR #5085 was put in place to fix the following issue.
Issue: https://github.com/astropy/astropy/issues/5069
At root was the transformation of Ecliptic coordinates with
non-scalar times.
"""
# Note: for regression test, we need to be sure that we use UTC for the
# epoch, even though more properly that should be TT; but the "expected"
# values were calculated using that.
j2000 = Time('J2000', scale='utc')
times = Time(["2015-08-28 03:30", "2015-09-05 10:30", "2015-09-15 18:35"])
latitudes = Latitude([3.9807075, -5.00733806, 1.69539491]*u.deg)
longitudes = Longitude([311.79678613, 72.86626741, 199.58698226]*u.deg)
distances = u.Quantity([0.00243266, 0.0025424, 0.00271296]*u.au)
coo = GeocentricMeanEcliptic(lat=latitudes,
lon=longitudes,
distance=distances, obstime=times, equinox=times)
# expected result
ras = Longitude([310.50095400, 314.67109920, 319.56507428]*u.deg)
decs = Latitude([-18.25190443, -17.1556676, -15.71616522]*u.deg)
distances = u.Quantity([1.78309901, 1.710874, 1.61326649]*u.au)
expected_result = GCRS(ra=ras, dec=decs,
distance=distances, obstime=j2000).cartesian.xyz
actual_result = coo.transform_to(GCRS(obstime=j2000)).cartesian.xyz
assert_quantity_allclose(expected_result, actual_result)
def test_regression_3920():
"""
Issue: https://github.com/astropy/astropy/issues/3920
"""
loc = EarthLocation.from_geodetic(0*u.deg, 0*u.deg, 0)
time = Time('2010-1-1')
aa = AltAz(location=loc, obstime=time)
sc = SkyCoord(10*u.deg, 3*u.deg)
assert sc.transform_to(aa).shape == tuple()
# That part makes sense: the input is a scalar so the output is too
sc2 = SkyCoord(10*u.deg, 3*u.deg, 1*u.AU)
assert sc2.transform_to(aa).shape == tuple()
# in 3920 that assert fails, because the shape is (1,)
# check that the same behavior occurs even if transform is from low-level classes
icoo = ICRS(sc.data)
icoo2 = ICRS(sc2.data)
assert icoo.transform_to(aa).shape == tuple()
assert icoo2.transform_to(aa).shape == tuple()
def test_regression_3938():
"""
Issue: https://github.com/astropy/astropy/issues/3938
"""
# Set up list of targets - we don't use `from_name` here to avoid
# remote_data requirements, but it does the same thing
# vega = SkyCoord.from_name('Vega')
vega = SkyCoord(279.23473479*u.deg, 38.78368896*u.deg)
# capella = SkyCoord.from_name('Capella')
capella = SkyCoord(79.17232794*u.deg, 45.99799147*u.deg)
# sirius = SkyCoord.from_name('Sirius')
sirius = SkyCoord(101.28715533*u.deg, -16.71611586*u.deg)
targets = [vega, capella, sirius]
# Feed list of targets into SkyCoord
combined_coords = SkyCoord(targets)
# Set up AltAz frame
time = Time('2012-01-01 00:00:00')
location = EarthLocation('10d', '45d', 0)
aa = AltAz(location=location, obstime=time)
combined_coords.transform_to(aa)
# in 3938 the above yields ``UnitConversionError: '' (dimensionless) and 'pc' (length) are not convertible``
def test_regression_3998():
"""
Issue: https://github.com/astropy/astropy/issues/3998
"""
time = Time('2012-01-01 00:00:00')
assert time.isscalar
sun = get_sun(time)
assert sun.isscalar
# in 3998, the above yields False - `sun` is a length-1 vector
assert sun.obstime is time
def test_regression_4033():
"""
Issue: https://github.com/astropy/astropy/issues/4033
"""
# alb = SkyCoord.from_name('Albireo')
alb = SkyCoord(292.68033548*u.deg, 27.95968007*u.deg)
alb_wdist = SkyCoord(alb, distance=133*u.pc)
# de = SkyCoord.from_name('Deneb')
de = SkyCoord(310.35797975*u.deg, 45.28033881*u.deg)
de_wdist = SkyCoord(de, distance=802*u.pc)
aa = AltAz(location=EarthLocation(lat=45*u.deg, lon=0*u.deg), obstime='2010-1-1')
deaa = de.transform_to(aa)
albaa = alb.transform_to(aa)
alb_wdistaa = alb_wdist.transform_to(aa)
de_wdistaa = de_wdist.transform_to(aa)
# these work fine
sepnod = deaa.separation(albaa)
sepwd = deaa.separation(alb_wdistaa)
assert_quantity_allclose(sepnod, 22.2862*u.deg, rtol=1e-6)
assert_quantity_allclose(sepwd, 22.2862*u.deg, rtol=1e-6)
# parallax should be present when distance added
assert np.abs(sepnod - sepwd) > 1*u.marcsec
# in 4033, the following fail with a recursion error
assert_quantity_allclose(de_wdistaa.separation(alb_wdistaa), 22.2862*u.deg, rtol=1e-3)
assert_quantity_allclose(alb_wdistaa.separation(deaa), 22.2862*u.deg, rtol=1e-3)
@pytest.mark.skipif(not HAS_SCIPY, reason='No Scipy')
def test_regression_4082():
"""
Issue: https://github.com/astropy/astropy/issues/4082
"""
from astropy.coordinates import search_around_sky, search_around_3d
cat = SkyCoord([10.076, 10.00455], [18.54746, 18.54896], unit='deg')
search_around_sky(cat[0:1], cat, seplimit=u.arcsec * 60, storekdtree=False)
# in the issue, this raises a TypeError
# also check 3d for good measure, although it's not really affected by this bug directly
cat3d = SkyCoord([10.076, 10.00455]*u.deg, [18.54746, 18.54896]*u.deg, distance=[0.1, 1.5]*u.kpc)
search_around_3d(cat3d[0:1], cat3d, 1*u.kpc, storekdtree=False)
def test_regression_4210():
"""
Issue: https://github.com/astropy/astropy/issues/4210
Related PR with actual change: https://github.com/astropy/astropy/pull/4211
"""
crd = SkyCoord(0*u.deg, 0*u.deg, distance=1*u.AU)
ecl = crd.geocentricmeanecliptic
# bug was that "lambda", which at the time was the name of the geocentric
# ecliptic longitude, is a reserved keyword. So this just makes sure the
    # new name is valid
ecl.lon
# and for good measure, check the other ecliptic systems are all the same
# names for their attributes
from astropy.coordinates.builtin_frames import ecliptic
for frame_name in ecliptic.__all__:
eclcls = getattr(ecliptic, frame_name)
eclobj = eclcls(1*u.deg, 2*u.deg, 3*u.AU)
eclobj.lat
eclobj.lon
eclobj.distance
def test_regression_futuretimes_4302():
"""
Checks that an error is not raised for future times not covered by IERS
tables (at least in a simple transform like CIRS->ITRS that simply requires
the UTC<->UT1 conversion).
Relevant comment: https://github.com/astropy/astropy/pull/4302#discussion_r44836531
"""
from astropy.utils.exceptions import AstropyWarning
# this is an ugly hack to get the warning to show up even if it has already
# appeared
from astropy.coordinates.builtin_frames import utils
if hasattr(utils, '__warningregistry__'):
utils.__warningregistry__.clear()
# check that out-of-range warning appears among any other warnings. If
# tests are run with --remote-data then the IERS table will be an instance
# of IERS_Auto which is assured of being "fresh". In this case getting
# times outside the range of the table does not raise an exception. Only
# if using IERS_B (which happens without --remote-data, i.e. for all CI
# testing) do we expect another warning.
if isinstance(iers.earth_orientation_table.get(), iers.IERS_B):
ctx = pytest.warns(
AstropyWarning,
match=r'\(some\) times are outside of range covered by IERS table.*')
else:
ctx = nullcontext()
with ctx:
future_time = Time('2511-5-1')
c = CIRS(1*u.deg, 2*u.deg, obstime=future_time)
c.transform_to(ITRS(obstime=future_time))
def test_regression_4996():
# this part is the actual regression test
deltat = np.linspace(-12, 12, 1000)*u.hour
times = Time('2012-7-13 00:00:00') + deltat
suncoo = get_sun(times)
assert suncoo.shape == (len(times),)
# and this is an additional test to make sure more complex arrays work
times2 = Time('2012-7-13 00:00:00') + deltat.reshape(10, 20, 5)
suncoo2 = get_sun(times2)
assert suncoo2.shape == times2.shape
# this is intentionally not allclose - they should be *exactly* the same
assert np.all(suncoo.ra.ravel() == suncoo2.ra.ravel())
def test_regression_4293():
"""Really just an extra test on FK4 no e, after finding that the units
were not always taken correctly. This test is against explicitly doing
the transformations on pp170 of Explanatory Supplement to the Astronomical
Almanac (Seidelmann, 2005).
See https://github.com/astropy/astropy/pull/4293#issuecomment-234973086
"""
# Check all over sky, but avoiding poles (note that FK4 did not ignore
    # e terms within 10° of the poles... see p170 of explan.supp.).
ra, dec = np.meshgrid(np.arange(0, 359, 45), np.arange(-80, 81, 40))
fk4 = FK4(ra.ravel() * u.deg, dec.ravel() * u.deg)
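    # E-term constants from p. 170 of the Explanatory Supplement: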
Dc = -0.065838*u.arcsec
Dd = +0.335299*u.arcsec
# Dc * tan(obliquity), as given on p.170
Dctano = -0.028553*u.arcsec
fk4noe_dec = (fk4.dec - (Dd*np.cos(fk4.ra) -
Dc*np.sin(fk4.ra))*np.sin(fk4.dec) -
Dctano*np.cos(fk4.dec))
fk4noe_ra = fk4.ra - (Dc*np.cos(fk4.ra) +
Dd*np.sin(fk4.ra)) / np.cos(fk4.dec)
fk4noe = fk4.transform_to(FK4NoETerms())
# Tolerance here just set to how well the coordinates match, which is much
# better than the claimed accuracy of <1 mas for this first-order in
# v_earth/c approximation.
# Interestingly, if one divides by np.cos(fk4noe_dec) in the ra correction,
# the match becomes good to 2 μas.
assert_quantity_allclose(fk4noe.ra, fk4noe_ra, atol=11.*u.uas, rtol=0)
assert_quantity_allclose(fk4noe.dec, fk4noe_dec, atol=3.*u.uas, rtol=0)
def test_regression_4926():
times = Time('2010-01-1') + np.arange(20)*u.day
green = get_builtin_sites()['greenwich']
# this is the regression test
moon = get_moon(times, green)
# this is an additional test to make sure the GCRS->ICRS transform works for complex shapes
moon.transform_to(ICRS())
# and some others to increase coverage of transforms
moon.transform_to(HCRS(obstime="J2000"))
moon.transform_to(HCRS(obstime=times))
def test_regression_5209():
"check that distances are not lost on SkyCoord init"
time = Time('2015-01-01')
moon = get_moon(time)
new_coord = SkyCoord([moon])
assert_quantity_allclose(new_coord[0].distance, moon.distance)
def test_regression_5133():
N = 1000
np.random.seed(12345)
lon = np.random.uniform(-10, 10, N) * u.deg
lat = np.random.uniform(50, 52, N) * u.deg
alt = np.random.uniform(0, 10., N) * u.km
time = Time('2010-1-1')
objects = EarthLocation.from_geodetic(lon, lat, height=alt)
itrs_coo = objects.get_itrs(time)
homes = [EarthLocation.from_geodetic(lon=-1 * u.deg, lat=52 * u.deg, height=h)
for h in (0, 1000, 10000)*u.km]
altaz_frames = [AltAz(obstime=time, location=h) for h in homes]
altaz_coos = [itrs_coo.transform_to(f) for f in altaz_frames]
# they should all be different
for coo in altaz_coos[1:]:
assert not quantity_allclose(coo.az, coo.az[0])
assert not quantity_allclose(coo.alt, coo.alt[0])
def test_itrs_vals_5133():
"""
Test to check if alt-az calculations respect height of observer
Because ITRS is geocentric and includes aberration, an object that
appears 'straight up' to a geocentric observer (ITRS) won't be
straight up to a topocentric observer - see
https://github.com/astropy/astropy/issues/10983
This is worse for small height above the Earth, which is why this test
uses large distances.
"""
time = Time('2010-1-1')
height = 500000. * u.km
el = EarthLocation.from_geodetic(lon=20*u.deg, lat=45*u.deg, height=height)
lons = [20, 30, 20]*u.deg
lats = [44, 45, 45]*u.deg
alts = u.Quantity([height, height, 10*height])
coos = [EarthLocation.from_geodetic(lon, lat, height=alt).get_itrs(time)
for lon, lat, alt in zip(lons, lats, alts)]
aaf = AltAz(obstime=time, location=el)
aacs = [coo.transform_to(aaf) for coo in coos]
assert all([coo.isscalar for coo in aacs])
# the ~1 degree tolerance is b/c aberration makes it not exact
assert_quantity_allclose(aacs[0].az, 180*u.deg, atol=1*u.deg)
assert aacs[0].alt < 0*u.deg
assert aacs[0].distance > 5000*u.km
# it should *not* actually be 90 degrees, b/c constant latitude is not
# straight east anywhere except the equator... but should be close-ish
assert_quantity_allclose(aacs[1].az, 90*u.deg, atol=5*u.deg)
assert aacs[1].alt < 0*u.deg
assert aacs[1].distance > 5000*u.km
assert_quantity_allclose(aacs[2].alt, 90*u.deg, atol=1*u.arcminute)
assert_quantity_allclose(aacs[2].distance, 9*height)
def test_regression_simple_5133():
"""
Simple test to check if alt-az calculations respect height of observer
Because ITRS is geocentric and includes aberration, an object that
appears 'straight up' to a geocentric observer (ITRS) won't be
straight up to a topocentric observer - see
https://github.com/astropy/astropy/issues/10983
    This is why we construct a topocentric GCRS coordinate before calculating AltAz
"""
t = Time('J2010')
obj = EarthLocation(-1*u.deg, 52*u.deg, height=[10., 0.]*u.km)
home = EarthLocation(-1*u.deg, 52*u.deg, height=5.*u.km)
obsloc_gcrs, obsvel_gcrs = home.get_gcrs_posvel(t)
gcrs_geo = obj.get_itrs(t).transform_to(GCRS(obstime=t))
obsrepr = home.get_itrs(t).transform_to(GCRS(obstime=t)).cartesian
topo_gcrs_repr = gcrs_geo.cartesian - obsrepr
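    # Subtracting the observer's geocentric position gives a topocentric
    # vector, which is then realized in a GCRS frame tied to the observer.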
topocentric_gcrs_frame = GCRS(obstime=t, obsgeoloc=obsloc_gcrs, obsgeovel=obsvel_gcrs)
gcrs_topo = topocentric_gcrs_frame.realize_frame(topo_gcrs_repr)
aa = gcrs_topo.transform_to(AltAz(obstime=t, location=home))
# az is more-or-less undefined for straight up or down
assert_quantity_allclose(aa.alt, [90, -90]*u.deg, rtol=1e-7)
assert_quantity_allclose(aa.distance, 5*u.km)
def test_regression_5743():
sc = SkyCoord([5, 10], [20, 30], unit=u.deg,
obstime=['2017-01-01T00:00', '2017-01-01T00:10'])
assert sc[0].obstime.shape == tuple()
def test_regression_5889_5890():
# ensure we can represent all Representations and transform to ND frames
greenwich = EarthLocation(
*u.Quantity([3980608.90246817, -102.47522911, 4966861.27310067],
unit=u.m))
times = Time("2017-03-20T12:00:00") + np.linspace(-2, 2, 3)*u.hour
moon = get_moon(times, location=greenwich)
targets = SkyCoord([350.7*u.deg, 260.7*u.deg], [18.4*u.deg, 22.4*u.deg])
targs2d = targets[:, np.newaxis]
targs2d.transform_to(moon)
def test_regression_6236():
# sunpy changes its representation upon initialisation of a frame,
# including via `realize_frame`. Ensure this works.
class MyFrame(BaseCoordinateFrame):
default_representation = CartesianRepresentation
my_attr = QuantityAttribute(default=0, unit=u.m)
class MySpecialFrame(MyFrame):
def __init__(self, *args, **kwargs):
_rep_kwarg = kwargs.get('representation_type', None)
super().__init__(*args, **kwargs)
if not _rep_kwarg:
self.representation_type = self.default_representation
self._data = self.data.represent_as(self.representation_type)
rep1 = UnitSphericalRepresentation([0., 1]*u.deg, [2., 3.]*u.deg)
rep2 = SphericalRepresentation([10., 11]*u.deg, [12., 13.]*u.deg,
[14., 15.]*u.kpc)
mf1 = MyFrame(rep1, my_attr=1.*u.km)
mf2 = mf1.realize_frame(rep2)
# Normally, data is stored as is, but the representation gets set to a
# default, even if a different representation instance was passed in.
# realize_frame should do the same. Just in case, check attrs are passed.
assert mf1.data is rep1
assert mf2.data is rep2
assert mf1.representation_type is CartesianRepresentation
assert mf2.representation_type is CartesianRepresentation
assert mf2.my_attr == mf1.my_attr
# It should be independent of whether I set the representation explicitly
mf3 = MyFrame(rep1, my_attr=1.*u.km, representation_type='unitspherical')
mf4 = mf3.realize_frame(rep2)
assert mf3.data is rep1
assert mf4.data is rep2
assert mf3.representation_type is UnitSphericalRepresentation
assert mf4.representation_type is CartesianRepresentation
assert mf4.my_attr == mf3.my_attr
# This should be enough to help sunpy, but just to be sure, a test
# even closer to what is done there, i.e., transform the representation.
msf1 = MySpecialFrame(rep1, my_attr=1.*u.km)
msf2 = msf1.realize_frame(rep2)
assert msf1.data is not rep1 # Gets transformed to Cartesian.
assert msf2.data is not rep2
assert type(msf1.data) is CartesianRepresentation
assert type(msf2.data) is CartesianRepresentation
assert msf1.representation_type is CartesianRepresentation
assert msf2.representation_type is CartesianRepresentation
assert msf2.my_attr == msf1.my_attr
# And finally a test where the input is not transformed.
msf3 = MySpecialFrame(rep1, my_attr=1.*u.km,
representation_type='unitspherical')
msf4 = msf3.realize_frame(rep2)
assert msf3.data is rep1
assert msf4.data is not rep2
assert msf3.representation_type is UnitSphericalRepresentation
assert msf4.representation_type is CartesianRepresentation
assert msf4.my_attr == msf3.my_attr
@pytest.mark.skipif(not HAS_SCIPY, reason='No Scipy')
def test_regression_6347():
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)
sc2 = SkyCoord([1.1, 2.1]*u.deg, [3.1, 4.1]*u.deg)
sc0 = sc1[:0]
idx1_10, idx2_10, d2d_10, d3d_10 = sc1.search_around_sky(sc2, 10*u.arcmin)
idx1_1, idx2_1, d2d_1, d3d_1 = sc1.search_around_sky(sc2, 1*u.arcmin)
idx1_0, idx2_0, d2d_0, d3d_0 = sc0.search_around_sky(sc2, 10*u.arcmin)
assert len(d2d_10) == 2
assert len(d2d_0) == 0
assert type(d2d_0) is type(d2d_10)
assert len(d2d_1) == 0
assert type(d2d_1) is type(d2d_10)
@pytest.mark.skipif(not HAS_SCIPY, reason='No Scipy')
def test_regression_6347_3d():
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, [5, 6]*u.kpc)
sc2 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, [5.1, 6.1]*u.kpc)
sc0 = sc1[:0]
idx1_10, idx2_10, d2d_10, d3d_10 = sc1.search_around_3d(sc2, 500*u.pc)
idx1_1, idx2_1, d2d_1, d3d_1 = sc1.search_around_3d(sc2, 50*u.pc)
idx1_0, idx2_0, d2d_0, d3d_0 = sc0.search_around_3d(sc2, 500*u.pc)
assert len(d2d_10) > 0
assert len(d2d_0) == 0
assert type(d2d_0) is type(d2d_10)
assert len(d2d_1) == 0
assert type(d2d_1) is type(d2d_10)
def test_gcrs_itrs_cartesian_repr():
# issue 6436: transformation failed if coordinate representation was
# Cartesian
gcrs = GCRS(CartesianRepresentation((859.07256, -4137.20368, 5295.56871),
unit='km'), representation_type='cartesian')
gcrs.transform_to(ITRS())
def test_regression_6446():
# this succeeds even before 6446:
sc1 = SkyCoord([1, 2], [3, 4], unit='deg')
t1 = Table([sc1])
sio1 = io.StringIO()
t1.write(sio1, format='ascii.ecsv')
# but this fails due to the 6446 bug
c1 = SkyCoord(1, 3, unit='deg')
c2 = SkyCoord(2, 4, unit='deg')
sc2 = SkyCoord([c1, c2])
t2 = Table([sc2])
sio2 = io.StringIO()
t2.write(sio2, format='ascii.ecsv')
assert sio1.getvalue() == sio2.getvalue()
def test_regression_6597():
frame_name = 'galactic'
c1 = SkyCoord(1, 3, unit='deg', frame=frame_name)
c2 = SkyCoord(2, 4, unit='deg', frame=frame_name)
sc1 = SkyCoord([c1, c2])
assert sc1.frame.name == frame_name
def test_regression_6597_2():
"""
This tests the more subtle flaw that #6597 indirectly uncovered: that even
in the case that the frames are ra/dec, they still might be the wrong *kind*
"""
frame = FK4(equinox='J1949')
c1 = SkyCoord(1, 3, unit='deg', frame=frame)
c2 = SkyCoord(2, 4, unit='deg', frame=frame)
sc1 = SkyCoord([c1, c2])
assert sc1.frame.name == frame.name
def test_regression_6697():
"""
    Test for regression of a bug in get_gcrs_posvel that introduced errors at the 1 m/s level.
Comparison data is derived from calculation in PINT
https://github.com/nanograv/PINT/blob/master/pint/erfautils.py
"""
pint_vels = CartesianRepresentation(*(348.63632871, -212.31704928, -0.60154936), unit=u.m/u.s)
location = EarthLocation(*(5327448.9957829, -1718665.73869569, 3051566.90295403), unit=u.m)
t = Time(2458036.161966612, format='jd')
obsgeopos, obsgeovel = location.get_gcrs_posvel(t)
delta = (obsgeovel-pint_vels).norm()
assert delta < 1*u.cm/u.s
def test_regression_8138():
sc = SkyCoord(1*u.deg, 2*u.deg)
newframe = GCRS()
sc2 = sc.transform_to(newframe)
assert newframe.is_equivalent_frame(sc2.frame)
def test_regression_8276():
from astropy.coordinates import baseframe
class MyFrame(BaseCoordinateFrame):
a = QuantityAttribute(unit=u.m)
    # we save the transform graph so that it doesn't accidentally mess with other tests
old_transform_graph = baseframe.frame_transform_graph
try:
baseframe.frame_transform_graph = copy.copy(baseframe.frame_transform_graph)
# as reported in 8276, this previously failed right here because
# registering the transform tries to create a frame attribute
@baseframe.frame_transform_graph.transform(FunctionTransform, MyFrame, AltAz)
def trans(my_frame_coord, altaz_frame):
pass
# should also be able to *create* the Frame at this point
MyFrame()
finally:
baseframe.frame_transform_graph = old_transform_graph
def test_regression_8615():
# note this is a "higher-level" symptom of the problem that a test now moved
# to pyerfa (erfa/tests/test_erfa:test_float32_input) is testing for, but we keep
# it here as well due to being a more practical version of the issue.
crf = CartesianRepresentation(np.array([3, 0, 4], dtype=float) * u.pc)
srf = SphericalRepresentation.from_cartesian(crf) # does not error in 8615
cr = CartesianRepresentation(np.array([3, 0, 4], dtype='f4') * u.pc)
sr = SphericalRepresentation.from_cartesian(cr) # errors in 8615
assert_quantity_allclose(sr.distance, 5 * u.pc)
assert_quantity_allclose(srf.distance, 5 * u.pc)
def test_regression_8924():
"""This checks that the ValueError in
BaseRepresentation._re_represent_differentials is raised properly
"""
# A case where the representation has a 's' differential, but we try to
# re-represent only with an 's2' differential
rep = CartesianRepresentation(1, 2, 3, unit=u.kpc)
dif = CartesianDifferential(4, 5, 6, u.km/u.s)
rep = rep.with_differentials(dif)
with pytest.raises(ValueError):
rep._re_represent_differentials(CylindricalRepresentation,
{'s2': CylindricalDifferential})
def test_regression_10092():
"""
Check that we still get a proper motion even for SkyCoords without distance
"""
c = SkyCoord(l=10*u.degree, b=45*u.degree,
pm_l_cosb=34*u.mas/u.yr, pm_b=-117*u.mas/u.yr,
frame='galactic',
obstime=Time('1988-12-18 05:11:23.5'))
with pytest.warns(ErfaWarning, match='ERFA function "pmsafe" yielded .*'):
# expect ErfaWarning here
newc = c.apply_space_motion(dt=10*u.year)
assert_quantity_allclose(newc.pm_l_cosb, 33.99980714*u.mas/u.yr,
atol=1.0e-5*u.mas/u.yr)
def test_regression_10226():
# Dictionary representation of SkyCoord should contain differentials.
sc = SkyCoord([270, 280]*u.deg, [30, 35]*u.deg, [10, 11]*u.pc,
radial_velocity=[20, -20]*u.km/u.s)
sc_as_dict = sc.info._represent_as_dict()
assert 'radial_velocity' in sc_as_dict
# But only the components that have been specified.
assert 'pm_dec' not in sc_as_dict
@pytest.mark.parametrize('mjd', (
52000, [52000], [[52000]], [52001, 52002], [[52001], [52002]]))
def test_regression_10422(mjd):
"""
Check that we can get a GCRS for a scalar EarthLocation and a
size=1 non-scalar Time.
"""
# Avoid trying to download new IERS data.
with iers.earth_orientation_table.set(iers.IERS_B.open(iers.IERS_B_FILE)):
t = Time(mjd, format="mjd", scale="tai")
loc = EarthLocation(88258.0 * u.m, -4924882.2 * u.m, 3943729.0 * u.m)
p, v = loc.get_gcrs_posvel(obstime=t)
assert p.shape == v.shape == t.shape
@pytest.mark.remote_data
def test_regression_10291():
"""
According to https://eclipse.gsfc.nasa.gov/OH/transit12.html,
the minimum separation between Venus and the Sun during the 2012
transit is 554 arcseconds for an observer at the Geocenter.
If light deflection from the Sun is incorrectly applied, this increases
to 557 arcseconds.
"""
t = Time('2012-06-06 01:29:36')
sun = get_body('sun', t)
venus = get_body('venus', t)
assert_quantity_allclose(venus.separation(sun),
554.427*u.arcsecond, atol=0.001*u.arcsecond)
|
b194abc368a899add1cf600e133a1e30c21a6bd75cc0b4f5d790fa95ec920c1e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy import testing as npt
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy import units as u
from astropy.coordinates import matching
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
"""
These are the tests for coordinate matching.
Note that this requires scipy.
"""
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy.")
def test_matching_function():
from astropy.coordinates import ICRS
from astropy.coordinates.matching import match_coordinates_3d
# this only uses match_coordinates_3d because that's the actual implementation
cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree)
ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree)
idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog)
npt.assert_array_equal(idx, [3, 1])
npt.assert_array_almost_equal(d2d.degree, [0, 0.1])
assert d3d.value[0] == 0
idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, nthneighbor=2)
assert np.all(idx == 2)
npt.assert_array_almost_equal(d2d.degree, [1, 0.9])
npt.assert_array_less(d3d.value, 0.02)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy.")
def test_matching_function_3d_and_sky():
from astropy.coordinates import ICRS
from astropy.coordinates.matching import match_coordinates_3d, match_coordinates_sky
cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc)
ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc)
idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog)
npt.assert_array_equal(idx, [2, 3])
assert_allclose(d2d, [1, 1.9] * u.deg)
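    # For small angles the 3D chord length is ~ distance * separation (rad).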
assert np.abs(d3d[0].to_value(u.kpc) - np.radians(1)) < 1e-6
assert np.abs(d3d[1].to_value(u.kpc) - 5*np.radians(1.9)) < 1e-5
idx, d2d, d3d = match_coordinates_sky(cmatch, ccatalog)
npt.assert_array_equal(idx, [3, 1])
assert_allclose(d2d, [0, 0.1] * u.deg)
assert_allclose(d3d, [4, 4.0000019] * u.kpc)
@pytest.mark.parametrize('functocheck, args, defaultkdtname, bothsaved',
[(matching.match_coordinates_3d, [], 'kdtree_3d', False),
(matching.match_coordinates_sky, [], 'kdtree_sky', False),
(matching.search_around_3d, [1*u.kpc], 'kdtree_3d', True),
(matching.search_around_sky, [1*u.deg], 'kdtree_sky', False)
])
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy.")
def test_kdtree_storage(functocheck, args, defaultkdtname, bothsaved):
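    """Check where each matching function caches its KD tree.

    ``storekdtree`` may be False (no caching), True (cached as 'kdtree'),
    a custom string key, or omitted to use the function's default key.
    """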
from astropy.coordinates import ICRS
def make_scs():
cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 2]*u.kpc)
ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 2, 3, 4]*u.kpc)
return cmatch, ccatalog
cmatch, ccatalog = make_scs()
functocheck(cmatch, ccatalog, *args, storekdtree=False)
assert 'kdtree' not in ccatalog.cache
assert defaultkdtname not in ccatalog.cache
cmatch, ccatalog = make_scs()
functocheck(cmatch, ccatalog, *args)
assert defaultkdtname in ccatalog.cache
assert 'kdtree' not in ccatalog.cache
cmatch, ccatalog = make_scs()
functocheck(cmatch, ccatalog, *args, storekdtree=True)
assert 'kdtree' in ccatalog.cache
assert defaultkdtname not in ccatalog.cache
cmatch, ccatalog = make_scs()
assert 'tislit_cheese' not in ccatalog.cache
functocheck(cmatch, ccatalog, *args, storekdtree='tislit_cheese')
assert 'tislit_cheese' in ccatalog.cache
assert defaultkdtname not in ccatalog.cache
assert 'kdtree' not in ccatalog.cache
if bothsaved:
assert 'tislit_cheese' in cmatch.cache
assert defaultkdtname not in cmatch.cache
assert 'kdtree' not in cmatch.cache
else:
assert 'tislit_cheese' not in cmatch.cache
# now a bit of a hacky trick to make sure it at least tries to *use* it
ccatalog.cache['tislit_cheese'] = 1
cmatch.cache['tislit_cheese'] = 1
with pytest.raises(TypeError) as e:
functocheck(cmatch, ccatalog, *args, storekdtree='tislit_cheese')
assert 'KD' in e.value.args[0]
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy.")
def test_python_kdtree(monkeypatch):
from astropy.coordinates import ICRS
cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 2]*u.kpc)
ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 2, 3, 4]*u.kpc)
monkeypatch.delattr("scipy.spatial.cKDTree")
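    # With the C implementation removed, matching falls back to the (slower)
    # pure-python KDTree, with a warning.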
with pytest.warns(UserWarning, match=r'C-based KD tree not found'):
matching.match_coordinates_sky(cmatch, ccatalog)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy.")
def test_matching_method():
from astropy.coordinates import ICRS, SkyCoord
from astropy.utils import NumpyRNGContext
from astropy.coordinates.matching import match_coordinates_3d, match_coordinates_sky
with NumpyRNGContext(987654321):
cmatch = ICRS(np.random.rand(20) * 360.*u.degree,
(np.random.rand(20) * 180. - 90.)*u.degree)
ccatalog = ICRS(np.random.rand(100) * 360. * u.degree,
(np.random.rand(100) * 180. - 90.)*u.degree)
idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_3d(ccatalog)
idx2, d2d2, d3d2 = match_coordinates_3d(cmatch, ccatalog)
npt.assert_array_equal(idx1, idx2)
assert_allclose(d2d1, d2d2)
assert_allclose(d3d1, d3d2)
# should be the same as above because there's no distance, but just make sure this method works
idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_sky(ccatalog)
idx2, d2d2, d3d2 = match_coordinates_sky(cmatch, ccatalog)
npt.assert_array_equal(idx1, idx2)
assert_allclose(d2d1, d2d2)
assert_allclose(d3d1, d3d2)
assert len(idx1) == len(d2d1) == len(d3d1) == 20
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_search_around():
from astropy.coordinates import ICRS, SkyCoord
from astropy.coordinates.matching import search_around_sky, search_around_3d
coo1 = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc)
coo2 = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc)
idx1_1deg, idx2_1deg, d2d_1deg, d3d_1deg = search_around_sky(coo1, coo2, 1.01*u.deg)
idx1_0p05deg, idx2_0p05deg, d2d_0p05deg, d3d_0p05deg = search_around_sky(coo1, coo2, 0.05*u.deg)
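    # Each call returns matched index pairs (into coo1 and coo2) together with
    # their on-sky and 3D separations.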
assert list(zip(idx1_1deg, idx2_1deg)) == [(0, 2), (0, 3), (1, 1), (1, 2)]
assert_allclose(d2d_1deg[0], 1.0*u.deg, atol=1e-14*u.deg, rtol=0)
assert_allclose(d2d_1deg, [1, 0, .1, .9]*u.deg)
assert list(zip(idx1_0p05deg, idx2_0p05deg)) == [(0, 3)]
idx1_1kpc, idx2_1kpc, d2d_1kpc, d3d_1kpc = search_around_3d(coo1, coo2, 1*u.kpc)
idx1_sm, idx2_sm, d2d_sm, d3d_sm = search_around_3d(coo1, coo2, 0.05*u.kpc)
assert list(zip(idx1_1kpc, idx2_1kpc)) == [(0, 0), (0, 1), (0, 2), (1, 3)]
assert list(zip(idx1_sm, idx2_sm)) == [(0, 1), (0, 2)]
assert_allclose(d2d_sm, [2, 1]*u.deg)
# Test for the non-matches, #4877
coo1 = ICRS([4.1, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc)
idx1, idx2, d2d, d3d = search_around_sky(coo1, coo2, 1*u.arcsec)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
idx1, idx2, d2d, d3d = search_around_3d(coo1, coo2, 1*u.m)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
# Test when one or both of the coordinate arrays is empty, #4875
empty = ICRS(ra=[] * u.degree, dec=[] * u.degree, distance=[] * u.kpc)
idx1, idx2, d2d, d3d = search_around_sky(empty, coo2, 1*u.arcsec)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
idx1, idx2, d2d, d3d = search_around_sky(coo1, empty, 1*u.arcsec)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
empty = ICRS(ra=[] * u.degree, dec=[] * u.degree, distance=[] * u.kpc)
idx1, idx2, d2d, d3d = search_around_sky(empty, empty[:], 1*u.arcsec)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
idx1, idx2, d2d, d3d = search_around_3d(empty, coo2, 1*u.m)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
idx1, idx2, d2d, d3d = search_around_3d(coo1, empty, 1*u.m)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
idx1, idx2, d2d, d3d = search_around_3d(empty, empty[:], 1*u.m)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
# Test that input without distance units results in a
# 'dimensionless_unscaled' unit
cempty = SkyCoord(ra=[], dec=[], unit=u.deg)
idx1, idx2, d2d, d3d = search_around_3d(cempty, cempty[:], 1*u.m)
assert d2d.unit == u.deg
assert d3d.unit == u.dimensionless_unscaled
idx1, idx2, d2d, d3d = search_around_sky(cempty, cempty[:], 1*u.m)
assert d2d.unit == u.deg
assert d3d.unit == u.dimensionless_unscaled
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_search_around_scalar():
from astropy.coordinates import SkyCoord, Angle
cat = SkyCoord([1, 2, 3], [-30, 45, 8], unit="deg")
target = SkyCoord('1.1 -30.1', unit="deg")
with pytest.raises(ValueError) as excinfo:
cat.search_around_sky(target, Angle('2d'))
# make sure the error message is *specific* to search_around_sky rather than
# generic as reported in #3359
assert 'search_around_sky' in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
cat.search_around_3d(target, Angle('2d'))
assert 'search_around_3d' in str(excinfo.value)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_match_catalog_empty():
from astropy.coordinates import SkyCoord
sc1 = SkyCoord(1, 2, unit="deg")
cat0 = SkyCoord([], [], unit="deg")
cat1 = SkyCoord([1.1], [2.1], unit="deg")
cat2 = SkyCoord([1.1, 3], [2.1, 5], unit="deg")
sc1.match_to_catalog_sky(cat2)
sc1.match_to_catalog_3d(cat2)
sc1.match_to_catalog_sky(cat1)
sc1.match_to_catalog_3d(cat1)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_sky(cat1[0])
assert 'catalog' in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_3d(cat1[0])
assert 'catalog' in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_sky(cat0)
assert 'catalog' in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_3d(cat0)
assert 'catalog' in str(excinfo.value)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
@pytest.mark.filterwarnings(
r'ignore:invalid value encountered in.*:RuntimeWarning')
def test_match_catalog_nan():
from astropy.coordinates import SkyCoord, Galactic
sc1 = SkyCoord(1, 2, unit="deg")
sc_with_nans = SkyCoord(1, np.nan, unit="deg")
cat = SkyCoord([1.1, 3], [2.1, 5], unit="deg")
cat_with_nans = SkyCoord([1.1, np.nan], [2.1, 5], unit="deg")
galcat_with_nans = Galactic([1.2, np.nan]*u.deg, [5.6, 7.8]*u.deg)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_sky(cat_with_nans)
assert 'Catalog coordinates cannot contain' in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_3d(cat_with_nans)
assert 'Catalog coordinates cannot contain' in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_sky(galcat_with_nans)
assert 'Catalog coordinates cannot contain' in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_3d(galcat_with_nans)
assert 'Catalog coordinates cannot contain' in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc_with_nans.match_to_catalog_sky(cat)
assert 'Matching coordinates cannot contain' in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc_with_nans.match_to_catalog_3d(cat)
assert 'Matching coordinates cannot contain' in str(excinfo.value)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_match_catalog_nounit():
from astropy.coordinates import ICRS, CartesianRepresentation
from astropy.coordinates.matching import match_coordinates_sky
i1 = ICRS([[1], [2], [3]], representation_type=CartesianRepresentation)
i2 = ICRS([[1], [2], [4, 5]], representation_type=CartesianRepresentation)
i, sep, sep3d = match_coordinates_sky(i1, i2)
assert_allclose(sep3d, [1]*u.dimensionless_unscaled)
|
1e86f14b133d3776fcf5d3530ab727138d9525b9ef76692ba96d6e482bd45e90 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from contextlib import ExitStack
import pytest
import numpy as np
from numpy import testing as npt
from astropy import units as u
from astropy.time import Time
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.utils.compat import NUMPY_LT_1_19, NUMPY_LT_1_24
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.coordinates import (Angle, ICRS, FK4, FK5, Galactic, SkyCoord,
CartesianRepresentation)
def test_angle_arrays():
"""
    Test array values with Angle objects.
"""
# Tests incomplete
a1 = Angle([0, 45, 90, 180, 270, 360, 720.], unit=u.degree)
npt.assert_almost_equal([0., 45., 90., 180., 270., 360., 720.], a1.value)
a2 = Angle(np.array([-90, -45, 0, 45, 90, 180, 270, 360]), unit=u.degree)
npt.assert_almost_equal([-90, -45, 0, 45, 90, 180, 270, 360],
a2.value)
a3 = Angle(["12 degrees", "3 hours", "5 deg", "4rad"])
npt.assert_almost_equal([12., 45., 5., 229.18311805],
a3.value)
assert a3.unit == u.degree
a4 = Angle(["12 degrees", "3 hours", "5 deg", "4rad"], u.radian)
npt.assert_almost_equal(a4.degree, a3.value)
assert a4.unit == u.radian
a5 = Angle([0, 45, 90, 180, 270, 360], unit=u.degree)
a6 = a5.sum()
npt.assert_almost_equal(a6.value, 945.0)
assert a6.unit is u.degree
with ExitStack() as stack:
if NUMPY_LT_1_24:
stack.enter_context(pytest.raises(TypeError))
# Arrays where the elements are Angle objects are not supported -- it's
# really tricky to do correctly, if at all, due to the possibility of
# nesting.
if not NUMPY_LT_1_19:
stack.enter_context(
pytest.warns(DeprecationWarning,
match='automatic object dtype is deprecated'))
else:
stack.enter_context(pytest.raises(ValueError))
a7 = Angle([a1, a2, a3], unit=u.degree)
a8 = Angle(["04:02:02", "03:02:01", "06:02:01"], unit=u.degree)
npt.assert_almost_equal(a8.value, [4.03388889, 3.03361111, 6.03361111])
a9 = Angle(np.array(["04:02:02", "03:02:01", "06:02:01"]), unit=u.degree)
npt.assert_almost_equal(a9.value, a8.value)
with pytest.raises(u.UnitsError):
a10 = Angle(["04:02:02", "03:02:01", "06:02:01"])
def test_dms():
a1 = Angle([0, 45.5, -45.5], unit=u.degree)
d, m, s = a1.dms
npt.assert_almost_equal(d, [0, 45, -45])
npt.assert_almost_equal(m, [0, 30, -30])
npt.assert_almost_equal(s, [0, 0, -0])
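# Illustrative sketch (an editorial addition, not part of the original suite):
# the signed (d, m, s) tuple recombines to the original value, mirroring the
# hours recombination in ``test_hms`` below.
def test_dms_recombination_sketch():
    a = Angle([0, 45.5, -45.5], unit=u.degree)
    d, m, s = a.dms
    npt.assert_almost_equal(d + m / 60. + s / 3600., a.degree)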
def test_hms():
a1 = Angle([0, 11.5, -11.5], unit=u.hour)
h, m, s = a1.hms
npt.assert_almost_equal(h, [0, 11, -11])
npt.assert_almost_equal(m, [0, 30, -30])
npt.assert_almost_equal(s, [0, 0, -0])
hms = a1.hms
hours = hms[0] + hms[1] / 60. + hms[2] / 3600.
npt.assert_almost_equal(a1.hour, hours)
with pytest.warns(AstropyDeprecationWarning, match='hms_to_hours'):
a2 = Angle(hms, unit=u.hour)
npt.assert_almost_equal(a2.radian, a1.radian)
def test_array_coordinates_creation():
"""
Test creating coordinates from arrays.
"""
c = ICRS(np.array([1, 2])*u.deg, np.array([3, 4])*u.deg)
assert not c.ra.isscalar
with pytest.raises(ValueError):
c = ICRS(np.array([1, 2])*u.deg, np.array([3, 4, 5])*u.deg)
with pytest.raises(ValueError):
c = ICRS(np.array([1, 2, 4, 5])*u.deg, np.array([[3, 4], [5, 6]])*u.deg)
# make sure cartesian initialization also works
cart = CartesianRepresentation(x=[1., 2.]*u.kpc, y=[3., 4.]*u.kpc, z=[5., 6.]*u.kpc)
c = ICRS(cart)
# also ensure strings can be arrays
c = SkyCoord(['1d0m0s', '2h02m00.3s'], ['3d', '4d'])
# but invalid strings cannot
with pytest.raises(ValueError):
c = SkyCoord(Angle(['10m0s', '2h02m00.3s']), Angle(['3d', '4d']))
with pytest.raises(ValueError):
c = SkyCoord(Angle(['1d0m0s', '2h02m00.3s']), Angle(['3x', '4d']))
def test_array_coordinates_distances():
"""
Test creating coordinates from arrays and distances.
"""
# correct way
ICRS(ra=np.array([1, 2])*u.deg, dec=np.array([3, 4])*u.deg, distance=[.1, .2] * u.kpc)
with pytest.raises(ValueError):
# scalar distance and mismatched array coordinates
ICRS(ra=np.array([1, 2, 3])*u.deg, dec=np.array([[3, 4], [5, 6]])*u.deg, distance=2. * u.kpc)
with pytest.raises(ValueError):
# more distance values than coordinates
ICRS(ra=np.array([1, 2])*u.deg, dec=np.array([3, 4])*u.deg, distance=[.1, .2, 3.] * u.kpc)
@pytest.mark.parametrize(('arrshape', 'distance'), [((2, ), None), ((4, 2, 5), None), ((4, 2, 5), 2 * u.kpc)])
def test_array_coordinates_transformations(arrshape, distance):
"""
Test transformation on coordinates with array content (first length-2 1D, then a 3D array)
"""
# M31 coordinates from test_transformations
raarr = np.ones(arrshape) * 10.6847929
decarr = np.ones(arrshape) * 41.2690650
if distance is not None:
distance = np.ones(arrshape) * distance
print(raarr, decarr, distance)
c = ICRS(ra=raarr*u.deg, dec=decarr*u.deg, distance=distance)
g = c.transform_to(Galactic())
assert g.l.shape == arrshape
npt.assert_array_almost_equal(g.l.degree, 121.17440967)
npt.assert_array_almost_equal(g.b.degree, -21.57299631)
if distance is not None:
assert g.distance.unit == c.distance.unit
# now make sure round-tripping works through FK5
c2 = c.transform_to(FK5()).transform_to(ICRS())
npt.assert_array_almost_equal(c.ra.radian, c2.ra.radian)
npt.assert_array_almost_equal(c.dec.radian, c2.dec.radian)
assert c2.ra.shape == arrshape
if distance is not None:
assert c2.distance.unit == c.distance.unit
# also make sure it's possible to get to FK4, which uses a direct transform function.
fk4 = c.transform_to(FK4())
npt.assert_array_almost_equal(fk4.ra.degree, 10.0004, decimal=4)
npt.assert_array_almost_equal(fk4.dec.degree, 40.9953, decimal=4)
assert fk4.ra.shape == arrshape
if distance is not None:
assert fk4.distance.unit == c.distance.unit
# now check the reverse transforms run
cfk4 = fk4.transform_to(ICRS())
assert cfk4.ra.shape == arrshape
def test_array_precession():
"""
Ensures that FK5 coordinates as arrays precess their equinoxes
"""
j2000 = Time('J2000')
j1975 = Time('J1975')
fk5 = FK5([1, 1.1]*u.radian, [0.5, 0.6]*u.radian)
assert fk5.equinox.jyear == j2000.jyear
fk5_2 = fk5.transform_to(FK5(equinox=j1975))
assert fk5_2.equinox.jyear == j1975.jyear
npt.assert_array_less(0.05, np.abs(fk5.ra.degree - fk5_2.ra.degree))
npt.assert_array_less(0.05, np.abs(fk5.dec.degree - fk5_2.dec.degree))
def test_array_separation():
c1 = ICRS([0, 0]*u.deg, [0, 0]*u.deg)
c2 = ICRS([1, 2]*u.deg, [0, 0]*u.deg)
npt.assert_array_almost_equal(c1.separation(c2).degree, [1, 2])
c3 = ICRS([0, 3.]*u.deg, [0., 0]*u.deg, distance=[1, 1.] * u.kpc)
c4 = ICRS([1, 1.]*u.deg, [0., 0]*u.deg, distance=[1, 1.] * u.kpc)
# the 3-1 separation should be twice the 0-1 separation, but not *exactly* the same
sep = c3.separation_3d(c4)
sepdiff = sep[1] - (2 * sep[0])
assert abs(sepdiff.value) < 1e-5
assert sepdiff != 0
def test_array_indexing():
ra = np.linspace(0, 360, 10)
dec = np.linspace(-90, 90, 10)
j1975 = Time(1975, format='jyear')
c1 = FK5(ra*u.deg, dec*u.deg, equinox=j1975)
c2 = c1[4]
assert c2.ra.degree == 160
assert c2.dec.degree == -10
c3 = c1[2:5]
assert_allclose(c3.ra, [80, 120, 160] * u.deg)
assert_allclose(c3.dec, [-50, -30, -10] * u.deg)
c4 = c1[np.array([2, 5, 8])]
assert_allclose(c4.ra, [80, 200, 320] * u.deg)
assert_allclose(c4.dec, [-50, 10, 70] * u.deg)
# now make sure the equinox is preserved
assert c2.equinox == c1.equinox
assert c3.equinox == c1.equinox
assert c4.equinox == c1.equinox
def test_array_len():
input_length = [1, 5]
for length in input_length:
ra = np.linspace(0, 360, length)
dec = np.linspace(0, 90, length)
c = ICRS(ra*u.deg, dec*u.deg)
assert len(c) == length
assert c.shape == (length,)
with pytest.raises(TypeError):
c = ICRS(0*u.deg, 0*u.deg)
len(c)
assert c.shape == tuple()
def test_array_eq():
c1 = ICRS([1, 2]*u.deg, [3, 4]*u.deg)
c2 = ICRS([1, 2]*u.deg, [3, 5]*u.deg)
c3 = ICRS([1, 3]*u.deg, [3, 4]*u.deg)
c4 = ICRS([1, 2]*u.deg, [3, 4.2]*u.deg)
assert np.all(c1 == c1)
assert np.any(c1 != c2)
assert np.any(c1 != c3)
assert np.any(c1 != c4)
|
10fe8f481e13d291c02f0d37a72a3e14f0bcb103b0f59004f0f854b69232ac20 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.tests.helper import (assert_quantity_allclose as
assert_allclose_quantity)
from astropy.utils import isiterable
from astropy.utils.exceptions import DuplicateRepresentationWarning
from astropy.coordinates.angles import Longitude, Latitude, Angle
from astropy.coordinates.distances import Distance
from astropy.coordinates.matrix_utilities import rotation_matrix
from astropy.coordinates.representation import (
REPRESENTATION_CLASSES, DIFFERENTIAL_CLASSES, DUPLICATE_REPRESENTATIONS,
BaseRepresentation, SphericalRepresentation, UnitSphericalRepresentation,
SphericalCosLatDifferential, CartesianRepresentation, RadialRepresentation,
RadialDifferential, CylindricalRepresentation,
PhysicsSphericalRepresentation, CartesianDifferential,
SphericalDifferential, CylindricalDifferential,
PhysicsSphericalDifferential, UnitSphericalDifferential,
UnitSphericalCosLatDifferential)
# create matrices for use in testing ``.transform()`` methods
matrices = {
"rotation": rotation_matrix(-10, "z", u.deg),
"general": np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
}
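# Note: the "general" matrix is not orthogonal, so it is not a rotation.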
# Preserve the original REPRESENTATION_CLASSES dict so that importing
# the test file doesn't add a persistent test subclass (LogDRepresentation)
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
func.DUPLICATE_REPRESENTATIONS_ORIG = deepcopy(DUPLICATE_REPRESENTATIONS)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
DUPLICATE_REPRESENTATIONS.clear()
DUPLICATE_REPRESENTATIONS.update(func.DUPLICATE_REPRESENTATIONS_ORIG)
def components_equal(rep1, rep2):
result = True
if type(rep1) is not type(rep2):
return False
for component in rep1.components:
result &= getattr(rep1, component) == getattr(rep2, component)
return result
def components_allclose(rep1, rep2):
result = True
if type(rep1) is not type(rep2):
return False
for component in rep1.components:
result &= u.allclose(getattr(rep1, component), getattr(rep2, component))
return result
def representation_equal(rep1, rep2):
result = True
if type(rep1) is not type(rep2):
return False
if getattr(rep1, '_differentials', False):
if rep1._differentials.keys() != rep2._differentials.keys():
return False
for key, diff1 in rep1._differentials.items():
result &= components_equal(diff1, rep2._differentials[key])
elif getattr(rep2, '_differentials', False):
return False
return result & components_equal(rep1, rep2)
def representation_equal_up_to_angular_type(rep1, rep2):
result = True
if type(rep1) is not type(rep2):
return False
if getattr(rep1, '_differentials', False):
if rep1._differentials.keys() != rep2._differentials.keys():
return False
for key, diff1 in rep1._differentials.items():
result &= components_allclose(diff1, rep2._differentials[key])
elif getattr(rep2, '_differentials', False):
return False
return result & components_allclose(rep1, rep2)
class TestRadialRepresentation:
def test_transform(self):
"""Test the ``transform`` method. Only multiplication matrices pass."""
rep = RadialRepresentation(distance=10 * u.kpc)
# a rotation matrix does not work
matrix = rotation_matrix(10 * u.deg)
with pytest.raises(ValueError, match="scaled identity matrix"):
rep.transform(matrix)
# only a scaled identity matrix
matrix = 3 * np.identity(3)
newrep = rep.transform(matrix)
assert newrep.distance == 30 * u.kpc
# let's also check with differentials
dif = RadialDifferential(d_distance=-3 * u.km / u.s)
rep = rep.with_differentials(dict(s=dif))
newrep = rep.transform(matrix)
assert newrep.distance == 30 * u.kpc
assert newrep.differentials["s"].d_distance == -9 * u.km / u.s
class TestSphericalRepresentation:
def test_name(self):
assert SphericalRepresentation.get_name() == 'spherical'
assert SphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = SphericalRepresentation()
def test_init_quantity(self):
s3 = SphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc)
assert s3.lon == 8. * u.hourangle
assert s3.lat == 5. * u.deg
assert s3.distance == 10 * u.kpc
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
assert isinstance(s3.distance, Distance)
def test_init_no_mutate_input(self):
lon = -1 * u.hourangle
s = SphericalRepresentation(lon=lon, lat=-1 * u.deg, distance=1 * u.kpc, copy=True)
# The longitude component should be wrapped at 24 hours
assert_allclose_quantity(s.lon, 23 * u.hourangle)
# The input should not have been mutated by the constructor
assert_allclose_quantity(lon, -1 * u.hourangle)
def test_init_lonlat(self):
s2 = SphericalRepresentation(Longitude(8, u.hour),
Latitude(5, u.deg),
Distance(10, u.kpc))
assert s2.lon == 8. * u.hourangle
assert s2.lat == 5. * u.deg
assert s2.distance == 10. * u.kpc
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
assert isinstance(s2.distance, Distance)
# also test that wrap_angle is preserved
s3 = SphericalRepresentation(Longitude(-90, u.degree,
wrap_angle=180*u.degree),
Latitude(-45, u.degree),
Distance(1., u.Rsun))
assert s3.lon == -90. * u.degree
assert s3.lon.wrap_angle == 180 * u.degree
def test_init_subclass(self):
class Longitude180(Longitude):
_default_wrap_angle = 180*u.degree
s = SphericalRepresentation(Longitude180(-90, u.degree),
Latitude(-45, u.degree),
Distance(1., u.Rsun))
assert isinstance(s.lon, Longitude180)
assert s.lon == -90. * u.degree
assert s.lon.wrap_angle == 180 * u.degree
def test_init_array(self):
s1 = SphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg,
distance=[1, 2] * u.kpc)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert_allclose(s1.distance.kpc, [1, 2])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
assert isinstance(s1.distance, Distance)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
distance = Distance([1, 2] * u.kpc)
s1 = SphericalRepresentation(lon=lon, lat=lat, distance=distance, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
distance[:] = [8, 9] * u.Mpc
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
assert_allclose_quantity(distance, s1.distance)
def test_init_float32_array(self):
"""Regression test against #2983"""
lon = Longitude(np.float32([1., 2.]), u.degree)
lat = Latitude(np.float32([3., 4.]), u.degree)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
assert s1.lon.dtype == np.float32
assert s1.lat.dtype == np.float32
assert s1._values['lon'].dtype == np.float32
assert s1._values['lat'].dtype == np.float32
def test_reprobj(self):
s1 = SphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc)
s2 = SphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8. * u.hourangle)
assert_allclose_quantity(s2.lat, 5. * u.deg)
assert_allclose_quantity(s2.distance, 10 * u.kpc)
s3 = SphericalRepresentation(s1)
assert representation_equal(s1, s3)
def test_broadcasting(self):
s1 = SphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg,
distance=10 * u.kpc)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
assert_allclose_quantity(s1.distance, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = SphericalRepresentation(lon=[8, 9, 10] * u.hourangle,
lat=[5, 6] * u.deg,
distance=[1, 2] * u.kpc)
assert exc.value.args[0] == "Input parameters lon, lat, and distance cannot be broadcast"
def test_broadcasting_and_nocopy(self):
s1 = SphericalRepresentation(lon=[200] * u.deg,
lat=[0] * u.deg,
distance=[0] * u.kpc,
copy=False)
# With no copying, we should be able to modify the wrap angle of the longitude component
s1.lon.wrap_angle = 180 * u.deg
s2 = SphericalRepresentation(lon=[200] * u.deg,
lat=0 * u.deg,
distance=0 * u.kpc,
copy=False)
# We should be able to modify the wrap angle of the longitude component even if other
# components need to be broadcasted
s2.lon.wrap_angle = 180 * u.deg
def test_readonly(self):
s1 = SphericalRepresentation(lon=8 * u.hourangle,
lat=5 * u.deg,
distance=1. * u.kpc)
with pytest.raises(AttributeError):
s1.lon = 1. * u.deg
with pytest.raises(AttributeError):
s1.lat = 1. * u.deg
with pytest.raises(AttributeError):
s1.distance = 1. * u.kpc
def test_getitem_len_iterable(self):
s = SphericalRepresentation(lon=np.arange(10) * u.deg,
lat=-np.arange(10) * u.deg,
distance=1 * u.kpc)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.distance, [1, 1, 1] * u.kpc)
assert len(s) == 10
assert isiterable(s)
def test_getitem_len_iterable_scalar(self):
s = SphericalRepresentation(lon=1 * u.deg,
lat=-2 * u.deg,
distance=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
with pytest.raises(TypeError):
len(s)
assert not isiterable(s)
def test_setitem(self):
s = SphericalRepresentation(lon=np.arange(5) * u.deg,
lat=-np.arange(5) * u.deg,
distance=1 * u.kpc)
s[:2] = SphericalRepresentation(lon=10.*u.deg, lat=2.*u.deg,
distance=5.*u.kpc)
assert_allclose_quantity(s.lon, [10, 10, 2, 3, 4] * u.deg)
assert_allclose_quantity(s.lat, [2, 2, -2, -3, -4] * u.deg)
assert_allclose_quantity(s.distance, [5, 5, 1, 1, 1] * u.kpc)
def test_negative_distance(self):
"""Only allowed if explicitly passed on."""
with pytest.raises(ValueError, match='allow_negative'):
SphericalRepresentation(10*u.deg, 20*u.deg, -10*u.m)
s1 = SphericalRepresentation(10*u.deg, 20*u.deg,
Distance(-10*u.m, allow_negative=True))
assert s1.distance == -10.*u.m
def test_nan_distance(self):
""" This is a regression test: calling represent_as() and passing in the
same class as the object shouldn't round-trip through cartesian.
"""
sph = SphericalRepresentation(1*u.deg, 2*u.deg, np.nan*u.kpc)
new_sph = sph.represent_as(SphericalRepresentation)
assert_allclose_quantity(new_sph.lon, sph.lon)
assert_allclose_quantity(new_sph.lat, sph.lat)
dif = SphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr,
3*u.km/u.s)
sph = sph.with_differentials(dif)
new_sph = sph.represent_as(SphericalRepresentation)
assert_allclose_quantity(new_sph.lon, sph.lon)
assert_allclose_quantity(new_sph.lat, sph.lat)
def test_raise_on_extra_arguments(self):
with pytest.raises(TypeError, match='got multiple values'):
SphericalRepresentation(1*u.deg, 2*u.deg, 1.*u.kpc, lat=10)
with pytest.raises(TypeError, match='unexpected keyword.*parrot'):
SphericalRepresentation(1*u.deg, 2*u.deg, 1.*u.kpc, parrot=10)
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
difs = SphericalCosLatDifferential(4*u.mas/u.yr,5*u.mas/u.yr,6*u.km/u.s)
sph = SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc,
differentials={'s': difs})
got = sph.represent_as(PhysicsSphericalRepresentation,
PhysicsSphericalDifferential)
assert np.may_share_memory(sph.lon, got.phi)
assert np.may_share_memory(sph.distance, got.r)
expected = BaseRepresentation.represent_as(
sph, PhysicsSphericalRepresentation, PhysicsSphericalDifferential)
# equal up to angular type
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(UnitSphericalRepresentation,
UnitSphericalDifferential)
assert np.may_share_memory(sph.lon, got.lon)
assert np.may_share_memory(sph.lat, got.lat)
expected = BaseRepresentation.represent_as(
sph, UnitSphericalRepresentation, UnitSphericalDifferential)
assert representation_equal_up_to_angular_type(got, expected)
def test_transform(self):
"""Test ``.transform()`` on rotation and general matrices."""
# set up representation
ds1 = SphericalDifferential(
d_lon=[1, 2] * u.mas / u.yr, d_lat=[3, 4] * u.mas / u.yr,
d_distance=[-5, 6] * u.km / u.s)
s1 = SphericalRepresentation(lon=[1, 2] * u.deg, lat=[3, 4] * u.deg,
distance=[5, 6] * u.kpc, differentials=ds1)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = SphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
assert_allclose_quantity(s2.distance, s1.distance)
# check differentials. they shouldn't have changed.
assert_allclose_quantity(ds2.d_lon, ds1.d_lon)
assert_allclose_quantity(ds2.d_lat, ds1.d_lat)
assert_allclose_quantity(ds2.d_distance, ds1.d_distance)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds2.d_distance, dexpected.d_distance)
# now with a non-rotation matrix
# transform representation & get comparison (thru CartesianRep)
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (s1.represent_as(CartesianRepresentation,
CartesianDifferential)
.transform(matrices["general"])
.represent_as(SphericalRepresentation,
SphericalDifferential))
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.lon, expected.lon)
assert_allclose_quantity(s3.lat, expected.lat)
assert_allclose_quantity(s3.distance, expected.distance)
assert_allclose_quantity(ds3.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds3.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds3.d_distance, dexpected.d_distance)
def test_transform_with_NaN(self):
# all over again, but with a NaN in the distance
ds1 = SphericalDifferential(
d_lon=[1, 2] * u.mas / u.yr, d_lat=[3, 4] * u.mas / u.yr,
d_distance=[-5, 6] * u.km / u.s)
s1 = SphericalRepresentation(lon=[1, 2] * u.deg, lat=[3, 4] * u.deg,
distance=[5, np.nan] * u.kpc,
differentials=ds1)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = SphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
assert_allclose_quantity(s2.distance, s1.distance)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds2.d_distance, dexpected.d_distance)
# the 2nd component is NaN since the 2nd distance is NaN
# TODO! this will change when ``.transform`` skips Cartesian
assert_array_equal(np.isnan(ds2.d_lon), (False, True))
assert_array_equal(np.isnan(ds2.d_lat), (False, True))
assert_array_equal(np.isnan(ds2.d_distance), (False, True))
# now with a non-rotation matrix
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
thruC = (s1.represent_as(CartesianRepresentation,
CartesianDifferential)
.transform(matrices["general"])
.represent_as(SphericalRepresentation,
differential_class=SphericalDifferential))
dthruC = thruC.differentials["s"]
# s3 should not propagate NaN.
assert_array_equal(np.isnan(s3.lon), (False, False))
assert_array_equal(np.isnan(s3.lat), (False, False))
assert_array_equal(np.isnan(s3.distance), (False, True))
# ds3 does, because there currently aren't any shortcuts on the transform
assert_array_equal(np.isnan(ds3.d_lon), (False, True))
assert_array_equal(np.isnan(ds3.d_lat), (False, True))
assert_array_equal(np.isnan(ds3.d_distance), (False, True))
# going through Cartesian does propagate NaN
assert_array_equal(np.isnan(thruC.lon), (False, True))
assert_array_equal(np.isnan(thruC.lat), (False, True))
assert_array_equal(np.isnan(thruC.distance), (False, True))
assert_array_equal(np.isnan(dthruC.d_lon), (False, True))
assert_array_equal(np.isnan(dthruC.d_lat), (False, True))
assert_array_equal(np.isnan(dthruC.d_distance), (False, True))
# test that they are close on the first value
assert_allclose_quantity(s3.lon[0], thruC.lon[0])
assert_allclose_quantity(s3.lat[0], thruC.lat[0])
assert_allclose_quantity(ds3.d_lon[0], dthruC.d_lon[0])
assert_allclose_quantity(ds3.d_lat[0], dthruC.d_lat[0])
class TestUnitSphericalRepresentation:
def test_name(self):
assert UnitSphericalRepresentation.get_name() == 'unitspherical'
assert UnitSphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = UnitSphericalRepresentation()
def test_init_quantity(self):
s3 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
assert s3.lon == 8. * u.hourangle
assert s3.lat == 5. * u.deg
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
def test_init_lonlat(self):
s2 = UnitSphericalRepresentation(Longitude(8, u.hour),
Latitude(5, u.deg))
assert s2.lon == 8. * u.hourangle
assert s2.lat == 5. * u.deg
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
def test_init_array(self):
s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
def test_reprobj(self):
s1 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
s2 = UnitSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8. * u.hourangle)
assert_allclose_quantity(s2.lat, 5. * u.deg)
s3 = UnitSphericalRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = UnitSphericalRepresentation(lon=[8, 9, 10] * u.hourangle,
lat=[5, 6] * u.deg)
assert exc.value.args[0] == "Input parameters lon and lat cannot be broadcast"
def test_readonly(self):
s1 = UnitSphericalRepresentation(lon=8 * u.hourangle,
lat=5 * u.deg)
with pytest.raises(AttributeError):
s1.lon = 1. * u.deg
with pytest.raises(AttributeError):
s1.lat = 1. * u.deg
def test_getitem(self):
s = UnitSphericalRepresentation(lon=np.arange(10) * u.deg,
lat=-np.arange(10) * u.deg)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
def test_getitem_scalar(self):
s = UnitSphericalRepresentation(lon=1 * u.deg,
lat=-2 * u.deg)
with pytest.raises(TypeError):
s_slc = s[0]
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
# TODO! representation transformations with differentials cannot
# (currently) be implemented due to a mismatch between the UnitSpherical
# expected keys (e.g. "s") and that expected in the other class
# (here "s / m"). For more info, see PR #11467
# We leave the test code commented out for future use.
# diffs = UnitSphericalCosLatDifferential(4*u.mas/u.yr, 5*u.mas/u.yr,
# 6*u.km/u.s)
sph = UnitSphericalRepresentation(1*u.deg, 2*u.deg)
# , differentials={'s': diffs}
got = sph.represent_as(PhysicsSphericalRepresentation)
# , PhysicsSphericalDifferential)
assert np.may_share_memory(sph.lon, got.phi)
expected = BaseRepresentation.represent_as(
sph, PhysicsSphericalRepresentation) # PhysicsSphericalDifferential
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(SphericalRepresentation)
# , SphericalDifferential)
assert np.may_share_memory(sph.lon, got.lon)
assert np.may_share_memory(sph.lat, got.lat)
expected = BaseRepresentation.represent_as(
sph, SphericalRepresentation) # , SphericalDifferential)
assert representation_equal_up_to_angular_type(got, expected)
def test_transform(self):
"""Test ``.transform()`` on rotation and general matrices."""
# set up representation
ds1 = UnitSphericalDifferential(d_lon=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,)
s1 = UnitSphericalRepresentation(lon=[1, 2] * u.deg, lat=[3, 4] * u.deg,
differentials=ds1)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = UnitSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
# compare differentials. they should be unchanged (ds1).
assert_allclose_quantity(ds2.d_lon, ds1.d_lon)
assert_allclose_quantity(ds2.d_lat, ds1.d_lat)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert not hasattr(ds2, "d_distance")
# now with a non-rotation matrix
# note that the result will be a Spherical, not UnitSpherical
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (s1.represent_as(CartesianRepresentation,
CartesianDifferential)
.transform(matrices["general"])
.represent_as(SphericalRepresentation,
differential_class=SphericalDifferential))
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.lon, expected.lon)
assert_allclose_quantity(s3.lat, expected.lat)
assert_allclose_quantity(s3.distance, expected.distance)
assert_allclose_quantity(ds3.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds3.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds3.d_distance, dexpected.d_distance)
class TestPhysicsSphericalRepresentation:
def test_name(self):
assert PhysicsSphericalRepresentation.get_name() == 'physicsspherical'
assert PhysicsSphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = PhysicsSphericalRepresentation()
def test_init_quantity(self):
s3 = PhysicsSphericalRepresentation(phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc)
assert s3.phi == 8. * u.hourangle
assert s3.theta == 5. * u.deg
assert s3.r == 10 * u.kpc
assert isinstance(s3.phi, Angle)
assert isinstance(s3.theta, Angle)
assert isinstance(s3.r, Distance)
def test_init_phitheta(self):
s2 = PhysicsSphericalRepresentation(Angle(8, u.hour),
Angle(5, u.deg),
Distance(10, u.kpc))
assert s2.phi == 8. * u.hourangle
assert s2.theta == 5. * u.deg
assert s2.r == 10. * u.kpc
assert isinstance(s2.phi, Angle)
assert isinstance(s2.theta, Angle)
assert isinstance(s2.r, Distance)
def test_init_array(self):
s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle,
theta=[5, 6] * u.deg,
r=[1, 2] * u.kpc)
assert_allclose(s1.phi.degree, [120, 135])
assert_allclose(s1.theta.degree, [5, 6])
assert_allclose(s1.r.kpc, [1, 2])
assert isinstance(s1.phi, Angle)
assert isinstance(s1.theta, Angle)
assert isinstance(s1.r, Distance)
def test_init_array_nocopy(self):
phi = Angle([8, 9] * u.hourangle)
theta = Angle([5, 6] * u.deg)
r = Distance([1, 2] * u.kpc)
s1 = PhysicsSphericalRepresentation(phi=phi, theta=theta, r=r, copy=False)
phi[:] = [1, 2] * u.rad
theta[:] = [3, 4] * u.arcmin
r[:] = [8, 9] * u.Mpc
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(theta, s1.theta)
assert_allclose_quantity(r, s1.r)
def test_reprobj(self):
s1 = PhysicsSphericalRepresentation(phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.phi, 8. * u.hourangle)
assert_allclose_quantity(s2.theta, 5. * u.deg)
assert_allclose_quantity(s2.r, 10 * u.kpc)
s3 = PhysicsSphericalRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle,
theta=[5, 6] * u.deg,
r=10 * u.kpc)
assert_allclose_quantity(s1.phi, [120, 135] * u.degree)
assert_allclose_quantity(s1.theta, [5, 6] * u.degree)
assert_allclose_quantity(s1.r, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = PhysicsSphericalRepresentation(phi=[8, 9, 10] * u.hourangle,
theta=[5, 6] * u.deg,
r=[1, 2] * u.kpc)
assert exc.value.args[0] == "Input parameters phi, theta, and r cannot be broadcast"
def test_readonly(self):
s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle,
theta=[5, 6] * u.deg,
r=[10, 20] * u.kpc)
with pytest.raises(AttributeError):
s1.phi = 1. * u.deg
with pytest.raises(AttributeError):
s1.theta = 1. * u.deg
with pytest.raises(AttributeError):
s1.r = 1. * u.kpc
def test_getitem(self):
s = PhysicsSphericalRepresentation(phi=np.arange(10) * u.deg,
theta=np.arange(5, 15) * u.deg,
r=1 * u.kpc)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.phi, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.theta, [7, 9, 11] * u.deg)
assert_allclose_quantity(s_slc.r, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = PhysicsSphericalRepresentation(phi=1 * u.deg,
theta=2 * u.deg,
r=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
difs = PhysicsSphericalDifferential(4*u.mas/u.yr,5*u.mas/u.yr,6*u.km/u.s)
sph = PhysicsSphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc,
differentials={'s': difs})
got = sph.represent_as(SphericalRepresentation,
SphericalDifferential)
assert np.may_share_memory(sph.phi, got.lon)
assert np.may_share_memory(sph.r, got.distance)
expected = BaseRepresentation.represent_as(
sph, SphericalRepresentation, SphericalDifferential)
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(UnitSphericalRepresentation,
UnitSphericalDifferential)
assert np.may_share_memory(sph.phi, got.lon)
expected = BaseRepresentation.represent_as(
sph, UnitSphericalRepresentation, UnitSphericalDifferential)
assert representation_equal_up_to_angular_type(got, expected)
def test_initialize_with_nan(self):
# Regression test for gh-11558: initialization used to fail.
psr = PhysicsSphericalRepresentation([1., np.nan]*u.deg, [np.nan, 2.]*u.deg,
[3., np.nan]*u.m)
assert_array_equal(np.isnan(psr.phi), [False, True])
assert_array_equal(np.isnan(psr.theta), [True, False])
assert_array_equal(np.isnan(psr.r), [False, True])
def test_transform(self):
"""Test ``.transform()`` on rotation and general transform matrices."""
# set up representation
ds1 = PhysicsSphericalDifferential(
d_phi=[1, 2] * u.mas / u.yr, d_theta=[3, 4] * u.mas / u.yr,
d_r=[-5, 6] * u.km / u.s)
s1 = PhysicsSphericalRepresentation(
phi=[1, 2] * u.deg, theta=[3, 4] * u.deg, r=[5, 6] * u.kpc,
differentials=ds1)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = PhysicsSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2)
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.theta, s1.theta)
assert_allclose_quantity(s2.r, s1.r)
# compare differentials. should be unchanged (ds1).
assert_allclose_quantity(ds2.d_phi, ds1.d_phi)
assert_allclose_quantity(ds2.d_theta, ds1.d_theta)
assert_allclose_quantity(ds2.d_r, ds1.d_r)
assert_allclose_quantity(ds2.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds2.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds2.d_r, dexpected.d_r)
# now with a non-rotation matrix
# transform representation & get comparison (thru CartesianRep)
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (s1.represent_as(CartesianRepresentation,
CartesianDifferential)
.transform(matrices["general"])
.represent_as(PhysicsSphericalRepresentation,
PhysicsSphericalDifferential))
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.phi, expected.phi)
assert_allclose_quantity(s3.theta, expected.theta)
assert_allclose_quantity(s3.r, expected.r)
assert_allclose_quantity(ds3.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds3.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds3.d_r, dexpected.d_r)
def test_transform_with_NaN(self):
# all over again, but with a NaN in the distance
ds1 = PhysicsSphericalDifferential(
d_phi=[1, 2] * u.mas / u.yr, d_theta=[3, 4] * u.mas / u.yr,
d_r=[-5, 6] * u.km / u.s)
s1 = PhysicsSphericalRepresentation(
phi=[1, 2] * u.deg, theta=[3, 4] * u.deg, r=[5, np.nan] * u.kpc,
differentials=ds1)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = PhysicsSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2)
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.theta, s1.theta)
assert_allclose_quantity(s2.r, s1.r)
assert_allclose_quantity(ds2.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds2.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds2.d_r, dexpected.d_r)
# now with a non-rotation matrix
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
thruC = (s1.represent_as(CartesianRepresentation,
CartesianDifferential)
.transform(matrices["general"])
.represent_as(PhysicsSphericalRepresentation,
PhysicsSphericalDifferential))
dthruC = thruC.differentials["s"]
# s3 should not propagate NaN.
assert_array_equal(np.isnan(s3.phi), (False, False))
assert_array_equal(np.isnan(s3.theta), (False, False))
assert_array_equal(np.isnan(s3.r), (False, True))
# ds3 does, because there currently aren't any shortcuts on the transform
assert_array_equal(np.isnan(ds3.d_phi), (False, True))
assert_array_equal(np.isnan(ds3.d_theta), (False, True))
assert_array_equal(np.isnan(ds3.d_r), (False, True))
# going through Cartesian does propagate NaN
assert_array_equal(np.isnan(thruC.phi), (False, True))
assert_array_equal(np.isnan(thruC.theta), (False, True))
assert_array_equal(np.isnan(thruC.r), (False, True))
# so only test on the first value
assert_allclose_quantity(s3.phi[0], thruC.phi[0])
assert_allclose_quantity(s3.theta[0], thruC.theta[0])
assert_allclose_quantity(ds3.d_phi[0], dthruC.d_phi[0])
assert_allclose_quantity(ds3.d_theta[0], dthruC.d_theta[0])
class TestCartesianRepresentation:
def test_name(self):
assert CartesianRepresentation.get_name() == 'cartesian'
assert CartesianRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = CartesianRepresentation()
def test_init_quantity(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_singleunit(self):
s1 = CartesianRepresentation(x=1, y=2, z=3, unit=u.kpc)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_array(self):
s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc,
y=[2, 3, 4] * u.Mpc,
z=[3, 4, 5] * u.kpc)
assert s1.x.unit is u.pc
assert s1.y.unit is u.Mpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, [1, 2, 3])
assert_allclose(s1.y.value, [2, 3, 4])
assert_allclose(s1.z.value, [3, 4, 5])
def test_init_one_array(self):
s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc)
assert s1.x.unit is u.pc
assert s1.y.unit is u.pc
assert s1.z.unit is u.pc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
r = np.arange(27.).reshape(3, 3, 3) * u.kpc
s2 = CartesianRepresentation(r, xyz_axis=0)
assert s2.shape == (3, 3)
assert s2.x.unit == u.kpc
assert np.all(s2.x == r[0])
assert np.all(s2.xyz == r)
assert np.all(s2.get_xyz(xyz_axis=0) == r)
s3 = CartesianRepresentation(r, xyz_axis=1)
assert s3.shape == (3, 3)
assert np.all(s3.x == r[:, 0])
assert np.all(s3.y == r[:, 1])
assert np.all(s3.z == r[:, 2])
assert np.all(s3.get_xyz(xyz_axis=1) == r)
s4 = CartesianRepresentation(r, xyz_axis=2)
assert s4.shape == (3, 3)
assert np.all(s4.x == r[:, :, 0])
assert np.all(s4.get_xyz(xyz_axis=2) == r)
s5 = CartesianRepresentation(r, unit=u.pc)
assert s5.x.unit == u.pc
assert np.all(s5.xyz == r)
s6 = CartesianRepresentation(r.value, unit=u.pc, xyz_axis=2)
assert s6.x.unit == u.pc
assert np.all(s6.get_xyz(xyz_axis=2).value == r.value)
def test_init_one_array_size_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(x=[1, 2, 3, 4] * u.pc)
assert exc.value.args[0].startswith("too many values to unpack")
def test_init_xyz_but_more_than_one_array_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(x=[1, 2, 3] * u.pc, y=[2, 3, 4] * u.pc,
z=[3, 4, 5] * u.pc, xyz_axis=0)
assert 'xyz_axis should only be set' in str(exc.value)
def test_init_one_array_yz_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(x=[1, 2, 3, 4] * u.pc, y=[1, 2] * u.pc)
assert exc.value.args[0] == ("x, y, and z are required to instantiate "
"CartesianRepresentation")
def test_init_array_nocopy(self):
x = [8, 9, 10] * u.pc
y = [5, 6, 7] * u.Mpc
z = [2, 3, 4] * u.kpc
s1 = CartesianRepresentation(x=x, y=y, z=z, copy=False)
x[:] = [1, 2, 3] * u.kpc
y[:] = [9, 9, 8] * u.kpc
z[:] = [1, 2, 1] * u.kpc
assert_allclose_quantity(x, s1.x)
assert_allclose_quantity(y, s1.y)
assert_allclose_quantity(z, s1.z)
def test_xyz_is_view_if_possible(self):
xyz = np.arange(1., 10.).reshape(3, 3)
s1 = CartesianRepresentation(xyz, unit=u.kpc, copy=False)
s1_xyz = s1.xyz
assert s1_xyz.value[0, 0] == 1.
xyz[0, 0] = 0.
assert s1.x[0] == 0.
assert s1_xyz.value[0, 0] == 0.
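# (a view is possible above because x, y, and z were sliced from a single
#  contiguous array, so ``.xyz`` can be reassembled without copying)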
# Not possible here: we don't check whether separately passed components come from the same array
xyz = np.arange(1., 10.).reshape(3, 3)
s2 = CartesianRepresentation(*xyz, unit=u.kpc, copy=False)
s2_xyz = s2.xyz
assert s2_xyz.value[0, 0] == 1.
xyz[0, 0] = 0.
assert s2.x[0] == 0.
assert s2_xyz.value[0, 0] == 1.
def test_reprobj(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
s2 = CartesianRepresentation.from_representation(s1)
assert s2.x == 1 * u.kpc
assert s2.y == 2 * u.kpc
assert s2.z == 3 * u.kpc
s3 = CartesianRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=5 * u.kpc)
assert s1.x.unit == u.kpc
assert s1.y.unit == u.kpc
assert s1.z.unit == u.kpc
assert_allclose(s1.x.value, [1, 2])
assert_allclose(s1.y.value, [3, 4])
assert_allclose(s1.z.value, [5, 5])
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6, 7] * u.kpc)
assert exc.value.args[0] == "Input parameters x, y, and z cannot be broadcast"
def test_readonly(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
with pytest.raises(AttributeError):
s1.x = 1. * u.kpc
with pytest.raises(AttributeError):
s1.y = 1. * u.kpc
with pytest.raises(AttributeError):
s1.z = 1. * u.kpc
def test_xyz(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert isinstance(s1.xyz, u.Quantity)
assert s1.xyz.unit is u.kpc
assert_allclose(s1.xyz.value, [1, 2, 3])
def test_unit_mismatch(self):
q_len = u.Quantity([1], u.km)
q_nonlen = u.Quantity([1], u.kg)
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_nonlen, y=q_len, z=q_len)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_len, y=q_nonlen, z=q_len)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_len, y=q_len, z=q_nonlen)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
def test_unit_non_length(self):
s1 = CartesianRepresentation(x=1 * u.kg, y=2 * u.kg, z=3 * u.kg)
s2 = CartesianRepresentation(x=1 * u.km / u.s, y=2 * u.km / u.s, z=3 * u.km / u.s)
banana = u.def_unit('banana')
s3 = CartesianRepresentation(x=1 * banana, y=2 * banana, z=3 * banana)
def test_getitem(self):
s = CartesianRepresentation(x=np.arange(10) * u.m,
y=-np.arange(10) * u.m,
z=3 * u.km)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)
def test_getitem_scalar(self):
s = CartesianRepresentation(x=1 * u.m,
y=-2 * u.m,
z=3 * u.km)
with pytest.raises(TypeError):
s_slc = s[0]
def test_transform(self):
ds1 = CartesianDifferential(d_x=[1, 2] * u.km / u.s,
d_y=[3, 4] * u.km / u.s,
d_z=[5, 6] * u.km / u.s)
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc,
z=[5, 6] * u.kpc, differentials=ds1)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["general"])
ds2 = s2.differentials["s"]
dexpected = CartesianDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["general"]), base=s2)
assert_allclose_quantity(ds2.d_x, dexpected.d_x)
assert_allclose_quantity(ds2.d_y, dexpected.d_y)
assert_allclose_quantity(ds2.d_z, dexpected.d_z)
# also explicitly calculate, since we can
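# (the expected values below assume matrices["general"] is
#  np.arange(1, 10).reshape(3, 3), i.e. [[1, 2, 3], [4, 5, 6], [7, 8, 9]])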
assert_allclose(s2.x.value, [1 * 1 + 2 * 3 + 3 * 5, 1 * 2 + 2 * 4 + 3 * 6])
assert_allclose(s2.y.value, [4 * 1 + 5 * 3 + 6 * 5, 4 * 2 + 5 * 4 + 6 * 6])
assert_allclose(s2.z.value, [7 * 1 + 8 * 3 + 9 * 5, 7 * 2 + 8 * 4 + 9 * 6])
assert_allclose(ds2.d_x.value, [1 * 1 + 2 * 3 + 3 * 5, 1 * 2 + 2 * 4 + 3 * 6])
assert_allclose(ds2.d_y.value, [4 * 1 + 5 * 3 + 6 * 5, 4 * 2 + 5 * 4 + 6 * 6])
assert_allclose(ds2.d_z.value, [7 * 1 + 8 * 3 + 9 * 5, 7 * 2 + 8 * 4 + 9 * 6])
assert s2.x.unit is u.kpc
assert s2.y.unit is u.kpc
assert s2.z.unit is u.kpc
assert ds2.d_x.unit == u.km / u.s
assert ds2.d_y.unit == u.km / u.s
assert ds2.d_z.unit == u.km / u.s
class TestCylindricalRepresentation:
def test_name(self):
assert CylindricalRepresentation.get_name() == 'cylindrical'
assert CylindricalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = CylindricalRepresentation()
def test_init_quantity(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc)
assert s1.rho.unit is u.kpc
assert s1.phi.unit is u.deg
assert s1.z.unit is u.kpc
assert_allclose(s1.rho.value, 1)
assert_allclose(s1.phi.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_array(self):
s1 = CylindricalRepresentation(rho=[1, 2, 3] * u.pc,
phi=[2, 3, 4] * u.deg,
z=[3, 4, 5] * u.kpc)
assert s1.rho.unit is u.pc
assert s1.phi.unit is u.deg
assert s1.z.unit is u.kpc
assert_allclose(s1.rho.value, [1, 2, 3])
assert_allclose(s1.phi.value, [2, 3, 4])
assert_allclose(s1.z.value, [3, 4, 5])
def test_init_array_nocopy(self):
rho = [8, 9, 10] * u.pc
phi = [5, 6, 7] * u.deg
z = [2, 3, 4] * u.kpc
s1 = CylindricalRepresentation(rho=rho, phi=phi, z=z, copy=False)
rho[:] = [9, 2, 3] * u.kpc
phi[:] = [1, 2, 3] * u.arcmin
z[:] = [-2, 3, 8] * u.kpc
assert_allclose_quantity(rho, s1.rho)
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(z, s1.z)
def test_reprobj(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc)
s2 = CylindricalRepresentation.from_representation(s1)
assert s2.rho == 1 * u.kpc
assert s2.phi == 2 * u.deg
assert s2.z == 3 * u.kpc
s3 = CylindricalRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = CylindricalRepresentation(rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=5 * u.kpc)
assert s1.rho.unit == u.kpc
assert s1.phi.unit == u.deg
assert s1.z.unit == u.kpc
assert_allclose(s1.rho.value, [1, 2])
assert_allclose(s1.phi.value, [3, 4])
assert_allclose(s1.z.value, [5, 5])
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = CylindricalRepresentation(rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=[5, 6, 7] * u.kpc)
assert exc.value.args[0] == "Input parameters rho, phi, and z cannot be broadcast"
def test_readonly(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc,
phi=20 * u.deg,
z=3 * u.kpc)
with pytest.raises(AttributeError):
s1.rho = 1. * u.kpc
with pytest.raises(AttributeError):
s1.phi = 20 * u.deg
with pytest.raises(AttributeError):
s1.z = 1. * u.kpc
def test_unit_mismatch(self):
q_len = u.Quantity([1], u.kpc)
q_nonlen = u.Quantity([1], u.kg)
with pytest.raises(u.UnitsError) as exc:
s1 = CylindricalRepresentation(rho=q_nonlen, phi=10 * u.deg, z=q_len)
assert exc.value.args[0] == "rho and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CylindricalRepresentation(rho=q_len, phi=10 * u.deg, z=q_nonlen)
assert exc.value.args[0] == "rho and z should have matching physical types"
def test_getitem(self):
s = CylindricalRepresentation(rho=np.arange(10) * u.pc,
phi=-np.arange(10) * u.deg,
z=1 * u.kpc)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.rho, [2, 4, 6] * u.pc)
assert_allclose_quantity(s_slc.phi, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.z, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = CylindricalRepresentation(rho=1 * u.pc,
phi=-2 * u.deg,
z=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
def test_transform(self):
s1 = CylindricalRepresentation(phi=[1, 2] * u.deg, z=[3, 4] * u.pc,
rho=[5, 6] * u.kpc)
s2 = s1.transform(matrices["rotation"])
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.z, s1.z)
assert_allclose_quantity(s2.rho, s1.rho)
assert s2.phi.unit is u.rad
assert s2.z.unit is u.kpc
assert s2.rho.unit is u.kpc
# now with a non-rotation matrix
s3 = s1.transform(matrices["general"])
expected = (s1.to_cartesian().transform(matrices["general"])
).represent_as(CylindricalRepresentation)
assert_allclose_quantity(s3.phi, expected.phi)
assert_allclose_quantity(s3.z, expected.z)
assert_allclose_quantity(s3.rho, expected.rho)
class TestUnitSphericalCosLatDifferential:
@pytest.mark.parametrize("matrix", list(matrices.values()))
def test_transform(self, matrix):
"""Test ``.transform()`` on rotation and general matrices."""
# set up representation
ds1 = UnitSphericalCosLatDifferential(d_lon_coslat=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,)
s1 = UnitSphericalRepresentation(lon=[1, 2] * u.deg, lat=[3, 4] * u.deg)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrix)
ds2 = ds1.transform(matrix, s1, s2)
dexpected = UnitSphericalCosLatDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrix), base=s2)
assert_allclose_quantity(ds2.d_lon_coslat, dexpected.d_lon_coslat)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
def test_cartesian_spherical_roundtrip():
s1 = CartesianRepresentation(x=[1, 2000.] * u.kpc,
y=[3000., 4.] * u.pc,
z=[5., 6000.] * u.pc)
s2 = SphericalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = SphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.lon, s4.lon)
assert_allclose_quantity(s2.lat, s4.lat)
assert_allclose_quantity(s2.distance, s4.distance)
def test_cartesian_setting_with_other():
s1 = CartesianRepresentation(x=[1, 2000.] * u.kpc,
y=[3000., 4.] * u.pc,
z=[5., 6000.] * u.pc)
s1[0] = SphericalRepresentation(0.*u.deg, 0.*u.deg, 1*u.kpc)
assert_allclose_quantity(s1.x, [1., 2000.] * u.kpc)
assert_allclose_quantity(s1.y, [0., 4.] * u.pc)
assert_allclose_quantity(s1.z, [0., 6000.] * u.pc)
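# a UnitSphericalRepresentation carries no distance, so assigning it into
# a CartesianRepresentation would lose information and must raise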
with pytest.raises(ValueError, match='loss of information'):
s1[1] = UnitSphericalRepresentation(0.*u.deg, 10.*u.deg)
def test_cartesian_physics_spherical_roundtrip():
s1 = CartesianRepresentation(x=[1, 2000.] * u.kpc,
y=[3000., 4.] * u.pc,
z=[5., 6000.] * u.pc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = PhysicsSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.theta, s4.theta)
assert_allclose_quantity(s2.r, s4.r)
def test_spherical_physics_spherical_roundtrip():
s1 = SphericalRepresentation(lon=3 * u.deg, lat=4 * u.deg, distance=3 * u.kpc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
s3 = SphericalRepresentation.from_representation(s2)
s4 = PhysicsSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.lon, s3.lon)
assert_allclose_quantity(s1.lat, s3.lat)
assert_allclose_quantity(s1.distance, s3.distance)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.theta, s4.theta)
assert_allclose_quantity(s2.r, s4.r)
assert_allclose_quantity(s1.lon, s4.phi)
assert_allclose_quantity(s1.lat, 90. * u.deg - s4.theta)
assert_allclose_quantity(s1.distance, s4.r)
def test_cartesian_cylindrical_roundtrip():
s1 = CartesianRepresentation(x=np.array([1., 2000.]) * u.kpc,
y=np.array([3000., 4.]) * u.pc,
z=np.array([5., 600.]) * u.cm)
s2 = CylindricalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = CylindricalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.rho, s4.rho)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.z, s4.z)
def test_unit_spherical_roundtrip():
s1 = UnitSphericalRepresentation(lon=[10., 30.] * u.deg,
lat=[5., 6.] * u.arcmin)
s2 = CartesianRepresentation.from_representation(s1)
s3 = SphericalRepresentation.from_representation(s2)
s4 = UnitSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.lon, s4.lon)
assert_allclose_quantity(s1.lat, s4.lat)
def test_no_unnecessary_copies():
s1 = UnitSphericalRepresentation(lon=[10., 30.] * u.deg,
lat=[5., 6.] * u.arcmin)
s2 = s1.represent_as(UnitSphericalRepresentation)
assert s2 is s1
assert np.may_share_memory(s1.lon, s2.lon)
assert np.may_share_memory(s1.lat, s2.lat)
s3 = s1.represent_as(SphericalRepresentation)
assert np.may_share_memory(s1.lon, s3.lon)
assert np.may_share_memory(s1.lat, s3.lat)
s4 = s1.represent_as(CartesianRepresentation)
s5 = s4.represent_as(CylindricalRepresentation)
assert np.may_share_memory(s5.z, s4.z)
def test_representation_repr():
r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc)
assert repr(r1) == ('<SphericalRepresentation (lon, lat, distance) in (deg, deg, kpc)\n'
' (1., 2.5, 1.)>')
r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert repr(r2) == ('<CartesianRepresentation (x, y, z) in kpc\n'
' (1., 2., 3.)>')
r3 = CartesianRepresentation(x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc)
assert repr(r3) == ('<CartesianRepresentation (x, y, z) in kpc\n'
' [(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)]>')
def test_representation_repr_multi_d():
"""Regression test for #5889."""
cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit='m')
assert repr(cr) == (
'<CartesianRepresentation (x, y, z) in m\n'
' [[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n'
' [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n'
' [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]]>')
# This was broken before.
assert repr(cr.T) == (
'<CartesianRepresentation (x, y, z) in m\n'
' [[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n'
' [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n'
' [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]]>')
def test_representation_str():
r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc)
assert str(r1) == '(1., 2.5, 1.) (deg, deg, kpc)'
r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert str(r2) == '(1., 2., 3.) kpc'
r3 = CartesianRepresentation(x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc)
assert str(r3) == '[(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)] kpc'
def test_representation_str_multi_d():
"""Regression test for #5889."""
cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit='m')
assert str(cr) == (
'[[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n'
' [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n'
' [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]] m')
# This was broken before.
assert str(cr.T) == (
'[[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n'
' [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n'
' [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]] m')
def test_subclass_representation():
from astropy.coordinates.builtin_frames import ICRS
class Longitude180(Longitude):
def __new__(cls, angle, unit=None, wrap_angle=180 * u.deg, **kwargs):
self = super().__new__(cls, angle, unit=unit, wrap_angle=wrap_angle,
**kwargs)
return self
class SphericalWrap180Representation(SphericalRepresentation):
attr_classes = {'lon': Longitude180,
'lat': Latitude,
'distance': u.Quantity}
class ICRSWrap180(ICRS):
frame_specific_representation_info = ICRS._frame_specific_representation_info.copy()
frame_specific_representation_info[SphericalWrap180Representation] = \
frame_specific_representation_info[SphericalRepresentation]
default_representation = SphericalWrap180Representation
c = ICRSWrap180(ra=-1 * u.deg, dec=-2 * u.deg, distance=1 * u.m)
assert c.ra.value == -1
assert c.ra.unit is u.deg
assert c.dec.value == -2
assert c.dec.unit is u.deg
def test_minimal_subclass():
# Basically a check that what we document works;
# see doc/coordinates/representations.rst
class LogDRepresentation(BaseRepresentation):
attr_classes = {'lon': Longitude,
'lat': Latitude,
'logd': u.Dex}
def to_cartesian(self):
d = self.logd.physical
x = d * np.cos(self.lat) * np.cos(self.lon)
y = d * np.cos(self.lat) * np.sin(self.lon)
z = d * np.sin(self.lat)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
lon = np.arctan2(cart.y, cart.x)
lat = np.arctan2(cart.z, s)
return cls(lon=lon, lat=lat, logd=u.Dex(r), copy=False)
ld1 = LogDRepresentation(90.*u.deg, 0.*u.deg, 1.*u.dex(u.kpc))
ld2 = LogDRepresentation(lon=90.*u.deg, lat=0.*u.deg, logd=1.*u.dex(u.kpc))
assert np.all(ld1.lon == ld2.lon)
assert np.all(ld1.lat == ld2.lat)
assert np.all(ld1.logd == ld2.logd)
c = ld1.to_cartesian()
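# (atol below is 1 nanoparsec, u.npc, to tolerate floating-point error
#  in the components that should be exactly zero)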
assert_allclose_quantity(c.xyz, [0., 10., 0.] * u.kpc, atol=1.*u.npc)
ld3 = LogDRepresentation.from_cartesian(c)
assert np.all(ld3.lon == ld2.lon)
assert np.all(ld3.lat == ld2.lat)
assert np.all(ld3.logd == ld2.logd)
s = ld1.represent_as(SphericalRepresentation)
assert_allclose_quantity(s.lon, ld1.lon)
assert_allclose_quantity(s.distance, 10.*u.kpc)
assert_allclose_quantity(s.lat, ld1.lat)
with pytest.raises(TypeError):
LogDRepresentation(0.*u.deg, 1.*u.deg)
with pytest.raises(TypeError):
LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), lon=1.*u.deg)
with pytest.raises(TypeError):
LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), True, False)
with pytest.raises(TypeError):
LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), foo='bar')
# if we define it a second time, even the qualnames are the same,
# so we raise
with pytest.raises(ValueError):
class LogDRepresentation(BaseRepresentation):
attr_classes = {'lon': Longitude,
'lat': Latitude,
'logr': u.Dex}
def test_duplicate_warning():
from astropy.coordinates.representation import DUPLICATE_REPRESENTATIONS
from astropy.coordinates.representation import REPRESENTATION_CLASSES
with pytest.warns(DuplicateRepresentationWarning):
class UnitSphericalRepresentation(BaseRepresentation):
attr_classes = {'lon': Longitude,
'lat': Latitude}
assert 'unitspherical' in DUPLICATE_REPRESENTATIONS
assert 'unitspherical' not in REPRESENTATION_CLASSES
assert 'astropy.coordinates.representation.UnitSphericalRepresentation' in REPRESENTATION_CLASSES
assert __name__ + '.test_duplicate_warning.<locals>.UnitSphericalRepresentation' in REPRESENTATION_CLASSES
class TestCartesianRepresentationWithDifferential:
def test_init_differential(self):
diff = CartesianDifferential(d_x=1 * u.km/u.s,
d_y=2 * u.km/u.s,
d_z=3 * u.km/u.s)
# Check that a single differential gets turned into a 1-item dict.
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials=diff)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert len(s1.differentials) == 1
assert s1.differentials['s'] is diff
# can also pass in an explicit dictionary
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials={'s': diff})
assert len(s1.differentials) == 1
assert s1.differentials['s'] is diff
# using the wrong key will cause it to fail
with pytest.raises(ValueError):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials={'1 / s2': diff})
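# (differential dict keys are time-unit strings matching the derivative
#  order -- 's' for a first derivative such as km/s, 's2' for a second
#  derivative, as used in test_setitem below -- so '1 / s2' is rejected)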
# make sure other kwargs are handled properly
s1 = CartesianRepresentation(x=1, y=2, z=3,
differentials=diff, copy=False, unit=u.kpc)
assert len(s1.differentials) == 1
assert s1.differentials['s'] is diff
with pytest.raises(TypeError): # invalid type passed to differentials
CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials='garmonbozia')
# And that one can add it to another representation.
s1 = CartesianRepresentation(
CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc),
differentials=diff)
assert len(s1.differentials) == 1
assert s1.differentials['s'] is diff
# make sure differentials can't accept differentials
with pytest.raises(TypeError):
CartesianDifferential(d_x=1 * u.km/u.s, d_y=2 * u.km/u.s,
d_z=3 * u.km/u.s, differentials=diff)
def test_init_differential_compatible(self):
# TODO: more extensive checking of this
# should fail - representation and differential not compatible
diff = SphericalDifferential(d_lon=1 * u.mas/u.yr,
d_lat=2 * u.mas/u.yr,
d_distance=3 * u.km/u.s)
with pytest.raises(TypeError):
CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials=diff)
# should succeed - representation and differential are compatible
diff = SphericalCosLatDifferential(d_lon_coslat=1 * u.mas/u.yr,
d_lat=2 * u.mas/u.yr,
d_distance=3 * u.km/u.s)
r1 = SphericalRepresentation(lon=15*u.deg, lat=21*u.deg,
distance=1*u.pc,
differentials=diff)
def test_init_differential_multiple_equivalent_keys(self):
d1 = CartesianDifferential(*[1, 2, 3] * u.km/u.s)
d2 = CartesianDifferential(*[4, 5, 6] * u.km/u.s)
# verify that the check against expected_unit rejects passing in
# two different but equivalent keys
with pytest.raises(ValueError):
r1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials={'s': d1, 'yr': d2})
def test_init_array_broadcasting(self):
arr1 = np.arange(8).reshape(4, 2) * u.km/u.s
diff = CartesianDifferential(d_x=arr1, d_y=arr1, d_z=arr1)
# shapes aren't compatible
arr2 = np.arange(27).reshape(3, 9) * u.kpc
with pytest.raises(ValueError):
rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2,
differentials=diff)
arr2 = np.arange(8).reshape(4, 2) * u.kpc
rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2,
differentials=diff)
assert rep.x.unit is u.kpc
assert rep.y.unit is u.kpc
assert rep.z.unit is u.kpc
assert len(rep.differentials) == 1
assert rep.differentials['s'] is diff
assert rep.xyz.shape == rep.differentials['s'].d_xyz.shape
def test_reprobj(self):
# should succeed - representation and differential are compatible
diff = SphericalCosLatDifferential(d_lon_coslat=1 * u.mas/u.yr,
d_lat=2 * u.mas/u.yr,
d_distance=3 * u.km/u.s)
r1 = SphericalRepresentation(lon=15*u.deg, lat=21*u.deg,
distance=1*u.pc,
differentials=diff)
r2 = CartesianRepresentation.from_representation(r1)
assert r2.get_name() == 'cartesian'
assert not r2.differentials
r3 = SphericalRepresentation(r1)
assert r3.differentials
assert representation_equal(r3, r1)
def test_readonly(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
with pytest.raises(AttributeError): # attribute is not settable
s1.differentials = 'thing'
def test_represent_as(self):
diff = CartesianDifferential(d_x=1 * u.km/u.s,
d_y=2 * u.km/u.s,
d_z=3 * u.km/u.s)
rep1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials=diff)
# Only change the representation, drop the differential
new_rep = rep1.represent_as(SphericalRepresentation)
assert new_rep.get_name() == 'spherical'
assert not new_rep.differentials # dropped
# Pass in separate classes for representation, differential
new_rep = rep1.represent_as(SphericalRepresentation,
SphericalCosLatDifferential)
assert new_rep.get_name() == 'spherical'
assert new_rep.differentials['s'].get_name() == 'sphericalcoslat'
# Pass in a dictionary for the differential classes
new_rep = rep1.represent_as(SphericalRepresentation,
{'s': SphericalCosLatDifferential})
assert new_rep.get_name() == 'spherical'
assert new_rep.differentials['s'].get_name() == 'sphericalcoslat'
# make sure represent_as() passes through the differentials
for name in REPRESENTATION_CLASSES:
if name == 'radial':
# TODO: Converting a CartesianDifferential to a
# RadialDifferential fails, even on `main`
continue
elif name.endswith("geodetic"):
# TODO: Geodetic representations do not have differentials yet
continue
new_rep = rep1.represent_as(REPRESENTATION_CLASSES[name],
DIFFERENTIAL_CLASSES[name])
assert new_rep.get_name() == name
assert len(new_rep.differentials) == 1
assert new_rep.differentials['s'].get_name() == name
with pytest.raises(ValueError) as excinfo:
rep1.represent_as('name')
assert 'use frame object' in str(excinfo.value)
@pytest.mark.parametrize('sph_diff,usph_diff', [
(SphericalDifferential, UnitSphericalDifferential),
(SphericalCosLatDifferential, UnitSphericalCosLatDifferential)])
def test_represent_as_unit_spherical_with_diff(self, sph_diff, usph_diff):
"""Test that differential angles are correctly reduced."""
diff = CartesianDifferential(d_x=1 * u.km/u.s,
d_y=2 * u.km/u.s,
d_z=3 * u.km/u.s)
rep = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials=diff)
sph = rep.represent_as(SphericalRepresentation, sph_diff)
usph = rep.represent_as(UnitSphericalRepresentation, usph_diff)
assert components_equal(usph, sph.represent_as(UnitSphericalRepresentation))
assert components_equal(usph.differentials['s'],
sph.differentials['s'].represent_as(usph_diff))
# Just to be sure components_equal and the represent_as work as advertised,
# a sanity check: d_lat is always defined and should be the same.
assert_array_equal(sph.differentials['s'].d_lat,
usph.differentials['s'].d_lat)
def test_getitem(self):
d = CartesianDifferential(d_x=np.arange(10) * u.m/u.s,
d_y=-np.arange(10) * u.m/u.s,
d_z=1. * u.m/u.s)
s = CartesianRepresentation(x=np.arange(10) * u.m,
y=-np.arange(10) * u.m,
z=3 * u.km,
differentials=d)
s_slc = s[2:8:2]
s_dif = s_slc.differentials['s']
assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)
assert_allclose_quantity(s_dif.d_x, [2, 4, 6] * u.m/u.s)
assert_allclose_quantity(s_dif.d_y, [-2, -4, -6] * u.m/u.s)
assert_allclose_quantity(s_dif.d_z, [1, 1, 1] * u.m/u.s)
def test_setitem(self):
d = CartesianDifferential(d_x=np.arange(5) * u.m/u.s,
d_y=-np.arange(5) * u.m/u.s,
d_z=1. * u.m/u.s)
s = CartesianRepresentation(x=np.arange(5) * u.m,
y=-np.arange(5) * u.m,
z=3 * u.km,
differentials=d)
s[:2] = s[2]
assert_array_equal(s.x, [2, 2, 2, 3, 4] * u.m)
assert_array_equal(s.y, [-2, -2, -2, -3, -4] * u.m)
assert_array_equal(s.z, [3, 3, 3, 3, 3] * u.km)
assert_array_equal(s.differentials['s'].d_x,
[2, 2, 2, 3, 4] * u.m/u.s)
assert_array_equal(s.differentials['s'].d_y,
[-2, -2, -2, -3, -4] * u.m/u.s)
assert_array_equal(s.differentials['s'].d_z,
[1, 1, 1, 1, 1] * u.m/u.s)
s2 = s.represent_as(SphericalRepresentation,
SphericalDifferential)
s[0] = s2[3]
assert_allclose_quantity(s.x, [3, 2, 2, 3, 4] * u.m)
assert_allclose_quantity(s.y, [-3, -2, -2, -3, -4] * u.m)
assert_allclose_quantity(s.z, [3, 3, 3, 3, 3] * u.km)
assert_allclose_quantity(s.differentials['s'].d_x,
[3, 2, 2, 3, 4] * u.m/u.s)
assert_allclose_quantity(s.differentials['s'].d_y,
[-3, -2, -2, -3, -4] * u.m/u.s)
assert_allclose_quantity(s.differentials['s'].d_z,
[1, 1, 1, 1, 1] * u.m/u.s)
s3 = CartesianRepresentation(s.xyz, differentials={
's': d,
's2': CartesianDifferential(np.ones((3, 5))*u.m/u.s**2)})
with pytest.raises(ValueError, match='same differentials'):
s[0] = s3[2]
s4 = SphericalRepresentation(0.*u.deg, 0.*u.deg, 1.*u.kpc,
differentials=RadialDifferential(
10*u.km/u.s))
with pytest.raises(ValueError, match='loss of information'):
s[0] = s4
def test_transform(self):
d1 = CartesianDifferential(d_x=[1, 2] * u.km/u.s,
d_y=[3, 4] * u.km/u.s,
d_z=[5, 6] * u.km/u.s)
r1 = CartesianRepresentation(x=[1, 2] * u.kpc,
y=[3, 4] * u.kpc,
z=[5, 6] * u.kpc,
differentials=d1)
r2 = r1.transform(matrices["general"])
d2 = r2.differentials['s']
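# (expected values are matrices["general"] @ d_xyz for each point,
#  assuming the general matrix is [[1, 2, 3], [4, 5, 6], [7, 8, 9]])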
assert_allclose_quantity(d2.d_x, [22., 28]*u.km/u.s)
assert_allclose_quantity(d2.d_y, [49, 64]*u.km/u.s)
assert_allclose_quantity(d2.d_z, [76, 100.]*u.km/u.s)
def test_with_differentials(self):
# make sure with_differentials correctly creates a new copy with the same
# differential
cr = CartesianRepresentation([1, 2, 3]*u.kpc)
diff = CartesianDifferential([.1, .2, .3]*u.km/u.s)
cr2 = cr.with_differentials(diff)
assert cr.differentials != cr2.differentials
assert cr2.differentials['s'] is diff
# make sure it works even if a differential is present already
diff2 = CartesianDifferential([.1, .2, .3]*u.m/u.s)
cr3 = CartesianRepresentation([1, 2, 3]*u.kpc, differentials=diff)
cr4 = cr3.with_differentials(diff2)
assert cr4.differentials['s'] != cr3.differentials['s']
assert cr4.differentials['s'] == diff2
# also ensure a *scalar* differential works
cr5 = cr.with_differentials(diff)
assert len(cr5.differentials) == 1
assert cr5.differentials['s'] == diff
# make sure we don't update the original representation's dict
d1 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km/u.s)
d2 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km/u.s**2)
r1 = CartesianRepresentation(*np.random.random((3, 5)), unit=u.pc,
differentials=d1)
r2 = r1.with_differentials(d2)
assert r1.differentials['s'] is r2.differentials['s']
assert 's2' not in r1.differentials
assert 's2' in r2.differentials
def test_repr_with_differentials():
diff = CartesianDifferential([.1, .2, .3]*u.km/u.s)
cr = CartesianRepresentation([1, 2, 3]*u.kpc, differentials=diff)
assert "has differentials w.r.t.: 's'" in repr(cr)
def test_to_cartesian():
"""
Test that to_cartesian drops the differential.
"""
sd = SphericalDifferential(d_lat=1*u.deg, d_lon=2*u.deg, d_distance=10*u.m)
sr = SphericalRepresentation(lat=1*u.deg, lon=2*u.deg, distance=10*u.m,
differentials=sd)
cart = sr.to_cartesian()
assert cart.get_name() == 'cartesian'
assert not cart.differentials
@pytest.fixture
def unitphysics():
"""
This fixture provides a temporary UnitPhysicsSphericalRepresentation
class, registered as the unit representation of
PhysicsSphericalRepresentation for the duration of a test and restored
(or removed) afterwards.
"""
had_unit = False
if hasattr(PhysicsSphericalRepresentation, '_unit_representation'):
orig = PhysicsSphericalRepresentation._unit_representation
had_unit = True
class UnitPhysicsSphericalRepresentation(BaseRepresentation):
attr_classes = {'phi': Angle,
'theta': Angle}
def __init__(self, *args, copy=True, **kwargs):
super().__init__(*args, copy=copy, **kwargs)
# Wrap/validate phi/theta
if copy:
self._phi = self._phi.wrap_at(360 * u.deg)
else:
# necessary because the above version of `wrap_at` has to be a copy
self._phi.wrap_at(360 * u.deg, inplace=True)
if np.any(self._theta < 0.*u.deg) or np.any(self._theta > 180.*u.deg):
raise ValueError('Inclination angle(s) must be within '
'0 deg <= angle <= 180 deg, '
'got {}'.format(self._theta.to(u.degree)))
@property
def phi(self):
return self._phi
@property
def theta(self):
return self._theta
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
return {
'phi': CartesianRepresentation(-sinphi, cosphi, 0., copy=False),
'theta': CartesianRepresentation(costheta*cosphi,
costheta*sinphi,
-sintheta, copy=False)}
def scale_factors(self):
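# scale factors give the physical displacement per unit change in each
# coordinate on the unit sphere: sin(theta) for phi, and 1 for theta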
sintheta = np.sin(self.theta)
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return {'phi': sintheta,
'theta': l}
def to_cartesian(self):
x = np.sin(self.theta) * np.cos(self.phi)
y = np.sin(self.theta) * np.sin(self.phi)
z = np.cos(self.theta)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
phi = np.arctan2(cart.y, cart.x)
theta = np.arctan2(s, cart.z)
return cls(phi=phi, theta=theta, copy=False)
def norm(self):
return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled,
copy=False)
PhysicsSphericalRepresentation._unit_representation = UnitPhysicsSphericalRepresentation
yield UnitPhysicsSphericalRepresentation
if had_unit:
PhysicsSphericalRepresentation._unit_representation = orig
else:
del PhysicsSphericalRepresentation._unit_representation
# remove from the module-level representations, if present
REPRESENTATION_CLASSES.pop(UnitPhysicsSphericalRepresentation.get_name(), None)
def test_unitphysics(unitphysics):
obj = unitphysics(phi=0*u.deg, theta=10*u.deg)
objkw = unitphysics(phi=0*u.deg, theta=10*u.deg)
assert objkw.phi == obj.phi
assert objkw.theta == obj.theta
asphys = obj.represent_as(PhysicsSphericalRepresentation)
assert asphys.phi == obj.phi
assert_allclose(asphys.theta, obj.theta)
assert_allclose_quantity(asphys.r, 1*u.dimensionless_unscaled)
assph = obj.represent_as(SphericalRepresentation)
assert assph.lon == obj.phi
assert_allclose_quantity(assph.lat, 80*u.deg)
assert_allclose_quantity(assph.distance, 1*u.dimensionless_unscaled)
with pytest.raises(TypeError, match='got multiple values'):
unitphysics(1*u.deg, 2*u.deg, theta=10)
with pytest.raises(TypeError, match='unexpected keyword.*parrot'):
unitphysics(1*u.deg, 2*u.deg, parrot=10)
def test_distance_warning(recwarn):
SphericalRepresentation(1*u.deg, 2*u.deg, 1*u.kpc)
with pytest.raises(ValueError) as excinfo:
SphericalRepresentation(1*u.deg, 2*u.deg, -1*u.kpc)
assert 'Distance must be >= 0' in str(excinfo.value)
    # the second check is needed because the "originating" ValueError carries
    # the message above, while the representation-level one adds the one below
assert 'you must explicitly pass' in str(excinfo.value)
def test_dtype_preservation_in_indexing():
# Regression test for issue #8614 (fixed in #8876)
xyz = np.array([[1, 0, 0], [0.9, 0.1, 0]], dtype='f4')
cr = CartesianRepresentation(xyz, xyz_axis=-1, unit="km")
assert cr.xyz.dtype == xyz.dtype
cr0 = cr[0]
# This used to fail.
assert cr0.xyz.dtype == xyz.dtype
class TestInfo:
def setup_class(cls):
cls.rep = SphericalRepresentation([0, 1]*u.deg, [2, 3]*u.deg,
10*u.pc)
cls.diff = SphericalDifferential([10, 20]*u.mas/u.yr,
[30, 40]*u.mas/u.yr,
[50, 60]*u.km/u.s)
cls.rep_w_diff = SphericalRepresentation(cls.rep,
differentials=cls.diff)
def test_info_unit(self):
assert self.rep.info.unit == 'deg, deg, pc'
assert self.diff.info.unit == 'mas / yr, mas / yr, km / s'
assert self.rep_w_diff.info.unit == 'deg, deg, pc'
@pytest.mark.parametrize('item', ['rep', 'diff', 'rep_w_diff'])
def test_roundtrip(self, item):
rep_or_diff = getattr(self, item)
as_dict = rep_or_diff.info._represent_as_dict()
new = rep_or_diff.__class__.info._construct_from_dict(as_dict)
assert np.all(representation_equal(new, rep_or_diff))
@pytest.mark.parametrize('cls',
[SphericalDifferential,
SphericalCosLatDifferential,
CylindricalDifferential,
PhysicsSphericalDifferential,
UnitSphericalDifferential,
UnitSphericalCosLatDifferential])
def test_differential_norm_noncartesian(cls):
# The norm of a non-Cartesian differential without specifying `base` should error
    args = (0,) * len(cls.attr_classes)  # unit-spherical differentials take two components
    rep = cls(*args)
with pytest.raises(ValueError, match=r"`base` must be provided .* " + cls.__name__):
rep.norm()
def test_differential_norm_radial():
# Unlike most non-Cartesian differentials, the norm of a radial differential does not require `base`
rep = RadialDifferential(1*u.km/u.s)
assert_allclose_quantity(rep.norm(), 1*u.km/u.s)
|
84b642579cb16d92dc8501d9f798ff99a25cf11f904a4390ab011dfa7aace7be | import pytest
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import allclose as quantity_allclose
from astropy import units as u
from astropy.coordinates import Longitude, Latitude, EarthLocation
from astropy.coordinates.sites import get_builtin_sites, get_downloaded_sites, SiteRegistry
def test_builtin_sites():
reg = get_builtin_sites()
greenwich = reg['greenwich']
lon, lat, el = greenwich.to_geodetic()
assert_quantity_allclose(lon, Longitude('0:0:0', unit=u.deg),
atol=10*u.arcsec)
assert_quantity_allclose(lat, Latitude('51:28:40', unit=u.deg),
atol=1*u.arcsec)
assert_quantity_allclose(el, 46*u.m, atol=1*u.m)
names = reg.names
assert 'greenwich' in names
assert 'example_site' in names
with pytest.raises(KeyError) as exc:
reg['nonexistent site']
assert exc.value.args[0] == "Site 'nonexistent site' not in database. Use the 'names' attribute to see available sites."
@pytest.mark.remote_data(source='astropy')
def test_online_sites():
reg = get_downloaded_sites()
keck = reg['keck']
lon, lat, el = keck.to_geodetic()
assert_quantity_allclose(lon, -Longitude('155:28.7', unit=u.deg),
atol=0.001*u.deg)
assert_quantity_allclose(lat, Latitude('19:49.7', unit=u.deg),
atol=0.001*u.deg)
assert_quantity_allclose(el, 4160*u.m, atol=1*u.m)
names = reg.names
assert 'keck' in names
assert 'ctio' in names
# The JSON file contains `name` and `aliases` for each site, and astropy
# should use names from both, but not empty strings [#12721].
assert '' not in names
assert 'Royal Observatory Greenwich' in names
with pytest.raises(KeyError) as exc:
reg['nonexistent site']
assert exc.value.args[0] == "Site 'nonexistent site' not in database. Use the 'names' attribute to see available sites."
with pytest.raises(KeyError) as exc:
reg['kec']
assert exc.value.args[0] == "Site 'kec' not in database. Use the 'names' attribute to see available sites. Did you mean one of: 'keck'?'"
@pytest.mark.remote_data(source='astropy')
# this will *try* the online version, so we have to mark it remote_data, even
# though it could fall back on the non-remote version
def test_EarthLocation_basic():
greenwichel = EarthLocation.of_site('greenwich')
lon, lat, el = greenwichel.to_geodetic()
assert_quantity_allclose(lon, Longitude('0:0:0', unit=u.deg),
atol=10*u.arcsec)
assert_quantity_allclose(lat, Latitude('51:28:40', unit=u.deg),
atol=1*u.arcsec)
assert_quantity_allclose(el, 46*u.m, atol=1*u.m)
names = EarthLocation.get_site_names()
assert 'greenwich' in names
assert 'example_site' in names
with pytest.raises(KeyError) as exc:
EarthLocation.of_site('nonexistent site')
assert exc.value.args[0] == "Site 'nonexistent site' not in database. Use EarthLocation.get_site_names to see available sites."
def test_EarthLocation_state_offline():
EarthLocation._site_registry = None
EarthLocation._get_site_registry(force_builtin=True)
assert EarthLocation._site_registry is not None
oldreg = EarthLocation._site_registry
newreg = EarthLocation._get_site_registry()
assert oldreg is newreg
newreg = EarthLocation._get_site_registry(force_builtin=True)
assert oldreg is not newreg
@pytest.mark.remote_data(source='astropy')
def test_EarthLocation_state_online():
EarthLocation._site_registry = None
EarthLocation._get_site_registry(force_download=True)
assert EarthLocation._site_registry is not None
oldreg = EarthLocation._site_registry
newreg = EarthLocation._get_site_registry()
assert oldreg is newreg
newreg = EarthLocation._get_site_registry(force_download=True)
assert oldreg is not newreg
def test_registry():
reg = SiteRegistry()
assert len(reg.names) == 0
names = ['sitea', 'site A']
loc = EarthLocation.from_geodetic(lat=1*u.deg, lon=2*u.deg, height=3*u.km)
reg.add_site(names, loc)
assert len(reg.names) == 2
loc1 = reg['SIteA']
assert loc1 is loc
loc2 = reg['sIte a']
assert loc2 is loc
def test_non_EarthLocation():
"""
A regression test for a typo bug pointed out at the bottom of
https://github.com/astropy/astropy/pull/4042
"""
class EarthLocation2(EarthLocation):
pass
    # This keeps us from needing remote_data.  Note that this does *not* mess
    # up the registry for EarthLocation itself, because the registry is cached
    # on a per-class basis.
EarthLocation2._get_site_registry(force_builtin=True)
el2 = EarthLocation2.of_site('greenwich')
assert type(el2) is EarthLocation2
assert el2.info.name == 'Royal Observatory Greenwich'
def check_builtin_matches_remote(download_url=True):
"""
    This function checks that the builtin sites registry is consistent with
    the remote registry (or a registry at some other location).
    Note that currently this is *not* run by the testing suite (because it
    doesn't start with "test"); it is instead meant to be used as a check
    before merging changes into astropy-data.
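
    A minimal usage sketch (run manually, not collected by pytest)::

        check_builtin_matches_remote()  # compare builtin sites to astropy-data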
"""
builtin_registry = EarthLocation._get_site_registry(force_builtin=True)
dl_registry = EarthLocation._get_site_registry(force_download=download_url)
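    # for each builtin site, record whether it appears in the downloaded
    # registry and, if so, whether the geocentric coordinates agree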
in_dl = {}
matches = {}
for name in builtin_registry.names:
in_dl[name] = name in dl_registry
if in_dl[name]:
matches[name] = quantity_allclose(builtin_registry[name].geocentric, dl_registry[name].geocentric)
else:
matches[name] = False
if not all(matches.values()):
# this makes sure we actually see which don't match
print("In builtin registry but not in download:")
for name in in_dl:
if not in_dl[name]:
print(' ', name)
print("In both but not the same value:")
for name in matches:
if not matches[name] and in_dl[name]:
print(' ', name, 'builtin:', builtin_registry[name], 'download:', dl_registry[name])
assert False, "Builtin and download registry aren't consistent - failures printed to stdout"
def test_meta_present():
reg = get_builtin_sites()
greenwich = reg['greenwich']
assert greenwich.info.meta['source'] == ('Ordnance Survey via '
'http://gpsinformation.net/main/greenwich.htm and UNESCO')
|
498a945061db3565b103afa3abb32c1ce347d223f39cc11e8d3f1c943393c536 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for miscellaneous functionality in the `funcs` module
"""
import pytest
import numpy as np
from numpy import testing as npt
from astropy import units as u
from astropy.time import Time
def test_sun():
"""
    Test that `get_sun` works and that it behaves roughly as it should (in GCRS)
"""
from astropy.coordinates.funcs import get_sun
northern_summer_solstice = Time('2010-6-21')
northern_winter_solstice = Time('2010-12-21')
equinox_1 = Time('2010-3-21')
equinox_2 = Time('2010-9-21')
gcrs1 = get_sun(equinox_1)
assert np.abs(gcrs1.dec.deg) < 1
gcrs2 = get_sun(Time([northern_summer_solstice, equinox_2, northern_winter_solstice]))
assert np.all(np.abs(gcrs2.dec - [23.5, 0, -23.5]*u.deg) < 1*u.deg)
def test_constellations(recwarn):
from astropy.coordinates import ICRS, FK5, SkyCoord
from astropy.coordinates.funcs import get_constellation
inuma = ICRS(9*u.hour, 65*u.deg)
n_prewarn = len(recwarn)
res = get_constellation(inuma)
res_short = get_constellation(inuma, short_name=True)
    assert len(recwarn) == n_prewarn  # neither version should produce warnings
assert res == 'Ursa Major'
assert res_short == 'UMa'
assert isinstance(res, str) or getattr(res, 'shape', None) == tuple()
# these are taken from the ReadMe for Roman 1987
ras = [9, 23.5, 5.12, 9.4555, 12.8888, 15.6687, 19, 6.2222]
decs = [65, -20, 9.12, -19.9, 22, -12.1234, -40, -81.1234]
shortnames = ['UMa', 'Aqr', 'Ori', 'Hya', 'Com', 'Lib', 'CrA', 'Men']
testcoos = FK5(ras*u.hour, decs*u.deg, equinox='B1950')
npt.assert_equal(get_constellation(testcoos, short_name=True), shortnames)
# test on a SkyCoord, *and* test Boötes, which is special in that it has a
# non-ASCII character
bootest = SkyCoord(15*u.hour, 30*u.deg, frame='icrs')
boores = get_constellation(bootest)
assert boores == 'Boötes'
assert isinstance(boores, str) or getattr(boores, 'shape', None) == tuple()
@pytest.mark.xfail
def test_constellation_edge_cases():
from astropy.coordinates import FK5
from astropy.coordinates.funcs import get_constellation
# Test edge cases close to borders, using B1875.0 coordinates
# Look for HMS / DMS roundoff-to-decimal issues from Roman (1987) data,
# and misuse of PrecessedGeocentric, as documented in
# https://github.com/astropy/astropy/issues/9855
# Define eight test points.
# The first four cross the boundary at 06h14m30 == 6.2416666666666... hours
# with Monoceros on the west side of Orion at Dec +3.0.
ras = [6.24100, 6.24160, 6.24166, 6.24171]
# aka ['6h14m27.6s' '6h14m29.76s' '6h14m29.976s' '6h14m30.156s']
decs = [3.0, 3.0, 3.0, 3.0]
# Correct constellations for given RA/Dec coordinates
shortnames = ['Ori', 'Ori', 'Ori', 'Mon']
# The second four sample northward along RA 22 hours, crossing the boundary
# at 86° 10' == 86.1666... degrees between Cepheus and Ursa Minor
decs += [86.16, 86.1666, 86.16668, 86.1668]
ras += [22.0, 22.0, 22.0, 22.0]
    shortnames += ['Cep', 'Cep', 'UMi', 'UMi']
testcoos = FK5(ras*u.hour, decs*u.deg, equinox='B1875')
npt.assert_equal(get_constellation(testcoos, short_name=True), shortnames,
"get_constellation() error: misusing Roman approximations, vs IAU boundaries from Delporte?")
# TODO: When that's fixed, add other tests with coords that are in different constellations
# depending on equinox
def test_concatenate():
from astropy.coordinates import FK5, SkyCoord, ICRS
from astropy.coordinates.funcs import concatenate
# Just positions
fk5 = FK5(1*u.deg, 2*u.deg)
sc = SkyCoord(3*u.deg, 4*u.deg, frame='fk5')
res = concatenate([fk5, sc])
np.testing.assert_allclose(res.ra, [1, 3]*u.deg)
np.testing.assert_allclose(res.dec, [2, 4]*u.deg)
with pytest.raises(TypeError):
concatenate(fk5)
with pytest.raises(TypeError):
concatenate(1*u.deg)
# positions and velocities
fr = ICRS(ra=10*u.deg, dec=11.*u.deg,
pm_ra_cosdec=12*u.mas/u.yr,
pm_dec=13*u.mas/u.yr)
sc = SkyCoord(ra=20*u.deg, dec=21.*u.deg,
pm_ra_cosdec=22*u.mas/u.yr,
pm_dec=23*u.mas/u.yr)
res = concatenate([fr, sc])
with pytest.raises(ValueError):
concatenate([fr, fk5])
fr2 = ICRS(ra=10*u.deg, dec=11.*u.deg)
with pytest.raises(ValueError):
concatenate([fr, fr2])
def test_concatenate_representations():
from astropy.coordinates.funcs import concatenate_representations
from astropy.coordinates import representation as r
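    # build a varied set of representations: scalar and array-shaped, with
    # and without attached differentials, to exercise concatenation broadly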
reps = [r.CartesianRepresentation([1, 2, 3.]*u.kpc),
r.SphericalRepresentation(lon=1*u.deg, lat=2.*u.deg,
distance=10*u.pc),
r.UnitSphericalRepresentation(lon=1*u.deg, lat=2.*u.deg),
r.CartesianRepresentation(np.ones((3, 100)) * u.kpc),
r.CartesianRepresentation(np.ones((3, 16, 8)) * u.kpc)]
reps.append(reps[0].with_differentials(
r.CartesianDifferential([1, 2, 3.] * u.km/u.s)))
reps.append(reps[1].with_differentials(
r.SphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr, 3*u.km/u.s)))
reps.append(reps[2].with_differentials(
r.SphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr, 3*u.km/u.s)))
reps.append(reps[2].with_differentials(
r.UnitSphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr)))
reps.append(reps[2].with_differentials(
{'s': r.RadialDifferential(1*u.km/u.s)}))
reps.append(reps[3].with_differentials(
r.CartesianDifferential(*np.ones((3, 100)) * u.km/u.s)))
reps.append(reps[4].with_differentials(
r.CartesianDifferential(*np.ones((3, 16, 8)) * u.km/u.s)))
# Test that combining all of the above with itself succeeds
for rep in reps:
if not rep.shape:
expected_shape = (2, )
else:
expected_shape = (2 * rep.shape[0], ) + rep.shape[1:]
tmp = concatenate_representations((rep, rep))
assert tmp.shape == expected_shape
if 's' in rep.differentials:
assert tmp.differentials['s'].shape == expected_shape
# Try combining 4, just for something different
for rep in reps:
if not rep.shape:
expected_shape = (4, )
else:
expected_shape = (4 * rep.shape[0], ) + rep.shape[1:]
tmp = concatenate_representations((rep, rep, rep, rep))
assert tmp.shape == expected_shape
if 's' in rep.differentials:
assert tmp.differentials['s'].shape == expected_shape
# Test that combining pairs fails
with pytest.raises(TypeError):
concatenate_representations((reps[0], reps[1]))
with pytest.raises(ValueError):
concatenate_representations((reps[0], reps[5]))
# Check that passing in a single object fails
with pytest.raises(TypeError):
concatenate_representations(reps[0])
def test_concatenate_representations_different_units():
from astropy.coordinates.funcs import concatenate_representations
from astropy.coordinates import representation as r
reps = [r.CartesianRepresentation([1, 2, 3.]*u.pc),
r.CartesianRepresentation([1, 2, 3.]*u.kpc)]
concat = concatenate_representations(reps)
assert concat.shape == (2,)
assert np.all(concat.xyz ==
([[1., 2., 3.], [1000., 2000., 3000.]] * u.pc).T)
|
4e43078833cbd52530dfb1a1a0bda5c8052d9cf850e2a23da285568e253f9877 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the SkyCoord class. Note that there are also SkyCoord tests in
test_api_ape5.py
"""
import copy
from copy import deepcopy
import pytest
import numpy as np
import numpy.testing as npt
from erfa import ErfaWarning
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.coordinates.representation import REPRESENTATION_CLASSES, DUPLICATE_REPRESENTATIONS
from astropy.coordinates import (ICRS, FK4, FK5, Galactic, GCRS, SkyCoord, Angle,
SphericalRepresentation, CartesianRepresentation,
UnitSphericalRepresentation, AltAz,
BaseCoordinateFrame, Attribute,
frame_transform_graph, RepresentationMapping)
from astropy.coordinates import Latitude, EarthLocation
from astropy.coordinates.transformations import FunctionTransform
from astropy.time import Time
from astropy.utils import minversion, isiterable
from astropy.units import allclose as quantity_allclose
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
RA = 1.0 * u.deg
DEC = 2.0 * u.deg
C_ICRS = ICRS(RA, DEC)
C_FK5 = C_ICRS.transform_to(FK5())
J2001 = Time('J2001')
def allclose(a, b, rtol=0.0, atol=None):
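    # default atol picks up the unit of `a`, so this works for both plain
    # numbers and Quantities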
if atol is None:
atol = 1.e-8 * getattr(a, 'unit', 1.)
return quantity_allclose(a, b, rtol, atol)
def setup_function(func):
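    # snapshot the global representation registries so tests that register
    # custom representations cannot leak state into later tests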
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
func.DUPLICATE_REPRESENTATIONS_ORIG = deepcopy(DUPLICATE_REPRESENTATIONS)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
DUPLICATE_REPRESENTATIONS.clear()
DUPLICATE_REPRESENTATIONS.update(func.DUPLICATE_REPRESENTATIONS_ORIG)
def test_is_transformable_to_str_input():
"""Test method ``is_transformable_to`` with string input.
The only difference from the frame method of the same name is that
    strings are allowed. As the frame tests cover ``is_transformable_to``, here
we only test the added string option.
"""
# make example SkyCoord
c = SkyCoord(90*u.deg, -11*u.deg)
# iterate through some frames, checking consistency
names = frame_transform_graph.get_names()
for name in names:
frame = frame_transform_graph.lookup_name(name)()
assert c.is_transformable_to(name) == c.is_transformable_to(frame)
def test_transform_to():
for frame in (FK5(), FK5(equinox=Time('J1975.0')),
FK4(), FK4(equinox=Time('J1975.0')),
SkyCoord(RA, DEC, frame='fk4', equinox='J1980')):
c_frame = C_ICRS.transform_to(frame)
s_icrs = SkyCoord(RA, DEC, frame='icrs')
s_frame = s_icrs.transform_to(frame)
assert allclose(c_frame.ra, s_frame.ra)
assert allclose(c_frame.dec, s_frame.dec)
assert allclose(c_frame.distance, s_frame.distance)
# set up for parametrized test
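# (every pairing of the four frame classes, each with optional equinox and
# obstime values, so all combinations get round-tripped below)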
rt_sets = []
rt_frames = [ICRS, FK4, FK5, Galactic]
for rt_frame0 in rt_frames:
for rt_frame1 in rt_frames:
for equinox0 in (None, 'J1975.0'):
for obstime0 in (None, 'J1980.0'):
for equinox1 in (None, 'J1975.0'):
for obstime1 in (None, 'J1980.0'):
rt_sets.append((rt_frame0, rt_frame1,
equinox0, equinox1,
obstime0, obstime1))
rt_args = ('frame0', 'frame1', 'equinox0', 'equinox1', 'obstime0', 'obstime1')
@pytest.mark.parametrize(rt_args, rt_sets)
def test_round_tripping(frame0, frame1, equinox0, equinox1, obstime0, obstime1):
"""
Test round tripping out and back using transform_to in every combination.
"""
attrs0 = {'equinox': equinox0, 'obstime': obstime0}
attrs1 = {'equinox': equinox1, 'obstime': obstime1}
# Remove None values
attrs0 = {k: v for k, v in attrs0.items() if v is not None}
attrs1 = {k: v for k, v in attrs1.items() if v is not None}
# Go out and back
sc = SkyCoord(RA, DEC, frame=frame0, **attrs0)
# Keep only frame attributes for frame1
attrs1 = {attr: val for attr, val in attrs1.items()
if attr in frame1.get_frame_attr_names()}
sc2 = sc.transform_to(frame1(**attrs1))
# When coming back only keep frame0 attributes for transform_to
attrs0 = {attr: val for attr, val in attrs0.items()
if attr in frame0.get_frame_attr_names()}
# also, if any are None, fill in with defaults
for attrnm in frame0.get_frame_attr_names():
if attrs0.get(attrnm, None) is None:
if attrnm == 'obstime' and frame0.get_frame_attr_names()[attrnm] is None:
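                # frames like FK4 default obstime to the equinox, so mirror that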
if 'equinox' in attrs0:
attrs0[attrnm] = attrs0['equinox']
else:
attrs0[attrnm] = frame0.get_frame_attr_names()[attrnm]
sc_rt = sc2.transform_to(frame0(**attrs0))
if frame0 is Galactic:
assert allclose(sc.l, sc_rt.l)
assert allclose(sc.b, sc_rt.b)
else:
assert allclose(sc.ra, sc_rt.ra)
assert allclose(sc.dec, sc_rt.dec)
if equinox0:
assert type(sc.equinox) is Time and sc.equinox == sc_rt.equinox
if obstime0:
assert type(sc.obstime) is Time and sc.obstime == sc_rt.obstime
def test_coord_init_string():
"""
Spherical or Cartesian representation input coordinates.
"""
sc = SkyCoord('1d 2d')
assert allclose(sc.ra, 1 * u.deg)
assert allclose(sc.dec, 2 * u.deg)
sc = SkyCoord('1d', '2d')
assert allclose(sc.ra, 1 * u.deg)
assert allclose(sc.dec, 2 * u.deg)
sc = SkyCoord('1°2′3″', '2°3′4″')
assert allclose(sc.ra, Angle('1°2′3″'))
assert allclose(sc.dec, Angle('2°3′4″'))
sc = SkyCoord('1°2′3″ 2°3′4″')
assert allclose(sc.ra, Angle('1°2′3″'))
assert allclose(sc.dec, Angle('2°3′4″'))
with pytest.raises(ValueError) as err:
SkyCoord('1d 2d 3d')
assert "Cannot parse first argument data" in str(err.value)
sc1 = SkyCoord('8 00 00 +5 00 00.0', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc1, SkyCoord)
assert allclose(sc1.ra, Angle(120 * u.deg))
assert allclose(sc1.dec, Angle(5 * u.deg))
sc11 = SkyCoord('8h00m00s+5d00m00.0s', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc11, SkyCoord)
assert allclose(sc1.ra, Angle(120 * u.deg))
assert allclose(sc1.dec, Angle(5 * u.deg))
sc2 = SkyCoord('8 00 -5 00 00.0', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc2, SkyCoord)
assert allclose(sc2.ra, Angle(120 * u.deg))
assert allclose(sc2.dec, Angle(-5 * u.deg))
sc3 = SkyCoord('8 00 -5 00.6', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc3, SkyCoord)
assert allclose(sc3.ra, Angle(120 * u.deg))
assert allclose(sc3.dec, Angle(-5.01 * u.deg))
sc4 = SkyCoord('J080000.00-050036.00', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc4, SkyCoord)
assert allclose(sc4.ra, Angle(120 * u.deg))
assert allclose(sc4.dec, Angle(-5.01 * u.deg))
sc41 = SkyCoord('J080000+050036', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc41, SkyCoord)
assert allclose(sc41.ra, Angle(120 * u.deg))
assert allclose(sc41.dec, Angle(+5.01 * u.deg))
sc5 = SkyCoord('8h00.6m -5d00.6m', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc5, SkyCoord)
assert allclose(sc5.ra, Angle(120.15 * u.deg))
assert allclose(sc5.dec, Angle(-5.01 * u.deg))
sc6 = SkyCoord('8h00.6m -5d00.6m', unit=(u.hour, u.deg), frame='fk4')
assert isinstance(sc6, SkyCoord)
assert allclose(sc6.ra, Angle(120.15 * u.deg))
assert allclose(sc6.dec, Angle(-5.01 * u.deg))
sc61 = SkyCoord('8h00.6m-5d00.6m', unit=(u.hour, u.deg), frame='fk4')
assert isinstance(sc61, SkyCoord)
assert allclose(sc6.ra, Angle(120.15 * u.deg))
assert allclose(sc6.dec, Angle(-5.01 * u.deg))
sc61 = SkyCoord('8h00.6-5d00.6', unit=(u.hour, u.deg), frame='fk4')
assert isinstance(sc61, SkyCoord)
assert allclose(sc6.ra, Angle(120.15 * u.deg))
assert allclose(sc6.dec, Angle(-5.01 * u.deg))
sc7 = SkyCoord("J1874221.60+122421.6", unit=u.deg)
assert isinstance(sc7, SkyCoord)
assert allclose(sc7.ra, Angle(187.706 * u.deg))
assert allclose(sc7.dec, Angle(12.406 * u.deg))
with pytest.raises(ValueError):
SkyCoord('8 00 -5 00.6', unit=(u.deg, u.deg), frame='galactic')
def test_coord_init_unit():
"""
Test variations of the unit keyword.
"""
for unit in ('deg', 'deg,deg', ' deg , deg ', u.deg, (u.deg, u.deg),
np.array(['deg', 'deg'])):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(1 * u.deg))
assert allclose(sc.dec, Angle(2 * u.deg))
for unit in ('hourangle', 'hourangle,hourangle', ' hourangle , hourangle ',
u.hourangle, [u.hourangle, u.hourangle]):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(15 * u.deg))
assert allclose(sc.dec, Angle(30 * u.deg))
for unit in ('hourangle,deg', (u.hourangle, u.deg)):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(15 * u.deg))
assert allclose(sc.dec, Angle(2 * u.deg))
for unit in ('deg,deg,deg,deg', [u.deg, u.deg, u.deg, u.deg], None):
with pytest.raises(ValueError) as err:
SkyCoord(1, 2, unit=unit)
assert 'Unit keyword must have one to three unit values' in str(err.value)
for unit in ('m', (u.m, u.deg), ''):
with pytest.raises(u.UnitsError) as err:
SkyCoord(1, 2, unit=unit)
def test_coord_init_list():
"""
Spherical or Cartesian representation input coordinates.
"""
sc = SkyCoord([('1d', '2d'),
(1 * u.deg, 2 * u.deg),
'1d 2d',
('1°', '2°'),
'1° 2°'], unit='deg')
assert allclose(sc.ra, Angle('1d'))
assert allclose(sc.dec, Angle('2d'))
with pytest.raises(ValueError) as err:
SkyCoord(['1d 2d 3d'])
assert "Cannot parse first argument data" in str(err.value)
with pytest.raises(ValueError) as err:
SkyCoord([('1d', '2d', '3d')])
assert "Cannot parse first argument data" in str(err.value)
sc = SkyCoord([1 * u.deg, 1 * u.deg], [2 * u.deg, 2 * u.deg])
assert allclose(sc.ra, Angle('1d'))
assert allclose(sc.dec, Angle('2d'))
with pytest.raises(ValueError) as err:
SkyCoord([1 * u.deg, 2 * u.deg]) # this list is taken as RA w/ missing dec
assert "One or more elements of input sequence does not have a length" in str(err.value)
def test_coord_init_array():
"""
Input in the form of a list array or numpy array
"""
for a in (['1 2', '3 4'],
[['1', '2'], ['3', '4']],
[[1, 2], [3, 4]]):
sc = SkyCoord(a, unit='deg')
assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
sc = SkyCoord(np.array(a), unit='deg')
assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
def test_coord_init_representation():
"""
Spherical or Cartesian representation input coordinates.
"""
coord = SphericalRepresentation(lon=8 * u.deg, lat=5 * u.deg, distance=1 * u.kpc)
sc = SkyCoord(coord, frame='icrs')
assert allclose(sc.ra, coord.lon)
assert allclose(sc.dec, coord.lat)
assert allclose(sc.distance, coord.distance)
with pytest.raises(ValueError) as err:
SkyCoord(coord, frame='icrs', ra='1d')
assert "conflicts with keyword argument 'ra'" in str(err.value)
coord = CartesianRepresentation(1 * u.one, 2 * u.one, 3 * u.one)
sc = SkyCoord(coord, frame='icrs')
sc_cart = sc.represent_as(CartesianRepresentation)
assert allclose(sc_cart.x, 1.0)
assert allclose(sc_cart.y, 2.0)
assert allclose(sc_cart.z, 3.0)
def test_frame_init():
"""
Different ways of providing the frame.
"""
sc = SkyCoord(RA, DEC, frame='icrs')
assert sc.frame.name == 'icrs'
sc = SkyCoord(RA, DEC, frame=ICRS)
assert sc.frame.name == 'icrs'
sc = SkyCoord(sc)
assert sc.frame.name == 'icrs'
sc = SkyCoord(C_ICRS)
assert sc.frame.name == 'icrs'
SkyCoord(C_ICRS, frame='icrs')
assert sc.frame.name == 'icrs'
with pytest.raises(ValueError) as err:
SkyCoord(C_ICRS, frame='galactic')
assert 'Cannot override frame=' in str(err.value)
def test_equal():
obstime = 'B1955'
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, obstime=obstime)
sc2 = SkyCoord([1, 20]*u.deg, [3, 4]*u.deg, obstime=obstime)
# Compare arrays and scalars
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert (sc1[0] == sc2[0]) == True # noqa (numpy True not Python True)
assert (sc1[0] != sc2[0]) == False # noqa
# Broadcasting
eq = sc1[0] == sc2
ne = sc1[0] != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
# With diff only in velocity
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, radial_velocity=[1, 2]*u.km/u.s)
sc2 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, radial_velocity=[1, 20]*u.km/u.s)
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert (sc1[0] == sc2[0]) == True # noqa
assert (sc1[0] != sc2[0]) == False # noqa
def test_equal_different_type():
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, obstime='B1955')
# Test equals and not equals operators against different types
assert sc1 != 'a string'
assert not (sc1 == 'a string')
def test_equal_exceptions():
sc1 = SkyCoord(1*u.deg, 2*u.deg, obstime='B1955')
sc2 = SkyCoord(1*u.deg, 2*u.deg)
with pytest.raises(ValueError, match=r"cannot compare: extra frame "
r"attribute 'obstime' is not equivalent \(perhaps compare the "
r"frames directly to avoid this exception\)"):
sc1 == sc2
# Note that this exception is the only one raised directly in SkyCoord.
# All others come from lower-level classes and are tested in test_frames.py.
def test_attr_inheritance():
"""
When initializing from an existing coord the representation attrs like
equinox should be inherited to the SkyCoord. If there is a conflict
then raise an exception.
"""
sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001')
sc2 = SkyCoord(sc)
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc2 = SkyCoord(sc.frame) # Doesn't have equinox there so we get FK4 defaults
assert sc2.equinox != sc.equinox
assert sc2.obstime != sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001')
sc2 = SkyCoord(sc)
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc2 = SkyCoord(sc.frame) # sc.frame has equinox, obstime
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
@pytest.mark.parametrize('frame', ['fk4', 'fk5', 'icrs'])
def test_setitem_no_velocity(frame):
"""Test different flavors of item setting for a SkyCoord without a velocity
for different frames. Include a frame attribute that is sometimes an
actual frame attribute and sometimes an extra frame attribute.
"""
sc0 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, obstime='B1955', frame=frame)
sc2 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg, obstime='B1955', frame=frame)
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert sc1.obstime == Time('B1955')
assert sc1.frame.name == frame
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
def test_setitem_initially_broadcast():
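    # item assignment must first un-broadcast the internally shared data,
    # otherwise writing one element would alias across the broadcast copies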
sc = SkyCoord(np.ones((2, 1))*u.deg, np.ones((1, 3))*u.deg)
sc[1, 1] = SkyCoord(0*u.deg, 0*u.deg)
expected = np.ones((2, 3))*u.deg
expected[1, 1] = 0.
assert np.all(sc.ra == expected)
assert np.all(sc.dec == expected)
def test_setitem_velocities():
"""Test different flavors of item setting for a SkyCoord with a velocity.
"""
sc0 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, radial_velocity=[1, 2]*u.km/u.s,
obstime='B1950', frame='fk4')
sc2 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg, radial_velocity=[10, 20]*u.km/u.s,
obstime='B1950', frame='fk4')
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [1, 10])
assert sc1.obstime == Time('B1950')
assert sc1.frame.name == 'fk4'
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 10])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 20])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [20, 10])
def test_setitem_exceptions():
class SkyCoordSub(SkyCoord):
pass
obstime = 'B1955'
sc0 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, frame='fk4')
sc2 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg, frame='fk4', obstime=obstime)
sc1 = SkyCoordSub(sc0)
    with pytest.raises(TypeError, match='can only set from object of same class: '
                       'SkyCoordSub vs. SkyCoord'):
sc1[0] = sc2[0]
sc1 = SkyCoord(sc0.ra, sc0.dec, frame='fk4', obstime='B2001')
with pytest.raises(ValueError, match='can only set frame item from an equivalent frame'):
sc1.frame[0] = sc2.frame[0]
sc1 = SkyCoord(sc0.ra[0], sc0.dec[0], frame='fk4', obstime=obstime)
with pytest.raises(TypeError, match="scalar 'FK4' frame object does not support "
'item assignment'):
sc1[0] = sc2[0]
# Different differentials
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg,
pm_ra_cosdec=[1, 2]*u.mas/u.yr, pm_dec=[3, 4]*u.mas/u.yr)
sc2 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg, radial_velocity=[10, 20]*u.km/u.s)
with pytest.raises(TypeError, match='can only set from object of same class: '
'UnitSphericalCosLatDifferential vs. RadialDifferential'):
sc1[0] = sc2[0]
def test_insert():
sc0 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)
sc1 = SkyCoord(5*u.deg, 6*u.deg)
sc3 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg)
sc4 = SkyCoord([[1, 2], [3, 4]]*u.deg,
[[5, 6], [7, 8]]*u.deg)
sc5 = SkyCoord([[10, 2], [30, 4]]*u.deg,
[[50, 6], [70, 8]]*u.deg)
# Insert a scalar
sc = sc0.insert(1, sc1)
assert skycoord_equal(sc, SkyCoord([1, 5, 2]*u.deg, [3, 6, 4]*u.deg))
# Insert length=2 array at start of array
sc = sc0.insert(0, sc3)
assert skycoord_equal(sc, SkyCoord([10, 20, 1, 2]*u.deg, [30, 40, 3, 4]*u.deg))
# Insert length=2 array at end of array
sc = sc0.insert(2, sc3)
assert skycoord_equal(sc, SkyCoord([1, 2, 10, 20]*u.deg, [3, 4, 30, 40]*u.deg))
# Multidimensional
sc = sc4.insert(1, sc5)
assert skycoord_equal(sc, SkyCoord([[1, 2], [10, 2], [30, 4], [3, 4]]*u.deg,
[[5, 6], [50, 6], [70, 8], [7, 8]]*u.deg))
def test_insert_exceptions():
sc0 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)
sc1 = SkyCoord(5*u.deg, 6*u.deg)
# sc3 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg)
sc4 = SkyCoord([[1, 2], [3, 4]]*u.deg,
[[5, 6], [7, 8]]*u.deg)
with pytest.raises(TypeError, match='cannot insert into scalar'):
sc1.insert(0, sc0)
with pytest.raises(ValueError, match='axis must be 0'):
sc0.insert(0, sc1, axis=1)
with pytest.raises(TypeError, match='obj arg must be an integer'):
sc0.insert(slice(None), sc0)
with pytest.raises(IndexError, match='index -100 is out of bounds for axis 0 '
'with size 2'):
sc0.insert(-100, sc0)
# Bad shape
with pytest.raises(ValueError, match='could not broadcast input array from '
r'shape \(2,2\) into shape \(2,?\)'):
sc0.insert(0, sc4)
def test_attr_conflicts():
"""
Check conflicts resolution between coordinate attributes and init kwargs.
"""
sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001')
# OK if attrs both specified but with identical values
SkyCoord(sc, equinox='J1999', obstime='J2001')
# OK because sc.frame doesn't have obstime
SkyCoord(sc.frame, equinox='J1999', obstime='J2100')
# Not OK if attrs don't match
with pytest.raises(ValueError) as err:
SkyCoord(sc, equinox='J1999', obstime='J2002')
assert "Coordinate attribute 'obstime'=" in str(err.value)
# Same game but with fk4 which has equinox and obstime frame attrs
sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001')
# OK if attrs both specified but with identical values
SkyCoord(sc, equinox='J1999', obstime='J2001')
# Not OK if SkyCoord attrs don't match
with pytest.raises(ValueError) as err:
SkyCoord(sc, equinox='J1999', obstime='J2002')
assert "Frame attribute 'obstime' has conflicting" in str(err.value)
# Not OK because sc.frame has different attrs
with pytest.raises(ValueError) as err:
SkyCoord(sc.frame, equinox='J1999', obstime='J2002')
assert "Frame attribute 'obstime' has conflicting" in str(err.value)
def test_frame_attr_getattr():
"""
When accessing frame attributes like equinox, the value should come
from self.frame when that object has the relevant attribute, otherwise
from self.
"""
sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001')
assert sc.equinox == 'J1999' # Just the raw value (not validated)
assert sc.obstime == 'J2001'
sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001')
assert sc.equinox == Time('J1999') # Coming from the self.frame object
assert sc.obstime == Time('J2001')
sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999')
assert sc.equinox == Time('J1999')
assert sc.obstime == Time('J1999')
def test_to_string():
"""
Basic testing of converting SkyCoord to strings. This just tests
for a single input coordinate and and 1-element list. It does not
test the underlying `Angle.to_string` method itself.
"""
coord = '1h2m3s 1d2m3s'
for wrap in (lambda x: x, lambda x: [x]):
sc = SkyCoord(wrap(coord))
assert sc.to_string() == wrap('15.5125 1.03417')
assert sc.to_string('dms') == wrap('15d30m45s 1d02m03s')
assert sc.to_string('hmsdms') == wrap('01h02m03s +01d02m03s')
with_kwargs = sc.to_string('hmsdms', precision=3, pad=True, alwayssign=True)
assert with_kwargs == wrap('+01h02m03.000s +01d02m03.000s')
@pytest.mark.parametrize('cls_other', [SkyCoord, ICRS])
def test_seps(cls_other):
sc1 = SkyCoord(0 * u.deg, 1 * u.deg)
sc2 = cls_other(0 * u.deg, 2 * u.deg)
sep = sc1.separation(sc2)
assert (sep - 1 * u.deg)/u.deg < 1e-10
with pytest.raises(ValueError):
sc1.separation_3d(sc2)
sc3 = SkyCoord(1 * u.deg, 1 * u.deg, distance=1 * u.kpc)
sc4 = cls_other(1 * u.deg, 1 * u.deg, distance=2 * u.kpc)
sep3d = sc3.separation_3d(sc4)
assert sep3d == 1 * u.kpc
def test_repr():
sc1 = SkyCoord(0 * u.deg, 1 * u.deg, frame='icrs')
sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame='icrs', distance=1 * u.kpc)
assert repr(sc1) == ('<SkyCoord (ICRS): (ra, dec) in deg\n'
' (0., 1.)>')
assert repr(sc2) == ('<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)\n'
' (1., 1., 1.)>')
sc3 = SkyCoord(0.25 * u.deg, [1, 2.5] * u.deg, frame='icrs')
assert repr(sc3).startswith('<SkyCoord (ICRS): (ra, dec) in deg\n')
sc_default = SkyCoord(0 * u.deg, 1 * u.deg)
assert repr(sc_default) == ('<SkyCoord (ICRS): (ra, dec) in deg\n'
' (0., 1.)>')
def test_repr_altaz():
sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame='icrs', distance=1 * u.kpc)
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
time = Time('2005-03-21 00:00:00')
sc4 = sc2.transform_to(AltAz(location=loc, obstime=time))
assert repr(sc4).startswith("<SkyCoord (AltAz: obstime=2005-03-21 00:00:00.000, "
"location=(-2309223., -3695529., "
"-4641767.) m, pressure=0.0 hPa, "
"temperature=0.0 deg_C, relative_humidity=0.0, "
"obswl=1.0 micron): (az, alt, distance) in "
"(deg, deg, kpc)\n")
def test_ops():
"""
Tests miscellaneous operations like `len`
"""
sc = SkyCoord(0 * u.deg, 1 * u.deg, frame='icrs')
sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg, frame='icrs')
sc_empty = SkyCoord([] * u.deg, [] * u.deg, frame='icrs')
assert sc.isscalar
assert not sc_arr.isscalar
assert not sc_empty.isscalar
with pytest.raises(TypeError):
len(sc)
assert len(sc_arr) == 2
assert len(sc_empty) == 0
assert bool(sc)
assert bool(sc_arr)
assert not bool(sc_empty)
assert sc_arr[0].isscalar
assert len(sc_arr[:1]) == 1
# A scalar shouldn't be indexable
with pytest.raises(TypeError):
sc[0:]
# but it should be possible to just get an item
sc_item = sc[()]
assert sc_item.shape == ()
# and to turn it into an array
sc_1d = sc[np.newaxis]
assert sc_1d.shape == (1,)
with pytest.raises(TypeError):
iter(sc)
assert not isiterable(sc)
assert isiterable(sc_arr)
assert isiterable(sc_empty)
it = iter(sc_arr)
assert next(it).dec == sc_arr[0].dec
assert next(it).dec == sc_arr[1].dec
with pytest.raises(StopIteration):
next(it)
def test_none_transform():
"""
Ensure that transforming from a SkyCoord with no frame provided works like
ICRS
"""
sc = SkyCoord(0 * u.deg, 1 * u.deg)
sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg)
sc2 = sc.transform_to(ICRS)
assert sc.ra == sc2.ra and sc.dec == sc2.dec
sc5 = sc.transform_to('fk5')
assert sc5.ra == sc2.transform_to('fk5').ra
sc_arr2 = sc_arr.transform_to(ICRS)
sc_arr5 = sc_arr.transform_to('fk5')
npt.assert_array_equal(sc_arr5.ra, sc_arr2.transform_to('fk5').ra)
def test_position_angle():
c1 = SkyCoord(0*u.deg, 0*u.deg)
c2 = SkyCoord(1*u.deg, 0*u.deg)
assert_allclose(c1.position_angle(c2) - 90.0 * u.deg, 0*u.deg)
c3 = SkyCoord(1*u.deg, 0.1*u.deg)
assert c1.position_angle(c3) < 90*u.deg
c4 = SkyCoord(0*u.deg, 1*u.deg)
assert_allclose(c1.position_angle(c4), 0*u.deg)
carr1 = SkyCoord(0*u.deg, [0, 1, 2]*u.deg)
carr2 = SkyCoord([-1, -2, -3]*u.deg, [0.1, 1.1, 2.1]*u.deg)
res = carr1.position_angle(carr2)
assert res.shape == (3,)
assert np.all(res < 360*u.degree)
assert np.all(res > 270*u.degree)
cicrs = SkyCoord(0*u.deg, 0*u.deg, frame='icrs')
cfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5')
# because of the frame transform, it's just a *bit* more than 90 degrees
assert cicrs.position_angle(cfk5) > 90.0 * u.deg
assert cicrs.position_angle(cfk5) < 91.0 * u.deg
def test_position_angle_directly():
"""Regression check for #3800: position_angle should accept floats."""
from astropy.coordinates.angle_utilities import position_angle
result = position_angle(10., 20., 10., 20.)
assert result.unit is u.radian
assert result.value == 0.
def test_sep_pa_equivalence():
"""Regression check for bug in #5702.
PA and separation from object 1 to 2 should be consistent with those
from 2 to 1
"""
cfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5')
cfk5B1950 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', equinox='B1950')
# test with both default and explicit equinox #5722 and #3106
sep_forward = cfk5.separation(cfk5B1950)
sep_backward = cfk5B1950.separation(cfk5)
assert sep_forward != 0 and sep_backward != 0
assert_allclose(sep_forward, sep_backward)
posang_forward = cfk5.position_angle(cfk5B1950)
posang_backward = cfk5B1950.position_angle(cfk5)
assert posang_forward != 0 and posang_backward != 0
assert 179 < (posang_forward - posang_backward).wrap_at(360*u.deg).degree < 181
dcfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', distance=1*u.pc)
dcfk5B1950 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', equinox='B1950',
distance=1.*u.pc)
sep3d_forward = dcfk5.separation_3d(dcfk5B1950)
sep3d_backward = dcfk5B1950.separation_3d(dcfk5)
assert sep3d_forward != 0 and sep3d_backward != 0
assert_allclose(sep3d_forward, sep3d_backward)
def test_directional_offset_by():
    # Round-trip tests: compute the offset (position angle, separation) from
    # sc1 to sc2, then apply it to sc1 and verify we arrive at sc2.
npoints = 7 # How many points when doing vectors of SkyCoords
for sc1 in [SkyCoord(0*u.deg,-90*u.deg), # South pole
SkyCoord(0 * u.deg, 90 * u.deg), # North pole
SkyCoord(1*u.deg,2*u.deg),
SkyCoord(np.linspace(0,359,npoints),np.linspace(-90, 90,npoints),
unit=u.deg, frame='fk4'),
SkyCoord(np.linspace(359,0,npoints),np.linspace(-90, 90,npoints),
unit=u.deg, frame='icrs'),
SkyCoord(np.linspace(-3,3,npoints),np.linspace(-90, 90,npoints),
unit=(u.rad, u.deg), frame='barycentricmeanecliptic')]:
for sc2 in [SkyCoord(5*u.deg,10*u.deg),
SkyCoord(np.linspace(0, 359, npoints), np.linspace(-90, 90, npoints),
unit=u.deg, frame='galactic')]:
# Find the displacement from sc1 to sc2,
posang = sc1.position_angle(sc2)
sep = sc1.separation(sc2)
# then do the offset from sc1 and verify that you are at sc2
sc2a = sc1.directional_offset_by(position_angle=posang, separation=sep)
assert np.max(np.abs(sc2.separation(sc2a).arcsec)) < 1e-3
# Specific test cases
# Go over the North pole a little way, and
# over the South pole a long way, to get to same spot
sc1 = SkyCoord(0*u.deg, 89*u.deg)
for posang,sep in [(0*u.deg, 2*u.deg), (180*u.deg, 358*u.deg)]:
sc2 = sc1.directional_offset_by(posang, sep)
assert allclose([sc2.ra.degree, sc2.dec.degree], [180, 89])
# Go twice as far to ensure that dec is actually changing
# and that >360deg is supported
sc2 = sc1.directional_offset_by(posang, 2*sep)
assert allclose([sc2.ra.degree, sc2.dec.degree], [180, 87])
# Verify that a separation of 180 deg in any direction gets to the antipode
# and 360 deg returns to start
sc1 = SkyCoord(10*u.deg, 47*u.deg)
for posang in np.linspace(0, 377, npoints):
sc2 = sc1.directional_offset_by(posang, 180*u.deg)
assert allclose([sc2.ra.degree, sc2.dec.degree], [190, -47])
sc2 = sc1.directional_offset_by(posang, 360*u.deg)
assert allclose([sc2.ra.degree, sc2.dec.degree], [10, 47])
# Verify that a 90 degree posang, which means East
# corresponds to an increase in RA, by ~separation/cos(dec) and
# a slight convergence to equator
sc1 = SkyCoord(10*u.deg, 60*u.deg)
sc2 = sc1.directional_offset_by(90*u.deg, 1.0*u.deg)
assert 11.9 < sc2.ra.degree < 12.0
assert 59.9 < sc2.dec.degree < 60.0
def test_table_to_coord():
"""
Checks "end-to-end" use of `Table` with `SkyCoord` - the `Quantity`
    initializer is the intermediary that translates the table columns into
    something coordinates understands.
    (Regression test for #1762)
"""
from astropy.table import Table, Column
t = Table()
t.add_column(Column(data=[1, 2, 3], name='ra', unit=u.deg))
t.add_column(Column(data=[4, 5, 6], name='dec', unit=u.deg))
c = SkyCoord(t['ra'], t['dec'])
assert allclose(c.ra.to(u.deg), [1, 2, 3] * u.deg)
assert allclose(c.dec.to(u.deg), [4, 5, 6] * u.deg)
def assert_quantities_allclose(coord, q1s, attrs):
"""
Compare two tuples of quantities. This assumes that the values in q1 are of
    order unity and uses atol=1e-13, rtol=0. It also asserts that the units of the
two quantities are the *same*, in order to check that the representation
output has the expected units.
"""
q2s = [getattr(coord, attr) for attr in attrs]
assert len(q1s) == len(q2s)
for q1, q2 in zip(q1s, q2s):
assert q1.shape == q2.shape
assert allclose(q1, q2, rtol=0, atol=1e-13 * q1.unit)
# Sets of inputs corresponding to Galactic frame
base_unit_attr_sets = [
('spherical', u.karcsec, u.karcsec, u.kpc, Latitude, 'l', 'b', 'distance'),
('unitspherical', u.karcsec, u.karcsec, None, Latitude, 'l', 'b', None),
('physicsspherical', u.karcsec, u.karcsec, u.kpc, Angle, 'phi', 'theta', 'r'),
('cartesian', u.km, u.km, u.km, u.Quantity, 'u', 'v', 'w'),
('cylindrical', u.km, u.karcsec, u.km, Angle, 'rho', 'phi', 'z')
]
units_attr_sets = []
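# expand each base set: representation given both by name and by class,
# components as scalars or length-1 lists, each optionally as numpy arrays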
for base_unit_attr_set in base_unit_attr_sets:
repr_name = base_unit_attr_set[0]
for representation in (repr_name, REPRESENTATION_CLASSES[repr_name]):
for c1, c2, c3 in ((1, 2, 3), ([1], [2], [3])):
for arrayify in True, False:
if arrayify:
c1 = np.array(c1)
c2 = np.array(c2)
c3 = np.array(c3)
units_attr_sets.append(base_unit_attr_set + (representation, c1, c2, c3))
units_attr_args = ('repr_name', 'unit1', 'unit2', 'unit3', 'cls2', 'attr1', 'attr2', 'attr3', 'representation', 'c1', 'c2', 'c3')
@pytest.mark.parametrize(units_attr_args,
[x for x in units_attr_sets if x[0] != 'unitspherical'])
def test_skycoord_three_components(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3,
representation, c1, c2, c3):
"""
Tests positional inputs using components (COMP1, COMP2, COMP3)
and various representations. Use weird units and Galactic frame.
"""
sc = SkyCoord(c1, c2, c3, unit=(unit1, unit2, unit3),
representation_type=representation,
frame=Galactic)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
sc = SkyCoord(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2),
1000*c3*u.Unit(unit3/1000), frame=Galactic,
unit=(unit1, unit2, unit3), representation_type=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
kwargs = {attr3: c3}
sc = SkyCoord(c1, c2, unit=(unit1, unit2, unit3),
frame=Galactic,
representation_type=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
kwargs = {attr1: c1, attr2: c2, attr3: c3}
sc = SkyCoord(frame=Galactic, unit=(unit1, unit2, unit3),
representation_type=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
@pytest.mark.parametrize(units_attr_args,
[x for x in units_attr_sets
if x[0] in ('spherical', 'unitspherical')])
def test_skycoord_spherical_two_components(repr_name, unit1, unit2, unit3, cls2,
attr1, attr2, attr3, representation, c1, c2, c3):
"""
Tests positional inputs using components (COMP1, COMP2) for spherical
representations. Use weird units and Galactic frame.
"""
sc = SkyCoord(c1, c2, unit=(unit1, unit2), frame=Galactic,
representation_type=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2),
(attr1, attr2))
sc = SkyCoord(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2),
frame=Galactic,
unit=(unit1, unit2, unit3), representation_type=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2),
(attr1, attr2))
kwargs = {attr1: c1, attr2: c2}
sc = SkyCoord(frame=Galactic, unit=(unit1, unit2),
representation_type=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2),
(attr1, attr2))
@pytest.mark.parametrize(units_attr_args,
[x for x in units_attr_sets if x[0] != 'unitspherical'])
def test_galactic_three_components(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3,
representation, c1, c2, c3):
"""
Tests positional inputs using components (COMP1, COMP2, COMP3)
and various representations. Use weird units and Galactic frame.
"""
sc = Galactic(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2),
1000*c3*u.Unit(unit3/1000), representation_type=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
kwargs = {attr3: c3*unit3}
sc = Galactic(c1*unit1, c2*unit2,
representation_type=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
kwargs = {attr1: c1*unit1, attr2: c2*unit2, attr3: c3*unit3}
sc = Galactic(representation_type=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
@pytest.mark.parametrize(units_attr_args,
[x for x in units_attr_sets
if x[0] in ('spherical', 'unitspherical')])
def test_galactic_spherical_two_components(repr_name, unit1, unit2, unit3, cls2,
attr1, attr2, attr3, representation, c1, c2, c3):
"""
Tests positional inputs using components (COMP1, COMP2) for spherical
representations. Use weird units and Galactic frame.
"""
sc = Galactic(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2), representation_type=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2))
sc = Galactic(c1*unit1, c2*unit2, representation_type=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2))
kwargs = {attr1: c1*unit1, attr2: c2*unit2}
sc = Galactic(representation_type=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2))
@pytest.mark.parametrize(('repr_name', 'unit1', 'unit2', 'unit3', 'cls2', 'attr1', 'attr2', 'attr3'),
[x for x in base_unit_attr_sets if x[0] != 'unitspherical'])
def test_skycoord_coordinate_input(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3):
c1, c2, c3 = 1, 2, 3
sc = SkyCoord([(c1, c2, c3)], unit=(unit1, unit2, unit3), representation_type=repr_name,
frame='galactic')
assert_quantities_allclose(sc, ([c1]*unit1, [c2]*unit2, [c3]*unit3), (attr1, attr2, attr3))
c1, c2, c3 = 1*unit1, 2*unit2, 3*unit3
sc = SkyCoord([(c1, c2, c3)], representation_type=repr_name, frame='galactic')
assert_quantities_allclose(sc, ([1]*unit1, [2]*unit2, [3]*unit3), (attr1, attr2, attr3))
def test_skycoord_string_coordinate_input():
sc = SkyCoord('01 02 03 +02 03 04', unit='deg', representation_type='unitspherical')
assert_quantities_allclose(sc, (Angle('01:02:03', unit='deg'),
Angle('02:03:04', unit='deg')),
('ra', 'dec'))
sc = SkyCoord(['01 02 03 +02 03 04'], unit='deg', representation_type='unitspherical')
assert_quantities_allclose(sc, (Angle(['01:02:03'], unit='deg'),
Angle(['02:03:04'], unit='deg')),
('ra', 'dec'))
def test_units():
sc = SkyCoord(1, 2, 3, unit='m', representation_type='cartesian') # All get meters
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2*u.km, 3, unit='m', representation_type='cartesian') # All get u.m
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2, 3, unit=u.m, representation_type='cartesian') # All get u.m
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2, 3, unit='m, km, pc', representation_type='cartesian')
assert_quantities_allclose(sc, (1*u.m, 2*u.km, 3*u.pc), ('x', 'y', 'z'))
with pytest.raises(u.UnitsError) as err:
SkyCoord(1, 2, 3, unit=(u.m, u.m), representation_type='cartesian')
assert 'should have matching physical types' in str(err.value)
SkyCoord(1, 2, 3, unit=(u.m, u.km, u.pc), representation_type='cartesian')
assert_quantities_allclose(sc, (1*u.m, 2*u.km, 3*u.pc), ('x', 'y', 'z'))
@pytest.mark.xfail
def test_units_known_fail():
# should fail but doesn't => corner case oddity
with pytest.raises(u.UnitsError):
SkyCoord(1, 2, 3, unit=u.deg, representation_type='spherical')
def test_nodata_failure():
with pytest.raises(ValueError):
SkyCoord()
@pytest.mark.parametrize(('mode', 'origin'), [('wcs', 0),
('all', 0),
('all', 1)])
def test_wcs_methods(mode, origin):
from astropy.wcs import WCS
from astropy.utils.data import get_pkg_data_contents
from astropy.wcs.utils import pixel_to_skycoord
header = get_pkg_data_contents('../../wcs/tests/data/maps/1904-66_TAN.hdr', encoding='binary')
wcs = WCS(header)
ref = SkyCoord(0.1 * u.deg, -89. * u.deg, frame='icrs')
xp, yp = ref.to_pixel(wcs, mode=mode, origin=origin)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode, origin=origin).transform_to('icrs')
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
# also try to round-trip with `from_pixel`
scnew = SkyCoord.from_pixel(xp, yp, wcs, mode=mode, origin=origin).transform_to('icrs')
assert_allclose(scnew.ra.degree, ref.ra.degree)
assert_allclose(scnew.dec.degree, ref.dec.degree)
# Also make sure the right type comes out
class SkyCoord2(SkyCoord):
pass
scnew2 = SkyCoord2.from_pixel(xp, yp, wcs, mode=mode, origin=origin)
assert scnew.__class__ is SkyCoord
assert scnew2.__class__ is SkyCoord2
def test_frame_attr_transform_inherit():
"""
Test that frame attributes get inherited as expected during transform.
Driven by #3106.
"""
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK5)
c2 = c.transform_to(FK4)
assert c2.equinox.value == 'B1950.000'
assert c2.obstime.value == 'B1950.000'
c2 = c.transform_to(FK4(equinox='J1975', obstime='J1980'))
assert c2.equinox.value == 'J1975.000'
assert c2.obstime.value == 'J1980.000'
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4)
c2 = c.transform_to(FK5)
assert c2.equinox.value == 'J2000.000'
assert c2.obstime is None
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, obstime='J1980')
c2 = c.transform_to(FK5)
assert c2.equinox.value == 'J2000.000'
assert c2.obstime.value == 'J1980.000'
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, equinox='J1975', obstime='J1980')
c2 = c.transform_to(FK5)
assert c2.equinox.value == 'J1975.000'
assert c2.obstime.value == 'J1980.000'
c2 = c.transform_to(FK5(equinox='J1990'))
assert c2.equinox.value == 'J1990.000'
assert c2.obstime.value == 'J1980.000'
# The work-around for #5722
c = SkyCoord(1 * u.deg, 2 * u.deg, frame='fk5')
c1 = SkyCoord(1 * u.deg, 2 * u.deg, frame='fk5', equinox='B1950.000')
c2 = c1.transform_to(c)
assert not c2.is_equivalent_frame(c) # counterintuitive, but documented
assert c2.equinox.value == 'B1950.000'
c3 = c1.transform_to(c, merge_attributes=False)
assert c3.equinox.value == 'J2000.000'
assert c3.is_equivalent_frame(c)
def test_deepcopy():
c1 = SkyCoord(1 * u.deg, 2 * u.deg)
c2 = copy.copy(c1)
c3 = copy.deepcopy(c1)
c4 = SkyCoord([1, 2] * u.m, [2, 3] * u.m, [3, 4] * u.m, representation_type='cartesian', frame='fk5',
obstime='J1999.9', equinox='J1988.8')
c5 = copy.deepcopy(c4)
assert np.all(c5.x == c4.x) # and y and z
assert c5.frame.name == c4.frame.name
assert c5.obstime == c4.obstime
assert c5.equinox == c4.equinox
assert c5.representation_type == c4.representation_type
def test_no_copy():
c1 = SkyCoord(np.arange(10.) * u.hourangle, np.arange(20., 30.) * u.deg)
c2 = SkyCoord(c1, copy=False)
# Note: c1.ra and c2.ra will *not* share memory, as these are recalculated
# to be in "preferred" units. See discussion in #4883.
assert np.may_share_memory(c1.data.lon, c2.data.lon)
c3 = SkyCoord(c1, copy=True)
assert not np.may_share_memory(c1.data.lon, c3.data.lon)
def test_immutable():
c1 = SkyCoord(1 * u.deg, 2 * u.deg)
with pytest.raises(AttributeError):
c1.ra = 3.0
c1.foo = 42
assert c1.foo == 42
@pytest.mark.skipif('not HAS_SCIPY')
def test_search_around():
"""
Test the search_around_* methods
Here we don't actually test the values are right, just that the methods of
SkyCoord work. The accuracy tests are in ``test_matching.py``
"""
from astropy.utils import NumpyRNGContext
with NumpyRNGContext(987654321):
sc1 = SkyCoord(np.random.rand(20) * 360.*u.degree,
(np.random.rand(20) * 180. - 90.)*u.degree)
sc2 = SkyCoord(np.random.rand(100) * 360. * u.degree,
(np.random.rand(100) * 180. - 90.)*u.degree)
sc1ds = SkyCoord(ra=sc1.ra, dec=sc1.dec, distance=np.random.rand(20)*u.kpc)
sc2ds = SkyCoord(ra=sc2.ra, dec=sc2.dec, distance=np.random.rand(100)*u.kpc)
idx1_sky, idx2_sky, d2d_sky, d3d_sky = sc1.search_around_sky(sc2, 10*u.deg)
idx1_3d, idx2_3d, d2d_3d, d3d_3d = sc1ds.search_around_3d(sc2ds, 250*u.pc)
def test_init_with_frame_instance_keyword():
# Frame instance
c1 = SkyCoord(3 * u.deg, 4 * u.deg,
frame=FK5(equinox='J2010'))
assert c1.equinox == Time('J2010')
# Frame instance with data (data gets ignored)
c2 = SkyCoord(3 * u.deg, 4 * u.deg,
frame=FK5(1. * u.deg, 2 * u.deg,
equinox='J2010'))
assert c2.equinox == Time('J2010')
assert allclose(c2.ra.degree, 3)
assert allclose(c2.dec.degree, 4)
# SkyCoord instance
c3 = SkyCoord(3 * u.deg, 4 * u.deg, frame=c1)
assert c3.equinox == Time('J2010')
# Check duplicate arguments
with pytest.raises(ValueError) as err:
c = SkyCoord(3 * u.deg, 4 * u.deg, frame=FK5(equinox='J2010'), equinox='J2001')
assert "Cannot specify frame attribute 'equinox'" in str(err.value)
def test_guess_from_table():
from astropy.table import Table, Column
from astropy.utils import NumpyRNGContext
tab = Table()
with NumpyRNGContext(987654321):
tab.add_column(Column(data=np.random.rand(10), unit='deg', name='RA[J2000]'))
tab.add_column(Column(data=np.random.rand(10), unit='deg', name='DEC[J2000]'))
sc = SkyCoord.guess_from_table(tab)
npt.assert_array_equal(sc.ra.deg, tab['RA[J2000]'])
npt.assert_array_equal(sc.dec.deg, tab['DEC[J2000]'])
# try without units in the table
tab['RA[J2000]'].unit = None
tab['DEC[J2000]'].unit = None
# should fail if not given explicitly
with pytest.raises(u.UnitsError):
sc2 = SkyCoord.guess_from_table(tab)
# but should work if provided
sc2 = SkyCoord.guess_from_table(tab, unit=u.deg)
npt.assert_array_equal(sc2.ra.deg, tab['RA[J2000]'])
npt.assert_array_equal(sc2.dec.deg, tab['DEC[J2000]'])
# should fail if two options are available - ambiguity bad!
tab.add_column(Column(data=np.random.rand(10), name='RA_J1900'))
with pytest.raises(ValueError) as excinfo:
SkyCoord.guess_from_table(tab, unit=u.deg)
assert 'J1900' in excinfo.value.args[0] and 'J2000' in excinfo.value.args[0]
tab.remove_column('RA_J1900')
tab['RA[J2000]'].unit = u.deg
tab['DEC[J2000]'].unit = u.deg
# but should succeed if the ambiguity can be broken b/c one of the matches
# is the name of a different component
tab.add_column(Column(data=np.random.rand(10)*u.mas/u.yr,
name='pm_ra_cosdec'))
tab.add_column(Column(data=np.random.rand(10)*u.mas/u.yr,
name='pm_dec'))
sc3 = SkyCoord.guess_from_table(tab)
assert u.allclose(sc3.ra, tab['RA[J2000]'])
assert u.allclose(sc3.dec, tab['DEC[J2000]'])
assert u.allclose(sc3.pm_ra_cosdec, tab['pm_ra_cosdec'])
assert u.allclose(sc3.pm_dec, tab['pm_dec'])
# should fail if stuff doesn't have proper units
tab['RA[J2000]'].unit = None
tab['DEC[J2000]'].unit = None
with pytest.raises(u.UnitTypeError, match="no unit was given."):
SkyCoord.guess_from_table(tab)
tab.remove_column('pm_ra_cosdec')
tab.remove_column('pm_dec')
# should also fail if user specifies something already in the table, but
# should succeed even if the user has to give one of the components
with pytest.raises(ValueError):
SkyCoord.guess_from_table(tab, ra=tab['RA[J2000]'], unit=u.deg)
oldra = tab['RA[J2000]']
tab.remove_column('RA[J2000]')
sc3 = SkyCoord.guess_from_table(tab, ra=oldra, unit=u.deg)
npt.assert_array_equal(sc3.ra.deg, oldra)
npt.assert_array_equal(sc3.dec.deg, tab['DEC[J2000]'])
# check a few non-ICRS/spherical systems
x, y, z = np.arange(3).reshape(3, 1) * u.pc
l, b = np.arange(2).reshape(2, 1) * u.deg
tabcart = Table([x, y, z], names=('x', 'y', 'z'))
tabgal = Table([b, l], names=('b', 'l'))
sc_cart = SkyCoord.guess_from_table(tabcart, representation_type='cartesian')
npt.assert_array_equal(sc_cart.x, x)
npt.assert_array_equal(sc_cart.y, y)
npt.assert_array_equal(sc_cart.z, z)
sc_gal = SkyCoord.guess_from_table(tabgal, frame='galactic')
npt.assert_array_equal(sc_gal.l, l)
npt.assert_array_equal(sc_gal.b, b)
# also try some column names that *end* with the attribute name
tabgal['b'].name = 'gal_b'
tabgal['l'].name = 'gal_l'
SkyCoord.guess_from_table(tabgal, frame='galactic')
tabgal['gal_b'].name = 'blob'
tabgal['gal_l'].name = 'central'
with pytest.raises(ValueError):
SkyCoord.guess_from_table(tabgal, frame='galactic')
def test_skycoord_list_creation():
"""
Test that SkyCoord can be created in a reasonable way with lists of SkyCoords
(regression for #2702)
"""
sc = SkyCoord(ra=[1, 2, 3]*u.deg, dec=[4, 5, 6]*u.deg)
sc0 = sc[0]
sc2 = sc[2]
scnew = SkyCoord([sc0, sc2])
assert np.all(scnew.ra == [1, 3]*u.deg)
assert np.all(scnew.dec == [4, 6]*u.deg)
# also check ranges
sc01 = sc[:2]
scnew2 = SkyCoord([sc01, sc2])
assert np.all(scnew2.ra == sc.ra)
assert np.all(scnew2.dec == sc.dec)
# now try with a mix of skycoord, frame, and repr objects
frobj = ICRS(2*u.deg, 5*u.deg)
reprobj = UnitSphericalRepresentation(3*u.deg, 6*u.deg)
scnew3 = SkyCoord([sc0, frobj, reprobj])
assert np.all(scnew3.ra == sc.ra)
assert np.all(scnew3.dec == sc.dec)
# should *fail* if different frame attributes or types are passed in
scfk5_j2000 = SkyCoord(1*u.deg, 4*u.deg, frame='fk5')
with pytest.raises(ValueError):
SkyCoord([sc0, scfk5_j2000])
scfk5_j2010 = SkyCoord(1*u.deg, 4*u.deg, frame='fk5', equinox='J2010')
with pytest.raises(ValueError):
SkyCoord([scfk5_j2000, scfk5_j2010])
# but they should inherit if they're all consistent
scfk5_2_j2010 = SkyCoord(2*u.deg, 5*u.deg, frame='fk5', equinox='J2010')
scfk5_3_j2010 = SkyCoord(3*u.deg, 6*u.deg, frame='fk5', equinox='J2010')
scnew4 = SkyCoord([scfk5_j2010, scfk5_2_j2010, scfk5_3_j2010])
assert np.all(scnew4.ra == sc.ra)
assert np.all(scnew4.dec == sc.dec)
assert scnew4.equinox == Time('J2010')
def test_nd_skycoord_to_string():
c = SkyCoord(np.ones((2, 2)), 1, unit=('deg', 'deg'))
ts = c.to_string()
assert np.all(ts.shape == c.shape)
assert np.all(ts == '1 1')
def test_equiv_skycoord():
sci1 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
sci2 = SkyCoord(1*u.deg, 3*u.deg, frame='icrs')
assert sci1.is_equivalent_frame(sci1)
assert sci1.is_equivalent_frame(sci2)
assert sci1.is_equivalent_frame(ICRS())
assert not sci1.is_equivalent_frame(FK5())
with pytest.raises(TypeError):
sci1.is_equivalent_frame(10)
scf1 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5')
scf2 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5', equinox='J2005')
    # obstime is *not* an FK5 attribute, but we still want scf1 and scf3 to
    # come out different because they're part of SkyCoord
scf3 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5', obstime='J2005')
assert scf1.is_equivalent_frame(scf1)
assert not scf1.is_equivalent_frame(sci1)
assert scf1.is_equivalent_frame(FK5())
assert not scf1.is_equivalent_frame(scf2)
assert scf2.is_equivalent_frame(FK5(equinox='J2005'))
assert not scf3.is_equivalent_frame(scf1)
assert not scf3.is_equivalent_frame(FK5(equinox='J2005'))
def test_equiv_skycoord_with_extra_attrs():
"""Regression test for #10658."""
# GCRS has a CartesianRepresentationAttribute called obsgeoloc
gcrs = GCRS(1*u.deg, 2*u.deg, obsgeoloc=CartesianRepresentation([1, 2, 3], unit=u.m))
# Create a SkyCoord where obsgeoloc tags along as an extra attribute
sc1 = SkyCoord(gcrs).transform_to(ICRS)
# Now create a SkyCoord with an equivalent frame but without the extra attribute
sc2 = SkyCoord(sc1.frame)
# The SkyCoords are therefore not equivalent, but check both directions
assert not sc1.is_equivalent_frame(sc2)
# This way around raised a TypeError which is fixed by #10658
assert not sc2.is_equivalent_frame(sc1)
def test_constellations():
# the actual test for accuracy is in test_funcs - this is just meant to make
# sure we get sensible answers
sc = SkyCoord(135*u.deg, 65*u.deg)
assert sc.get_constellation() == 'Ursa Major'
assert sc.get_constellation(short_name=True) == 'UMa'
scs = SkyCoord([135]*2*u.deg, [65]*2*u.deg)
npt.assert_equal(scs.get_constellation(), ['Ursa Major']*2)
npt.assert_equal(scs.get_constellation(short_name=True), ['UMa']*2)
@pytest.mark.remote_data
def test_constellations_with_nameresolve():
assert SkyCoord.from_name('And I').get_constellation(short_name=True) == 'And'
# you'd think "And ..." should be in Andromeda. But you'd be wrong.
assert SkyCoord.from_name('And VI').get_constellation() == 'Pegasus'
# maybe it's because And VI isn't really a galaxy?
assert SkyCoord.from_name('And XXII').get_constellation() == 'Pisces'
assert SkyCoord.from_name('And XXX').get_constellation() == 'Cassiopeia'
# ok maybe not
# ok, but at least some of the others do make sense...
assert SkyCoord.from_name('Coma Cluster').get_constellation(short_name=True) == 'Com'
assert SkyCoord.from_name('Orion Nebula').get_constellation() == 'Orion'
assert SkyCoord.from_name('Triangulum Galaxy').get_constellation() == 'Triangulum'
def test_getitem_representation():
"""
Make sure current representation survives __getitem__ even if different
from data representation.
"""
sc = SkyCoord([1, 1] * u.deg, [2, 2] * u.deg)
sc.representation_type = 'cartesian'
assert sc[0].representation_type is CartesianRepresentation
def test_spherical_offsets_to_api():
i00 = SkyCoord(0*u.arcmin, 0*u.arcmin, frame='icrs')
fk5 = SkyCoord(0*u.arcmin, 0*u.arcmin, frame='fk5')
with pytest.raises(ValueError):
# different frames should fail
i00.spherical_offsets_to(fk5)
i1deg = ICRS(1*u.deg, 1*u.deg)
dra, ddec = i00.spherical_offsets_to(i1deg)
assert_allclose(dra, 1*u.deg)
assert_allclose(ddec, 1*u.deg)
# make sure an abbreviated array-based version of the above also works
i00s = SkyCoord([0]*4*u.arcmin, [0]*4*u.arcmin, frame='icrs')
i01s = SkyCoord([0]*4*u.arcmin, np.arange(4)*u.arcmin, frame='icrs')
dra, ddec = i00s.spherical_offsets_to(i01s)
assert_allclose(dra, 0*u.arcmin)
assert_allclose(ddec, np.arange(4)*u.arcmin)
@pytest.mark.parametrize('frame', ['icrs', 'galactic'])
@pytest.mark.parametrize('comparison_data', [(0*u.arcmin, 1*u.arcmin),
(1*u.arcmin, 0*u.arcmin),
(1*u.arcmin, 1*u.arcmin)])
def test_spherical_offsets_roundtrip(frame, comparison_data):
i00 = SkyCoord(0*u.arcmin, 0*u.arcmin, frame=frame)
comparison = SkyCoord(*comparison_data, frame=frame)
dlon, dlat = i00.spherical_offsets_to(comparison)
assert_allclose(dlon, comparison.data.lon)
assert_allclose(dlat, comparison.data.lat)
i00_back = comparison.spherical_offsets_by(-dlon, -dlat)
# This reaches machine precision when only one component is changed, but for
# the third parametrized case (both lon and lat change), the transformation
# will have finite accuracy:
assert_allclose(i00_back.data.lon, i00.data.lon, atol=1e-10*u.rad)
assert_allclose(i00_back.data.lat, i00.data.lat, atol=1e-10*u.rad)
# Test roundtripping the other direction:
init_c = SkyCoord(40.*u.deg, 40.*u.deg, frame=frame)
new_c = init_c.spherical_offsets_by(3.534*u.deg, 2.2134*u.deg)
dlon, dlat = new_c.spherical_offsets_to(init_c)
back_c = new_c.spherical_offsets_by(dlon, dlat)
assert init_c.separation(back_c) < 1e-10*u.deg
def test_frame_attr_changes():
"""
This tests the case where a frame is added with a new frame attribute after
a SkyCoord has been created. This is necessary because SkyCoords get the
attributes set at creation time, but the set of attributes can change as
frames are added or removed from the transform graph. This makes sure that
everything continues to work consistently.
"""
sc_before = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
assert 'fakeattr' not in dir(sc_before)
class FakeFrame(BaseCoordinateFrame):
fakeattr = Attribute()
# doesn't matter what this does as long as it just puts the frame in the
# transform graph
transset = (ICRS, FakeFrame, lambda c, f: c)
frame_transform_graph.add_transform(*transset)
try:
assert 'fakeattr' in dir(sc_before)
assert sc_before.fakeattr is None
sc_after1 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
assert 'fakeattr' in dir(sc_after1)
assert sc_after1.fakeattr is None
sc_after2 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs', fakeattr=1)
assert sc_after2.fakeattr == 1
finally:
frame_transform_graph.remove_transform(*transset)
assert 'fakeattr' not in dir(sc_before)
assert 'fakeattr' not in dir(sc_after1)
assert 'fakeattr' not in dir(sc_after2)
def test_cache_clear_sc():
from astropy.coordinates import SkyCoord
i = SkyCoord(1*u.deg, 2*u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
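    # the cache now holds the original representation plus the in-frame-units copy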
assert len(i.cache['representation']) == 2
i.cache.clear()
assert len(i.cache['representation']) == 0
def test_set_attribute_exceptions():
"""Ensure no attrbute for any frame can be set directly.
Though it is fine if the current frame does not have it."""
sc = SkyCoord(1.*u.deg, 2.*u.deg, frame='fk5')
assert hasattr(sc.frame, 'equinox')
with pytest.raises(AttributeError):
sc.equinox = 'B1950'
assert sc.relative_humidity is None
sc.relative_humidity = 0.5
assert sc.relative_humidity == 0.5
assert not hasattr(sc.frame, 'relative_humidity')
def test_extra_attributes():
"""Ensure any extra attributes are dealt with correctly.
Regression test against #5743.
"""
obstime_string = ['2017-01-01T00:00', '2017-01-01T00:10']
obstime = Time(obstime_string)
sc = SkyCoord([5, 10], [20, 30], unit=u.deg, obstime=obstime_string)
assert not hasattr(sc.frame, 'obstime')
assert type(sc.obstime) is Time
assert sc.obstime.shape == (2,)
assert np.all(sc.obstime == obstime)
# ensure equivalency still works for more than one obstime.
assert sc.is_equivalent_frame(sc)
sc_1 = sc[1]
assert sc_1.obstime == obstime[1]
# Transforming to FK4 should use sc.obstime.
sc_fk4 = sc.transform_to('fk4')
assert np.all(sc_fk4.frame.obstime == obstime)
    # And transforming back should not lose it.
sc2 = sc_fk4.transform_to('icrs')
assert not hasattr(sc2.frame, 'obstime')
assert np.all(sc2.obstime == obstime)
# Ensure obstime get taken from the SkyCoord if passed in directly.
# (regression test for #5749).
sc3 = SkyCoord([0., 1.], [2., 3.], unit='deg', frame=sc)
assert np.all(sc3.obstime == obstime)
# Finally, check that we can delete such attributes.
del sc3.obstime
assert sc3.obstime is None
def test_apply_space_motion():
# use this 12 year period because it's a multiple of 4 to avoid the quirks
# of leap years while having 2 leap seconds in it
t1 = Time('2000-01-01T00:00')
t2 = Time('2012-01-01T00:00')
# Check a very simple case first:
frame = ICRS(ra=10.*u.deg, dec=0*u.deg,
distance=10.*u.pc,
pm_ra_cosdec=0.1*u.deg/u.yr,
pm_dec=0*u.mas/u.yr,
radial_velocity=0*u.km/u.s)
# Cases that should work (just testing input for now):
c1 = SkyCoord(frame, obstime=t1, pressure=101*u.kPa)
with pytest.warns(ErfaWarning, match='ERFA function "pmsafe" yielded .*'):
# warning raised due to high PM chosen above
applied1 = c1.apply_space_motion(new_obstime=t2)
applied2 = c1.apply_space_motion(dt=12*u.year)
assert isinstance(applied1.frame, c1.frame.__class__)
assert isinstance(applied2.frame, c1.frame.__class__)
assert_allclose(applied1.ra, applied2.ra)
assert_allclose(applied1.pm_ra_cosdec, applied2.pm_ra_cosdec)
assert_allclose(applied1.dec, applied2.dec)
assert_allclose(applied1.distance, applied2.distance)
# ensure any frame attributes that were there before get passed through
assert applied1.pressure == c1.pressure
    # there were 2 leap seconds between 2000 and 2012, so the difference in
    # the two forms of time evolution should be ~2 sec
adt = np.abs(applied2.obstime - applied1.obstime)
assert 1.9*u.second < adt.to(u.second) < 2.1*u.second
c2 = SkyCoord(frame)
with pytest.warns(ErfaWarning, match='ERFA function "pmsafe" yielded .*'):
# warning raised due to high PM chosen above
applied3 = c2.apply_space_motion(dt=6*u.year)
assert isinstance(applied3.frame, c1.frame.__class__)
assert applied3.obstime is None
# this should *not* be .6 deg due to space-motion on a sphere, but it
# should be fairly close
assert 0.5*u.deg < applied3.ra-c1.ra < .7*u.deg
# the two cases should only match somewhat due to it being space motion, but
# they should be at least this close
assert quantity_allclose(applied1.ra-c1.ra, (applied3.ra-c1.ra)*2, atol=1e-3*u.deg)
# but *not* this close
assert not quantity_allclose(applied1.ra-c1.ra, (applied3.ra-c1.ra)*2, atol=1e-4*u.deg)
with pytest.raises(ValueError):
c2.apply_space_motion(new_obstime=t2)
def test_custom_frame_skycoord():
# also regression check for the case from #7069
class BlahBleeBlopFrame(BaseCoordinateFrame):
default_representation = SphericalRepresentation
# without a differential, SkyCoord creation fails
# default_differential = SphericalDifferential
_frame_specific_representation_info = {
'spherical': [
RepresentationMapping('lon', 'lon', 'recommended'),
RepresentationMapping('lat', 'lat', 'recommended'),
RepresentationMapping('distance', 'radius', 'recommended')
]
}
SkyCoord(lat=1*u.deg, lon=2*u.deg, frame=BlahBleeBlopFrame)
def test_user_friendly_pm_error():
"""
This checks that a more user-friendly error message is raised for the user
if they pass, e.g., pm_ra instead of pm_ra_cosdec
"""
with pytest.raises(ValueError) as e:
SkyCoord(ra=150*u.deg, dec=-11*u.deg,
pm_ra=100*u.mas/u.yr, pm_dec=10*u.mas/u.yr)
assert 'pm_ra_cosdec' in str(e.value)
with pytest.raises(ValueError) as e:
SkyCoord(l=150*u.deg, b=-11*u.deg,
pm_l=100*u.mas/u.yr, pm_b=10*u.mas/u.yr,
frame='galactic')
assert 'pm_l_cosb' in str(e.value)
# The special error should not turn on here:
with pytest.raises(ValueError) as e:
SkyCoord(x=1*u.pc, y=2*u.pc, z=3*u.pc,
pm_ra=100*u.mas/u.yr, pm_dec=10*u.mas/u.yr,
representation_type='cartesian')
assert 'pm_ra_cosdec' not in str(e.value)
def test_contained_by():
"""
    Test SkyCoord.contained_by(wcs, image)
"""
header = """
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1045.0 / Pixel coordinate of reference point
CRPIX2 = 1001.0 / Pixel coordinate of reference point
PC1_1 = -0.00556448550786 / Coordinate transformation matrix element
PC1_2 = -0.001042120133257 / Coordinate transformation matrix element
PC2_1 = 0.001181477028705 / Coordinate transformation matrix element
PC2_2 = -0.005590809742987 / Coordinate transformation matrix element
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / TAN (gnomonic) projection + SIP distortions
CTYPE2 = 'DEC--TAN' / TAN (gnomonic) projection + SIP distortions
CRVAL1 = 250.34971683647 / [deg] Coordinate value at reference point
CRVAL2 = 2.2808772582495 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 2.2808772582495 / [deg] Native latitude of celestial pole
RADESYS = 'ICRS' / Equatorial coordinate system
MJD-OBS = 58612.339199259 / [d] MJD of observation matching DATE-OBS
DATE-OBS= '2019-05-09T08:08:26.816Z' / ISO-8601 observation date matching MJD-OB
NAXIS = 2 / NAXIS
NAXIS1 = 2136 / length of first array dimension
NAXIS2 = 2078 / length of second array dimension
"""
    header = fits.Header.fromstring(header.strip(), '\n')
    test_wcs = WCS(header)
    coord = SkyCoord(254, 2, unit='deg')
    assert coord.contained_by(test_wcs)
    coord = SkyCoord(240, 2, unit='deg')
    assert not coord.contained_by(test_wcs)
    img = np.zeros((2136, 2078))
    coord = SkyCoord(250, 2, unit='deg')
    assert coord.contained_by(test_wcs, img)
    coord = SkyCoord(240, 2, unit='deg')
    assert not coord.contained_by(test_wcs, img)
ra = np.array([254.2, 254.1])
dec = np.array([2, 12.1])
coords = SkyCoord(ra, dec, unit='deg')
assert np.all(test_wcs.footprint_contains(coords) == np.array([True, False]))
def test_none_differential_type():
"""
This is a regression test for #8021
"""
from astropy.coordinates import BaseCoordinateFrame
class MockHeliographicStonyhurst(BaseCoordinateFrame):
default_representation = SphericalRepresentation
frame_specific_representation_info = {
SphericalRepresentation: [RepresentationMapping(reprname='lon',
framename='lon',
defaultunit=u.deg),
RepresentationMapping(reprname='lat',
framename='lat',
defaultunit=u.deg),
RepresentationMapping(reprname='distance',
framename='radius',
defaultunit=None)]
}
fr = MockHeliographicStonyhurst(lon=1*u.deg, lat=2*u.deg, radius=10*u.au)
SkyCoord(0*u.deg, fr.lat, fr.radius, frame=fr) # this was the failure
def test_multiple_aliases():
# Define a frame with multiple aliases
class MultipleAliasesFrame(BaseCoordinateFrame):
name = ['alias_1', 'alias_2']
default_representation = SphericalRepresentation
# Register a transform, which adds the aliases to the transform graph
tfun = lambda c, f: f.__class__(lon=c.lon, lat=c.lat)
ftrans = FunctionTransform(tfun, MultipleAliasesFrame, MultipleAliasesFrame,
register_graph=frame_transform_graph)
coord = SkyCoord(lon=1*u.deg, lat=2*u.deg, frame=MultipleAliasesFrame)
# Test attribute-style access returns self (not a copy)
assert coord.alias_1 is coord
assert coord.alias_2 is coord
# Test for aliases in __dir__()
assert 'alias_1' in coord.__dir__()
assert 'alias_2' in coord.__dir__()
# Test transform_to() calls
assert isinstance(coord.transform_to('alias_1').frame, MultipleAliasesFrame)
assert isinstance(coord.transform_to('alias_2').frame, MultipleAliasesFrame)
ftrans.unregister(frame_transform_graph)
@pytest.mark.parametrize("kwargs, error_message", [
(
{"ra": 1, "dec": 1, "distance": 1 * u.pc, "unit": "deg"},
r"Unit 'deg' \(angle\) could not be applied to 'distance'. ",
),
(
{"rho": 1 * u.m, "phi": 1, "z": 1 * u.m, "unit": "deg", "representation_type": "cylindrical"},
r"Unit 'deg' \(angle\) could not be applied to 'rho'. ",
),
])
def test_passing_inconsistent_coordinates_and_units_raises_helpful_error(kwargs, error_message):
# https://github.com/astropy/astropy/issues/10725
with pytest.raises(ValueError, match=error_message):
SkyCoord(**kwargs)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy.")
def test_match_to_catalog_3d_and_sky():
# Test for issue #5857. See PR #11449
cfk5_default = SkyCoord([1, 2, 3, 4] * u.degree, [0, 0, 0, 0] * u.degree, distance=[1, 1, 1.5, 1] * u.kpc,
frame='fk5')
cfk5_J1950 = cfk5_default.transform_to(FK5(equinox='J1950'))
idx, angle, quantity = cfk5_J1950.match_to_catalog_3d(cfk5_default)
npt.assert_array_equal(idx, [0, 1, 2, 3])
assert_allclose(angle, 0*u.deg, atol=2e-15*u.deg, rtol=0)
assert_allclose(quantity, 0*u.kpc, atol=1e-15*u.kpc, rtol=0)
idx, angle, distance = cfk5_J1950.match_to_catalog_sky(cfk5_default)
npt.assert_array_equal(idx, [0, 1, 2, 3])
assert_allclose(angle, 0 * u.deg, atol=2e-15*u.deg, rtol=0)
assert_allclose(distance, 0*u.kpc, atol=2e-15*u.kpc, rtol=0)
|
d7c480eaa04c36659ff1c2671a510c6fe7209902f0de9496bdbd8a67cfa2a193 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This includes tests for the Distance class and related calculations
"""
import pytest
import numpy as np
from numpy import testing as npt
from astropy import units as u
from astropy.units import allclose as quantity_allclose
from astropy.coordinates import Longitude, Latitude, Distance, CartesianRepresentation
from astropy.coordinates.builtin_frames import ICRS, Galactic
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
def test_distances():
"""
Tests functionality for Coordinate class distances and cartesian
transformations.
"""
'''
Distances can also be specified, and allow for a full 3D definition of a
coordinate.
'''
# try all the different ways to initialize a Distance
distance = Distance(12, u.parsec)
Distance(40, unit=u.au)
Distance(value=5, unit=u.kpc)
# need to provide a unit
with pytest.raises(u.UnitsError):
Distance(12)
with pytest.raises(ValueError, match='none of `value`, `z`, `distmod`,'):
Distance(unit=u.km)
# standard units are pre-defined
npt.assert_allclose(distance.lyr, 39.138765325702551)
npt.assert_allclose(distance.km, 370281309776063.0)
# Coordinate objects can be assigned a distance object, giving them a full
# 3D position
c = Galactic(l=158.558650*u.degree, b=-43.350066*u.degree,
distance=Distance(12, u.parsec))
assert quantity_allclose(c.distance, 12 * u.pc)
# or initialize distances via redshifts - this is actually tested in the
# function below that checks for scipy. This is kept here as an example
# c.distance = Distance(z=0.2) # uses current cosmology
# with whatever your preferred cosmology may be
# c.distance = Distance(z=0.2, cosmology=WMAP5)
# Coordinate objects can be initialized with a distance using special
# syntax
c1 = Galactic(l=158.558650*u.deg, b=-43.350066*u.deg, distance=12 * u.kpc)
# Coordinate objects can be instantiated with cartesian coordinates
# Internally they will immediately be converted to two angles + a distance
cart = CartesianRepresentation(x=2 * u.pc, y=4 * u.pc, z=8 * u.pc)
c2 = Galactic(cart)
sep12 = c1.separation_3d(c2)
# returns a *3d* distance between the c1 and c2 coordinates
    # (note that this is *not* an on-sky angular separation)
assert isinstance(sep12, Distance)
    npt.assert_allclose(sep12.pc, 12005.784163916317, atol=10)
'''
All spherical coordinate systems with distances can be converted to
cartesian coordinates.
'''
cartrep2 = c2.cartesian
assert isinstance(cartrep2.x, u.Quantity)
npt.assert_allclose(cartrep2.x.value, 2)
npt.assert_allclose(cartrep2.y.value, 4)
npt.assert_allclose(cartrep2.z.value, 8)
# with no distance, the unit sphere is assumed when converting to cartesian
c3 = Galactic(l=158.558650*u.degree, b=-43.350066*u.degree, distance=None)
unitcart = c3.cartesian
npt.assert_allclose(((unitcart.x**2 + unitcart.y**2 +
unitcart.z**2)**0.5).value, 1.0)
# TODO: choose between these when CartesianRepresentation gets a definite
# decision on whether or not it gets __add__
#
# CartesianRepresentation objects can be added and subtracted, which are
# vector/elementwise they can also be given as arguments to a coordinate
# system
# csum = ICRS(c1.cartesian + c2.cartesian)
csumrep = CartesianRepresentation(c1.cartesian.xyz + c2.cartesian.xyz)
csum = ICRS(csumrep)
npt.assert_allclose(csumrep.x.value, -8.12016610185)
npt.assert_allclose(csumrep.y.value, 3.19380597435)
npt.assert_allclose(csumrep.z.value, -8.2294483707)
npt.assert_allclose(csum.ra.degree, 158.529401774)
npt.assert_allclose(csum.dec.degree, -43.3235825777)
npt.assert_allclose(csum.distance.kpc, 11.9942200501)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distances_scipy():
"""
The distance-related tests that require scipy due to the cosmology
module needing scipy integration routines
"""
from astropy.cosmology import WMAP5
# try different ways to initialize a Distance
d4 = Distance(z=0.23) # uses default cosmology - as of writing, WMAP7
npt.assert_allclose(d4.z, 0.23, rtol=1e-8)
d5 = Distance(z=0.23, cosmology=WMAP5)
npt.assert_allclose(d5.compute_z(WMAP5), 0.23, rtol=1e-8)
d6 = Distance(z=0.23, cosmology=WMAP5, unit=u.km)
npt.assert_allclose(d6.value, 3.5417046898762366e+22)
with pytest.raises(ValueError, match='a `cosmology` was given but `z`'):
Distance(parallax=1*u.mas, cosmology=WMAP5)
# Regression test for #12531
with pytest.raises(ValueError, match='more than one'):
Distance(z=0.23, parallax=1*u.mas)
# vectors! regression test for #11949
d4 = Distance(z=[0.23, 0.45]) # as of writing, Planck18
npt.assert_allclose(d4.z, [0.23, 0.45], rtol=1e-8)
def test_distance_change():
ra = Longitude("4:08:15.162342", unit=u.hour)
dec = Latitude("-41:08:15.162342", unit=u.degree)
c1 = ICRS(ra, dec, Distance(1, unit=u.kpc))
oldx = c1.cartesian.x.value
    assert abs(oldx - 0.35284083171901953) < 1e-10
# first make sure distances are immutable
with pytest.raises(AttributeError):
c1.distance = Distance(2, unit=u.kpc)
    # now x should increase with a bigger distance
c2 = ICRS(ra, dec, Distance(2, unit=u.kpc))
assert c2.cartesian.x.value == oldx * 2
def test_distance_is_quantity():
"""
test that distance behaves like a proper quantity
"""
Distance(2 * u.kpc)
d = Distance([2, 3.1], u.kpc)
assert d.shape == (2,)
a = d.view(np.ndarray)
q = d.view(u.Quantity)
a[0] = 1.2
q.value[1] = 5.4
assert d[0].value == 1.2
assert d[1].value == 5.4
q = u.Quantity(d, copy=True)
q.value[1] = 0
assert q.value[1] == 0
assert d.value[1] != 0
# regression test against #2261
d = Distance([2 * u.kpc, 250. * u.pc])
assert d.unit is u.kpc
assert np.all(d.value == np.array([2., 0.25]))
def test_distmod():
d = Distance(10, u.pc)
assert d.distmod.value == 0
d = Distance(distmod=20)
assert d.distmod.value == 20
assert d.kpc == 100
d = Distance(distmod=-1., unit=u.au)
npt.assert_allclose(d.value, 1301442.9440836983)
with pytest.raises(ValueError):
d = Distance(value=d, distmod=20)
with pytest.raises(ValueError):
d = Distance(z=.23, distmod=20)
# check the Mpc/kpc/pc behavior
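    # (a distance modulus mu corresponds to d = 10**(1 + mu/5) pc, so
    # mu=1 -> ~16 pc, mu=11 -> ~1.6 kpc, mu=26 -> ~1.6 Mpc, mu=-21 -> ~130 AU)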
assert Distance(distmod=1).unit == u.pc
assert Distance(distmod=11).unit == u.kpc
assert Distance(distmod=26).unit == u.Mpc
assert Distance(distmod=-21).unit == u.AU
# if an array, uses the mean of the log of the distances
assert Distance(distmod=[1, 11, 26]).unit == u.kpc
def test_parallax():
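    # parallax and distance are reciprocal: d/pc = 1 / (parallax/arcsec)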
d = Distance(parallax=1*u.arcsecond)
assert d.pc == 1.
with pytest.raises(ValueError):
d = Distance(15*u.pc, parallax=20*u.milliarcsecond)
with pytest.raises(ValueError):
d = Distance(parallax=20*u.milliarcsecond, distmod=20)
# array
plx = [1, 10, 100.]*u.mas
d = Distance(parallax=plx)
assert quantity_allclose(d.pc, [1000., 100., 10.])
assert quantity_allclose(plx, d.parallax)
# check behavior for negative parallax
with pytest.raises(ValueError):
Distance(parallax=-1 * u.mas)
with pytest.raises(ValueError):
Distance(parallax=[10, 1, -1] * u.mas)
with pytest.warns(AstropyWarning):
Distance(parallax=-1 * u.mas, allow_negative=True)
with pytest.warns(AstropyWarning):
Distance(parallax=[10, 1, -1] * u.mas, allow_negative=True)
# Regression test for #12569; `unit` was ignored if `parallax` was given.
d = Distance(parallax=1*u.mas, unit=u.kpc)
assert d.value == 1.
assert d.unit is u.kpc
def test_distance_in_coordinates():
"""
test that distances can be created from quantities and that cartesian
representations come out right
"""
ra = Longitude("4:08:15.162342", unit=u.hour)
dec = Latitude("-41:08:15.162342", unit=u.degree)
coo = ICRS(ra, dec, distance=2*u.kpc)
cart = coo.cartesian
assert isinstance(cart.xyz, u.Quantity)
def test_negative_distance():
""" Test optional kwarg allow_negative """
with pytest.raises(ValueError):
Distance([-2, 3.1], u.kpc)
with pytest.raises(ValueError):
Distance([-2, -3.1], u.kpc)
with pytest.raises(ValueError):
Distance(-2, u.kpc)
d = Distance(-2, u.kpc, allow_negative=True)
assert d.value == -2
def test_distance_comparison():
"""Ensure comparisons of distances work (#2206, #2250)"""
a = Distance(15*u.kpc)
b = Distance(15*u.kpc)
assert a == b
c = Distance(1.*u.Mpc)
assert a < c
def test_distance_to_quantity_when_not_units_of_length():
"""Any operation that leaves units other than those of length
should turn a distance into a quantity (#2206, #2250)"""
d = Distance(15*u.kpc)
twice = 2.*d
assert isinstance(twice, Distance)
area = 4.*np.pi*d**2
assert area.unit.is_equivalent(u.m**2)
assert not isinstance(area, Distance)
assert type(area) is u.Quantity
def test_distance_nan():
# Check that giving NaNs to Distance doesn't emit a warning
Distance([0, np.nan, 1] * u.m)
|
13878be49cd83de1e3391c897e554f97d247d1fc0500cdb106ce02831df44162 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test initialization of angles not already covered by the API tests"""
import pickle
import pytest
import numpy as np
from astropy.coordinates.earth import EarthLocation, ELLIPSOIDS
from astropy.coordinates.angles import Longitude, Latitude
from astropy.units import allclose as quantity_allclose
from astropy import units as u
from astropy.time import Time
from astropy import constants
from astropy.coordinates.name_resolve import NameResolveError
def allclose_m14(a, b, rtol=1.e-14, atol=None):
if atol is None:
atol = 1.e-14 * getattr(a, 'unit', 1)
return quantity_allclose(a, b, rtol, atol)
def allclose_m8(a, b, rtol=1.e-8, atol=None):
if atol is None:
atol = 1.e-8 * getattr(a, 'unit', 1)
return quantity_allclose(a, b, rtol, atol)
def isclose_m14(val, ref):
return np.array([allclose_m14(v, r) for (v, r) in zip(val, ref)])
def isclose_m8(val, ref):
return np.array([allclose_m8(v, r) for (v, r) in zip(val, ref)])
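# The *_m14 and *_m8 helpers mirror the 1e-14 (angle) and 1e-8 (height)
# tolerances used by the ERFA reference tests reproduced below.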
def vvd(val, valok, dval, func, test, status):
"""Mimic routine of erfa/src/t_erfa_c.c (to help copy & paste)"""
assert quantity_allclose(val, valok * val.unit, atol=dval * val.unit)
def test_gc2gd():
"""Test that we reproduce erfa/src/t_erfa_c.c t_gc2gd"""
x, y, z = (2e6, 3e6, 5.244e6)
status = 0 # help for copy & paste of vvd
location = EarthLocation.from_geocentric(x, y, z, u.m)
e, p, h = location.to_geodetic('WGS84')
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
e, p, h = location.to_geodetic('GRS80')
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
e, p, h = location.to_geodetic('WGS72')
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e3", status)
vvd(p, 0.97160181811015119, 1e-14, "eraGc2gd", "p3", status)
vvd(h, 333.27707261303181, 1e-8, "eraGc2gd", "h3", status)
def test_gd2gc():
"""Test that we reproduce erfa/src/t_erfa_c.c t_gd2gc"""
e = 3.1 * u.rad
p = -0.5 * u.rad
h = 2500.0 * u.m
status = 0 # help for copy & paste of vvd
location = EarthLocation.from_geodetic(e, p, h, ellipsoid='WGS84')
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5599000.5577049947, 1e-7, "eraGd2gc", "0/1", status)
vvd(xyz[1], 233011.67223479203, 1e-7, "eraGd2gc", "1/1", status)
vvd(xyz[2], -3040909.4706983363, 1e-7, "eraGd2gc", "2/1", status)
location = EarthLocation.from_geodetic(e, p, h, ellipsoid='GRS80')
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5599000.5577260984, 1e-7, "eraGd2gc", "0/2", status)
vvd(xyz[1], 233011.6722356703, 1e-7, "eraGd2gc", "1/2", status)
vvd(xyz[2], -3040909.4706095476, 1e-7, "eraGd2gc", "2/2", status)
location = EarthLocation.from_geodetic(e, p, h, ellipsoid='WGS72')
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5598998.7626301490, 1e-7, "eraGd2gc", "0/3", status)
vvd(xyz[1], 233011.5975297822, 1e-7, "eraGd2gc", "1/3", status)
vvd(xyz[2], -3040908.6861467111, 1e-7, "eraGd2gc", "2/3", status)
class TestInput():
def setup(self):
self.lon = Longitude([0., 45., 90., 135., 180., -180, -90, -45], u.deg,
wrap_angle=180*u.deg)
self.lat = Latitude([+0., 30., 60., +90., -90., -60., -30., 0.], u.deg)
self.h = u.Quantity([0.1, 0.5, 1.0, -0.5, -1.0, +4.2, -11., -.1], u.m)
self.location = EarthLocation.from_geodetic(self.lon, self.lat, self.h)
self.x, self.y, self.z = self.location.to_geocentric()
def test_default_ellipsoid(self):
assert self.location.ellipsoid == EarthLocation._ellipsoid
def test_geo_attributes(self):
assert all(np.all(_1 == _2)
for _1, _2 in zip(self.location.geodetic,
self.location.to_geodetic()))
assert all(np.all(_1 == _2)
for _1, _2 in zip(self.location.geocentric,
self.location.to_geocentric()))
def test_attribute_classes(self):
"""Test that attribute classes are correct (and not EarthLocation)"""
assert type(self.location.x) is u.Quantity
assert type(self.location.y) is u.Quantity
assert type(self.location.z) is u.Quantity
assert type(self.location.lon) is Longitude
assert type(self.location.lat) is Latitude
assert type(self.location.height) is u.Quantity
def test_input(self):
"""Check input is parsed correctly"""
# units of length should be assumed geocentric
geocentric = EarthLocation(self.x, self.y, self.z)
assert np.all(geocentric == self.location)
geocentric2 = EarthLocation(self.x.value, self.y.value, self.z.value,
self.x.unit)
assert np.all(geocentric2 == self.location)
geodetic = EarthLocation(self.lon, self.lat, self.h)
assert np.all(geodetic == self.location)
geodetic2 = EarthLocation(self.lon.to_value(u.degree),
self.lat.to_value(u.degree),
self.h.to_value(u.m))
assert np.all(geodetic2 == self.location)
geodetic3 = EarthLocation(self.lon, self.lat)
assert allclose_m14(geodetic3.lon.value,
self.location.lon.value)
assert allclose_m14(geodetic3.lat.value,
self.location.lat.value)
assert not np.any(isclose_m14(geodetic3.height.value,
self.location.height.value))
geodetic4 = EarthLocation(self.lon, self.lat, self.h[-1])
assert allclose_m14(geodetic4.lon.value,
self.location.lon.value)
assert allclose_m14(geodetic4.lat.value,
self.location.lat.value)
assert allclose_m14(geodetic4.height[-1].value,
self.location.height[-1].value)
assert not np.any(isclose_m14(geodetic4.height[:-1].value,
self.location.height[:-1].value))
# check length unit preservation
geocentric5 = EarthLocation(self.x, self.y, self.z, u.pc)
assert geocentric5.unit is u.pc
assert geocentric5.x.unit is u.pc
assert geocentric5.height.unit is u.pc
assert allclose_m14(geocentric5.x.to_value(self.x.unit), self.x.value)
geodetic5 = EarthLocation(self.lon, self.lat, self.h.to(u.pc))
assert geodetic5.unit is u.pc
assert geodetic5.x.unit is u.pc
assert geodetic5.height.unit is u.pc
assert allclose_m14(geodetic5.x.to_value(self.x.unit), self.x.value)
def test_invalid_input(self):
"""Check invalid input raises exception"""
# incomprehensible by either raises TypeError
with pytest.raises(TypeError):
EarthLocation(self.lon, self.y, self.z)
# wrong units
with pytest.raises(u.UnitsError):
EarthLocation.from_geocentric(self.lon, self.lat, self.lat)
# inconsistent units
with pytest.raises(u.UnitsError):
EarthLocation.from_geocentric(self.h, self.lon, self.lat)
# floats without a unit
with pytest.raises(TypeError):
EarthLocation.from_geocentric(self.x.value, self.y.value,
self.z.value)
# inconsistent shape
with pytest.raises(ValueError):
EarthLocation.from_geocentric(self.x, self.y, self.z[:5])
# inconsistent units
with pytest.raises(u.UnitsError):
EarthLocation.from_geodetic(self.x, self.y, self.z)
# inconsistent shape
with pytest.raises(ValueError):
EarthLocation.from_geodetic(self.lon, self.lat, self.h[:5])
def test_slicing(self):
# test on WGS72 location, so we can check the ellipsoid is passed on
locwgs72 = EarthLocation.from_geodetic(self.lon, self.lat, self.h,
ellipsoid='WGS72')
loc_slice1 = locwgs72[4]
assert isinstance(loc_slice1, EarthLocation)
assert loc_slice1.unit is locwgs72.unit
assert loc_slice1.ellipsoid == locwgs72.ellipsoid == 'WGS72'
assert not loc_slice1.shape
with pytest.raises(TypeError):
loc_slice1[0]
with pytest.raises(IndexError):
len(loc_slice1)
loc_slice2 = locwgs72[4:6]
assert isinstance(loc_slice2, EarthLocation)
assert len(loc_slice2) == 2
assert loc_slice2.unit is locwgs72.unit
assert loc_slice2.ellipsoid == locwgs72.ellipsoid
assert loc_slice2.shape == (2,)
loc_x = locwgs72['x']
assert type(loc_x) is u.Quantity
assert loc_x.shape == locwgs72.shape
assert loc_x.unit is locwgs72.unit
def test_invalid_ellipsoid(self):
# unknown ellipsoid
with pytest.raises(ValueError):
EarthLocation.from_geodetic(self.lon, self.lat, self.h,
ellipsoid='foo')
with pytest.raises(TypeError):
EarthLocation(self.lon, self.lat, self.h, ellipsoid='foo')
with pytest.raises(ValueError):
self.location.ellipsoid = 'foo'
with pytest.raises(ValueError):
self.location.to_geodetic('foo')
@pytest.mark.parametrize('ellipsoid', ELLIPSOIDS)
def test_ellipsoid(self, ellipsoid):
"""Test that different ellipsoids are understood, and differ"""
# check that heights differ for different ellipsoids
# need different tolerance, since heights are relative to ~6000 km
lon, lat, h = self.location.to_geodetic(ellipsoid)
if ellipsoid == self.location.ellipsoid:
assert allclose_m8(h.value, self.h.value)
else:
            # Heights can be very similar between ellipsoids; some lon, lat identical.
assert not np.all(isclose_m8(h.value, self.h.value))
# given lon, lat, height, check that x,y,z differ
location = EarthLocation.from_geodetic(self.lon, self.lat, self.h,
ellipsoid=ellipsoid)
if ellipsoid == self.location.ellipsoid:
assert allclose_m14(location.z.value, self.z.value)
else:
assert not np.all(isclose_m14(location.z.value, self.z.value))
def test_to_value(self):
loc = self.location
loc_ndarray = loc.view(np.ndarray)
assert np.all(loc.value == loc_ndarray)
loc2 = self.location.to(u.km)
loc2_ndarray = np.empty_like(loc_ndarray)
for coo in 'x', 'y', 'z':
loc2_ndarray[coo] = loc_ndarray[coo] / 1000.
assert np.all(loc2.value == loc2_ndarray)
loc2_value = self.location.to_value(u.km)
assert np.all(loc2_value == loc2_ndarray)
def test_pickling():
"""Regression test against #4304."""
el = EarthLocation(0.*u.m, 6000*u.km, 6000*u.km)
s = pickle.dumps(el)
el2 = pickle.loads(s)
assert el == el2
def test_repr_latex():
"""
Regression test for issue #4542
"""
somelocation = EarthLocation(lon='149:3:57.9', lat='-31:16:37.3')
somelocation._repr_latex_()
somelocation2 = EarthLocation(lon=[1., 2.]*u.deg, lat=[-1., 9.]*u.deg)
somelocation2._repr_latex_()
@pytest.mark.remote_data
# TODO: this parametrize should include a second option with a valid Google API
# key. For example, we should make an API key for Astropy, and add it to GitHub Actions
# as an environment variable (for security).
@pytest.mark.parametrize('google_api_key', [None])
def test_of_address(google_api_key):
NYC_lon = -74.0 * u.deg
NYC_lat = 40.7 * u.deg
# ~10 km tolerance to address difference between OpenStreetMap and Google
# for "New York, NY". This doesn't matter in practice because this test is
# only used to verify that the query succeeded, not that the returned
# position is precise.
NYC_tol = 0.1 * u.deg
# just a location
try:
loc = EarthLocation.of_address("New York, NY")
except NameResolveError as e:
# API limit might surface even here in CI.
if 'unknown failure with' not in str(e):
pytest.xfail(str(e))
else:
assert quantity_allclose(loc.lat, NYC_lat, atol=NYC_tol)
assert quantity_allclose(loc.lon, NYC_lon, atol=NYC_tol)
assert np.allclose(loc.height.value, 0.)
# Put this one here as buffer to get around Google map API limit per sec.
# no match: This always raises NameResolveError
with pytest.raises(NameResolveError):
EarthLocation.of_address("lkjasdflkja")
if google_api_key is not None:
# a location and height
try:
loc = EarthLocation.of_address("New York, NY", get_height=True)
except NameResolveError as e:
# Buffer above sometimes insufficient to get around API limit but
# we also do not want to drag things out with time.sleep(0.195),
# where 0.195 was empirically determined on some physical machine.
            pytest.xfail(str(e))
else:
assert quantity_allclose(loc.lat, NYC_lat, atol=NYC_tol)
assert quantity_allclose(loc.lon, NYC_lon, atol=NYC_tol)
assert quantity_allclose(loc.height, 10.438*u.meter, atol=1.*u.cm)
def test_geodetic_tuple():
lat = 2*u.deg
lon = 10*u.deg
height = 100*u.m
el = EarthLocation.from_geodetic(lat=lat, lon=lon, height=height)
res1 = el.to_geodetic()
res2 = el.geodetic
assert res1.lat == res2.lat and quantity_allclose(res1.lat, lat)
assert res1.lon == res2.lon and quantity_allclose(res1.lon, lon)
assert res1.height == res2.height and quantity_allclose(res1.height, height)
def test_gravitational_redshift():
someloc = EarthLocation(lon=-87.7*u.deg, lat=37*u.deg)
sometime = Time('2017-8-21 18:26:40')
zg0 = someloc.gravitational_redshift(sometime)
# should be of order ~few mm/s change per week
zg_week = someloc.gravitational_redshift(sometime + 7 * u.day)
assert 1.*u.mm/u.s < abs(zg_week - zg0) < 1*u.cm/u.s
# ~cm/s over a half-year
zg_halfyear = someloc.gravitational_redshift(sometime + 0.5 * u.yr)
assert 1*u.cm/u.s < abs(zg_halfyear - zg0) < 1*u.dm/u.s
# but when back to the same time in a year, should be tenths of mm
# even over decades
zg_year = someloc.gravitational_redshift(sometime - 20 * u.year)
assert .1*u.mm/u.s < abs(zg_year - zg0) < 1*u.mm/u.s
# Check mass adjustments.
# If Jupiter and the moon are ignored, effect should be off by ~ .5 mm/s
masses = {'sun': constants.G*constants.M_sun,
'jupiter': 0*constants.G*u.kg,
'moon': 0*constants.G*u.kg}
zg_moonjup = someloc.gravitational_redshift(sometime, masses=masses)
assert .1*u.mm/u.s < abs(zg_moonjup - zg0) < 1*u.mm/u.s
# Check that simply not including the bodies gives the same result.
assert zg_moonjup == someloc.gravitational_redshift(sometime,
bodies=('sun',))
# And that earth can be given, even not as last argument
assert zg_moonjup == someloc.gravitational_redshift(
sometime, bodies=('earth', 'sun',))
# If the earth is also ignored, effect should be off by ~ 20 cm/s
# This also tests the conversion of kg to gravitational units.
masses['earth'] = 0*u.kg
zg_moonjupearth = someloc.gravitational_redshift(sometime, masses=masses)
assert 1*u.dm/u.s < abs(zg_moonjupearth - zg0) < 1*u.m/u.s
# If all masses are zero, redshift should be 0 as well.
masses['sun'] = 0*u.kg
assert someloc.gravitational_redshift(sometime, masses=masses) == 0
with pytest.raises(KeyError):
someloc.gravitational_redshift(sometime, bodies=('saturn',))
with pytest.raises(u.UnitsError):
masses = {'sun': constants.G*constants.M_sun,
'jupiter': constants.G*constants.M_jup,
'moon': 1*u.km, # wrong units!
'earth': constants.G*constants.M_earth}
someloc.gravitational_redshift(sometime, masses=masses)
def test_read_only_input():
lon = np.array([80., 440.]) * u.deg
lat = np.array([45.]) * u.deg
lon.flags.writeable = lat.flags.writeable = False
loc = EarthLocation.from_geodetic(lon=lon, lat=lat)
assert quantity_allclose(loc[1].x, loc[0].x)
def test_info():
EarthLocation._get_site_registry(force_builtin=True)
greenwich = EarthLocation.of_site('greenwich')
assert str(greenwich.info).startswith('name = Royal Observatory Greenwich')
|
4d7a332f4b61e307ce34170d702dd601b240ff01de95a05eb3705a7c09c21cbf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.units import allclose as quantity_allclose
from astropy import units as u
from astropy import constants
from astropy.time import Time
from astropy.coordinates.builtin_frames import ICRS, AltAz, LSR, GCRS, Galactic, FK5
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.sites import get_builtin_sites
from astropy.coordinates import (TimeAttribute,
FunctionTransformWithFiniteDifference, get_sun,
CartesianRepresentation, SphericalRepresentation,
CartesianDifferential, SphericalDifferential,
DynamicMatrixTransform)
J2000 = Time('J2000')
@pytest.mark.parametrize("dt, symmetric", [(1*u.second, True),
(1*u.year, True),
(1*u.second, False),
(1*u.year, False)])
def test_faux_lsr(dt, symmetric):
class LSR2(LSR):
obstime = TimeAttribute(default=J2000)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
ICRS, LSR2, finite_difference_dt=dt,
symmetric_finite_difference=symmetric)
def icrs_to_lsr(icrs_coo, lsr_frame):
dt = lsr_frame.obstime - J2000
offset = lsr_frame.v_bary * dt.to(u.second)
return lsr_frame.realize_frame(icrs_coo.data.without_differentials() + offset)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
LSR2, ICRS, finite_difference_dt=dt,
symmetric_finite_difference=symmetric)
def lsr_to_icrs(lsr_coo, icrs_frame):
dt = lsr_coo.obstime - J2000
offset = lsr_coo.v_bary * dt.to(u.second)
return icrs_frame.realize_frame(lsr_coo.data - offset)
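    # with zero velocity in ICRS, the finite-difference machinery should
    # recover an induced LSR2 velocity with magnitude equal to |v_bary|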
ic = ICRS(ra=12.3*u.deg, dec=45.6*u.deg, distance=7.8*u.au,
pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
radial_velocity=0*u.km/u.s)
lsrc = ic.transform_to(LSR2())
assert quantity_allclose(ic.cartesian.xyz, lsrc.cartesian.xyz)
idiff = ic.cartesian.differentials['s']
ldiff = lsrc.cartesian.differentials['s']
change = (ldiff.d_xyz - idiff.d_xyz).to(u.km/u.s)
totchange = np.sum(change**2)**0.5
assert quantity_allclose(totchange, np.sum(lsrc.v_bary.d_xyz**2)**0.5)
ic2 = ICRS(ra=120.3*u.deg, dec=45.6*u.deg, distance=7.8*u.au,
pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=10*u.marcsec/u.yr,
radial_velocity=1000*u.km/u.s)
lsrc2 = ic2.transform_to(LSR2())
ic2_roundtrip = lsrc2.transform_to(ICRS())
tot = np.sum(lsrc2.cartesian.differentials['s'].d_xyz**2)**0.5
assert np.abs(tot.to('km/s') - 1000*u.km/u.s) < 20*u.km/u.s
assert quantity_allclose(ic2.cartesian.xyz,
ic2_roundtrip.cartesian.xyz)
def test_faux_fk5_galactic():
from astropy.coordinates.builtin_frames.galactic_transforms import fk5_to_gal, _gal_to_fk5
class Galactic2(Galactic):
pass
dt = 1000*u.s
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
FK5, Galactic2, finite_difference_dt=dt,
symmetric_finite_difference=True,
finite_difference_frameattr_name=None)
def fk5_to_gal2(fk5_coo, gal_frame):
trans = DynamicMatrixTransform(fk5_to_gal, FK5, Galactic2)
return trans(fk5_coo, gal_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                     Galactic2, FK5, finite_difference_dt=dt,
symmetric_finite_difference=True,
finite_difference_frameattr_name=None)
def gal2_to_fk5(gal_coo, fk5_frame):
trans = DynamicMatrixTransform(_gal_to_fk5, Galactic2, FK5)
return trans(gal_coo, fk5_frame)
c1 = FK5(ra=150*u.deg, dec=-17*u.deg, radial_velocity=83*u.km/u.s,
pm_ra_cosdec=-41*u.mas/u.yr, pm_dec=16*u.mas/u.yr,
distance=150*u.pc)
c2 = c1.transform_to(Galactic2())
c3 = c1.transform_to(Galactic())
# compare the matrix and finite-difference calculations
assert quantity_allclose(c2.pm_l_cosb, c3.pm_l_cosb, rtol=1e-4)
assert quantity_allclose(c2.pm_b, c3.pm_b, rtol=1e-4)
def test_gcrs_diffs():
time = Time('2017-01-01')
gf = GCRS(obstime=time)
sung = get_sun(time) # should have very little vhelio
# qtr-year off sun location should be the direction of ~ maximal vhelio
qtrsung = get_sun(time-.25*u.year)
# now we use those essentially as directions where the velocities should
    # be either maximal or minimal - along or perpendicular to Earth's orbit
msungr = CartesianRepresentation(-sung.cartesian.xyz).represent_as(SphericalRepresentation)
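    # msungr is the anti-sun direction (from the Sun through the Earth)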
suni = ICRS(ra=msungr.lon, dec=msungr.lat, distance=100*u.au,
pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
radial_velocity=0*u.km/u.s)
qtrsuni = ICRS(ra=qtrsung.ra, dec=qtrsung.dec, distance=100*u.au,
pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
radial_velocity=0*u.km/u.s)
# Now we transform those parallel- and perpendicular-to Earth's orbit
# directions to GCRS, which should shift the velocity to either include
# the Earth's velocity vector, or not (for parallel and perpendicular,
# respectively).
sung = suni.transform_to(gf)
qtrsung = qtrsuni.transform_to(gf)
    # the radial velocity should be high along the in-ecliptic direction
    # perpendicular to the sun, and low along the sun axis
assert np.abs(qtrsung.radial_velocity) > 30*u.km/u.s
assert np.abs(qtrsung.radial_velocity) < 40*u.km/u.s
assert np.abs(sung.radial_velocity) < 1*u.km/u.s
suni2 = sung.transform_to(ICRS())
assert np.all(np.abs(suni2.data.differentials['s'].d_xyz) < 3e-5*u.km/u.s)
qtrisun2 = qtrsung.transform_to(ICRS())
assert np.all(np.abs(qtrisun2.data.differentials['s'].d_xyz) < 3e-5*u.km/u.s)
def test_altaz_diffs():
time = Time('J2015') + np.linspace(-1, 1, 1000)*u.day
loc = get_builtin_sites()['greenwich']
aa = AltAz(obstime=time, location=loc)
icoo = ICRS(np.zeros(time.shape)*u.deg, 10*u.deg, 100*u.au,
pm_ra_cosdec=np.zeros(time.shape)*u.marcsec/u.yr,
pm_dec=0*u.marcsec/u.yr,
radial_velocity=0*u.km/u.s)
acoo = icoo.transform_to(aa)
# Make sure the change in radial velocity over ~2 days isn't too much
# more than the rotation speed of the Earth - some excess is expected
# because the orbit also shifts the RV, but it should be pretty small
# over this short a time.
assert np.ptp(acoo.radial_velocity)/2 < (2*np.pi*constants.R_earth/u.day)*1.2 # MAGIC NUMBER
cdiff = acoo.data.differentials['s'].represent_as(CartesianDifferential,
acoo.data)
# The "total" velocity should be > c, because the *tangential* velocity
    # isn't a true velocity, but rather an induced velocity due to the Earth's
# rotation at a distance of 100 AU
assert np.all(np.sum(cdiff.d_xyz**2, axis=0)**0.5 > constants.c)
_xfail = pytest.mark.xfail
@pytest.mark.parametrize('distance', [1000*u.au,
10*u.pc,
pytest.param(10*u.kpc, marks=_xfail),
pytest.param(100*u.kpc, marks=_xfail)])
# TODO: make these not fail when the
# finite-difference numerical stability
# is improved
def test_numerical_limits(distance):
"""
Tests the numerical stability of the default settings for the finite
    difference transformation calculation. This is *known* to fail at
    >~1 kpc, but this may be improved in future versions.
"""
time = Time('J2017') + np.linspace(-.5, .5, 100)*u.year
icoo = ICRS(ra=0*u.deg, dec=10*u.deg, distance=distance,
pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
radial_velocity=0*u.km/u.s)
gcoo = icoo.transform_to(GCRS(obstime=time))
rv = gcoo.radial_velocity.to('km/s')
# if its a lot bigger than this - ~the maximal velocity shift along
# the direction above with a small allowance for noise - finite-difference
# rounding errors have ruined the calculation
assert np.ptp(rv) < 65*u.km/u.s
def diff_info_plot(frame, time):
"""
Useful for plotting a frame with multiple times. *Not* used in the testing
suite per se, but extremely useful for interactive plotting of results from
tests in this module.
"""
from matplotlib import pyplot as plt
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(20, 12))
ax1.plot_date(time.plot_date, frame.data.differentials['s'].d_xyz.to(u.km/u.s).T, fmt='-')
ax1.legend(['x', 'y', 'z'])
ax2.plot_date(time.plot_date, np.sum(frame.data.differentials['s'].d_xyz.to(u.km/u.s)**2, axis=0)**0.5, fmt='-')
ax2.set_title('total')
sd = frame.data.differentials['s'].represent_as(SphericalDifferential, frame.data)
ax3.plot_date(time.plot_date, sd.d_distance.to(u.km/u.s), fmt='-')
ax3.set_title('radial')
ax4.plot_date(time.plot_date, sd.d_lat.to(u.marcsec/u.yr), fmt='-', label='lat')
ax4.plot_date(time.plot_date, sd.d_lon.to(u.marcsec/u.yr), fmt='-', label='lon')
return fig
|
36cdf741b7dcb047a07f2f51fbf3bb99bc3e62c9d494eb93e480e6b4775c4d40 | """
The modules in the accuracy testing subpackage are primarily intended for
comparison with "known-good" (or at least "known-familiar") datasets. More
basic functionality and sanity checks are in the main ``coordinates/tests``
testing modules.
"""
N_ACCURACY_TESTS = 10 # the number of samples to use per accuracy test
|
97967f5f85016afa9dc0b5ac17d96d2a7cb0e6cf75f3b4c429511e1765568381 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Facilities for diffing two FITS files. Includes objects for diffing entire
FITS files, individual HDUs, FITS headers, or just FITS data.
Used to implement the fitsdiff program.
"""
import fnmatch
import glob
import io
import operator
import os
import os.path
import textwrap
from collections import defaultdict
from inspect import signature
from itertools import islice
import numpy as np
from astropy import __version__
from .card import Card, BLANK_CARD
from .header import Header
# HDUList is used in one of the doctests
from .hdu.hdulist import fitsopen, HDUList # pylint: disable=W0611
from .hdu.table import _TableLikeHDU
from astropy.utils.diff import (report_diff_values, fixed_width_indent,
where_not_allclose, diff_values)
from astropy.utils.misc import NOT_OVERWRITING_MSG
__all__ = ['FITSDiff', 'HDUDiff', 'HeaderDiff', 'ImageDataDiff', 'RawDataDiff',
'TableDataDiff']
# Column attributes of interest for comparison
_COL_ATTRS = [('unit', 'units'), ('null', 'null values'),
('bscale', 'bscales'), ('bzero', 'bzeros'),
('disp', 'display formats'), ('dim', 'dimensions')]
class _BaseDiff:
"""
Base class for all FITS diff objects.
When instantiating a FITS diff object, the first two arguments are always
the two objects to diff (two FITS files, two FITS headers, etc.).
Instantiating a ``_BaseDiff`` also causes the diff itself to be executed.
    The returned ``_BaseDiff`` instance has a number of attributes that describe
the results of the diff operation.
The most basic attribute, present on all ``_BaseDiff`` instances, is
``.identical`` which is `True` if the two objects being compared are
identical according to the diff method for objects of that type.
"""
def __init__(self, a, b):
"""
The ``_BaseDiff`` class does not implement a ``_diff`` method and
should not be instantiated directly. Instead instantiate the
appropriate subclass of ``_BaseDiff`` for the objects being compared
(for example, use `HeaderDiff` to compare two `Header` objects.
"""
self.a = a
self.b = b
# For internal use in report output
self._fileobj = None
self._indent = 0
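        # running the diff on construction populates the subclass's
        # ``diff_*`` attributes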
self._diff()
def __bool__(self):
"""
        A ``_BaseDiff`` object acts as `True` in a boolean context if the two
        objects compared have differences.  Otherwise it acts as `False`.
"""
return not self.identical
@classmethod
def fromdiff(cls, other, a, b):
"""
Returns a new Diff object of a specific subclass from an existing diff
object, passing on the values for any arguments they share in common
(such as ignore_keywords).
For example::
>>> from astropy.io import fits
>>> hdul1, hdul2 = fits.HDUList(), fits.HDUList()
>>> headera, headerb = fits.Header(), fits.Header()
>>> fd = fits.FITSDiff(hdul1, hdul2, ignore_keywords=['*'])
>>> hd = fits.HeaderDiff.fromdiff(fd, headera, headerb)
>>> list(hd.ignore_keywords)
['*']
"""
sig = signature(cls.__init__)
# The first 3 arguments of any Diff initializer are self, a, and b.
kwargs = {}
for arg in list(sig.parameters.keys())[3:]:
if hasattr(other, arg):
kwargs[arg] = getattr(other, arg)
return cls(a, b, **kwargs)
@property
def identical(self):
"""
`True` if all the ``.diff_*`` attributes on this diff instance are
empty, implying that no differences were found.
Any subclass of ``_BaseDiff`` must have at least one ``.diff_*``
attribute, which contains a non-empty value if and only if some
difference was found between the two objects being compared.
"""
return not any(getattr(self, attr) for attr in self.__dict__
if attr.startswith('diff_'))
def report(self, fileobj=None, indent=0, overwrite=False):
"""
Generates a text report on the differences (if any) between two
objects, and either returns it as a string or writes it to a file-like
object.
Parameters
----------
fileobj : file-like, string, or None, optional
If `None`, this method returns the report as a string. Otherwise it
returns `None` and writes the report to the given file-like object
(which must have a ``.write()`` method at a minimum), or to a new
file at the path specified.
indent : int
The number of 4 space tabs to indent the report.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Returns
-------
report : str or None
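        For example, a sketch of both output modes (``diff`` is any diff
        instance; the output filename is hypothetical)::
            text = diff.report()                       # report as a string
            diff.report('diff.txt', overwrite=True)    # report to a file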
"""
return_string = False
filepath = None
if isinstance(fileobj, str):
if os.path.exists(fileobj) and not overwrite:
raise OSError(NOT_OVERWRITING_MSG.format(fileobj))
else:
filepath = fileobj
fileobj = open(filepath, 'w')
elif fileobj is None:
fileobj = io.StringIO()
return_string = True
self._fileobj = fileobj
self._indent = indent # This is used internally by _writeln
try:
self._report()
finally:
if filepath:
fileobj.close()
if return_string:
return fileobj.getvalue()
def _writeln(self, text):
self._fileobj.write(fixed_width_indent(text, self._indent) + '\n')
def _diff(self):
raise NotImplementedError
def _report(self):
raise NotImplementedError
class FITSDiff(_BaseDiff):
"""Diff two FITS files by filename, or two `HDUList` objects.
`FITSDiff` objects have the following diff attributes:
- ``diff_hdu_count``: If the FITS files being compared have different
numbers of HDUs, this contains a 2-tuple of the number of HDUs in each
file.
- ``diff_hdus``: If any HDUs with the same index are different, this
contains a list of 2-tuples of the HDU index and the `HDUDiff` object
representing the differences between the two HDUs.
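    For example, a usage sketch (``'a.fits'`` and ``'b.fits'`` are
    hypothetical files)::
        from astropy.io import fits
        diff = fits.FITSDiff('a.fits', 'b.fits', ignore_keywords=['DATE'])
        if not diff.identical:
            print(diff.report())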
"""
def __init__(self, a, b, ignore_hdus=[], ignore_keywords=[],
ignore_comments=[], ignore_fields=[],
numdiffs=10, rtol=0.0, atol=0.0,
ignore_blanks=True, ignore_blank_cards=True):
"""
Parameters
----------
a : str or `HDUList`
The filename of a FITS file on disk, or an `HDUList` object.
b : str or `HDUList`
The filename of a FITS file on disk, or an `HDUList` object to
compare to the first file.
ignore_hdus : sequence, optional
HDU names to ignore when comparing two FITS files or HDU lists; the
presence of these HDUs and their contents are ignored. Wildcard
strings may also be included in the list.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
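        For example, a sketch of the wildcard options (the HDU and keyword
        names are purely illustrative)::
            FITSDiff(a, b, ignore_hdus=['EVENTS*'],
                     ignore_keywords=['CHECKSUM', 'DATASUM'])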
"""
if isinstance(a, (str, os.PathLike)):
try:
a = fitsopen(a)
except Exception as exc:
raise OSError("error opening file a ({}): {}: {}".format(
a, exc.__class__.__name__, exc.args[0]))
close_a = True
else:
close_a = False
if isinstance(b, (str, os.PathLike)):
try:
b = fitsopen(b)
except Exception as exc:
raise OSError("error opening file b ({}): {}: {}".format(
b, exc.__class__.__name__, exc.args[0]))
close_b = True
else:
close_b = False
# Normalize keywords/fields to ignore to upper case
self.ignore_hdus = {k.upper() for k in ignore_hdus}
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.ignore_fields = {k.upper() for k in ignore_fields}
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
# Some hdu names may be pattern wildcards. Find them.
self.ignore_hdu_patterns = set()
for name in list(self.ignore_hdus):
if name != '*' and glob.has_magic(name):
self.ignore_hdus.remove(name)
self.ignore_hdu_patterns.add(name)
self.diff_hdu_count = ()
self.diff_hdus = []
try:
super().__init__(a, b)
finally:
if close_a:
a.close()
if close_b:
b.close()
def _diff(self):
if len(self.a) != len(self.b):
self.diff_hdu_count = (len(self.a), len(self.b))
# Record filenames for use later in _report
self.filenamea = self.a.filename()
if not self.filenamea:
self.filenamea = f'<{self.a.__class__.__name__} object at {id(self.a):#x}>'
self.filenameb = self.b.filename()
if not self.filenameb:
self.filenameb = f'<{self.b.__class__.__name__} object at {id(self.b):#x}>'
if self.ignore_hdus:
self.a = HDUList([h for h in self.a if h.name not in self.ignore_hdus])
self.b = HDUList([h for h in self.b if h.name not in self.ignore_hdus])
if self.ignore_hdu_patterns:
a_names = [hdu.name for hdu in self.a]
b_names = [hdu.name for hdu in self.b]
for pattern in self.ignore_hdu_patterns:
self.a = HDUList([h for h in self.a if h.name not in fnmatch.filter(
a_names, pattern)])
self.b = HDUList([h for h in self.b if h.name not in fnmatch.filter(
b_names, pattern)])
# For now, just compare the extensions one by one in order.
# Might allow some more sophisticated types of diffing later.
# TODO: Somehow or another simplify the passing around of diff
# options--this will become important as the number of options grows
for idx in range(min(len(self.a), len(self.b))):
hdu_diff = HDUDiff.fromdiff(self, self.a[idx], self.b[idx])
if not hdu_diff.identical:
if self.a[idx].name == self.b[idx].name and self.a[idx].ver == self.b[idx].ver:
self.diff_hdus.append((idx, hdu_diff, self.a[idx].name, self.a[idx].ver))
else:
self.diff_hdus.append((idx, hdu_diff, "", self.a[idx].ver))
def _report(self):
wrapper = textwrap.TextWrapper(initial_indent=' ',
subsequent_indent=' ')
self._fileobj.write('\n')
self._writeln(f' fitsdiff: {__version__}')
self._writeln(f' a: {self.filenamea}\n b: {self.filenameb}')
if self.ignore_hdus:
ignore_hdus = ' '.join(sorted(self.ignore_hdus))
self._writeln(f' HDU(s) not to be compared:\n{wrapper.fill(ignore_hdus)}')
if self.ignore_hdu_patterns:
ignore_hdu_patterns = ' '.join(sorted(self.ignore_hdu_patterns))
self._writeln(' HDU(s) not to be compared:\n{}'
.format(wrapper.fill(ignore_hdu_patterns)))
if self.ignore_keywords:
ignore_keywords = ' '.join(sorted(self.ignore_keywords))
self._writeln(' Keyword(s) not to be compared:\n{}'
.format(wrapper.fill(ignore_keywords)))
if self.ignore_comments:
ignore_comments = ' '.join(sorted(self.ignore_comments))
self._writeln(' Keyword(s) whose comments are not to be compared'
':\n{}'.format(wrapper.fill(ignore_comments)))
if self.ignore_fields:
ignore_fields = ' '.join(sorted(self.ignore_fields))
self._writeln(' Table column(s) not to be compared:\n{}'
.format(wrapper.fill(ignore_fields)))
self._writeln(' Maximum number of different data values to be '
'reported: {}'.format(self.numdiffs))
self._writeln(' Relative tolerance: {}, Absolute tolerance: {}'
.format(self.rtol, self.atol))
if self.diff_hdu_count:
self._fileobj.write('\n')
self._writeln('Files contain different numbers of HDUs:')
self._writeln(f' a: {self.diff_hdu_count[0]}')
self._writeln(f' b: {self.diff_hdu_count[1]}')
if not self.diff_hdus:
self._writeln('No differences found between common HDUs.')
return
elif not self.diff_hdus:
self._fileobj.write('\n')
self._writeln('No differences found.')
return
for idx, hdu_diff, extname, extver in self.diff_hdus:
# print out the extension heading
if idx == 0:
self._fileobj.write('\n')
self._writeln('Primary HDU:')
else:
self._fileobj.write('\n')
if extname:
self._writeln(f'Extension HDU {idx} ({extname}, {extver}):')
else:
self._writeln(f'Extension HDU {idx}:')
hdu_diff.report(self._fileobj, indent=self._indent + 1)
class HDUDiff(_BaseDiff):
"""
Diff two HDU objects, including their headers and their data (but only if
    both HDUs contain the same type of data (image, table, or unknown)).
`HDUDiff` objects have the following diff attributes:
- ``diff_extnames``: If the two HDUs have different EXTNAME values, this
contains a 2-tuple of the different extension names.
    - ``diff_extvers``: If the two HDUs have different EXTVER values, this
contains a 2-tuple of the different extension versions.
- ``diff_extlevels``: If the two HDUs have different EXTLEVEL values, this
contains a 2-tuple of the different extension levels.
- ``diff_extension_types``: If the two HDUs have different XTENSION values,
this contains a 2-tuple of the different extension types.
    - ``diff_headers``: Contains a `HeaderDiff` object for the headers of the
      two HDUs. This attribute is always populated; whether the headers
      actually differ can be checked through ``diff_headers.identical``.
- ``diff_data``: Contains either a `ImageDataDiff`, `TableDataDiff`, or
`RawDataDiff` as appropriate for the data in the HDUs, and only if the
two HDUs have non-empty data of the same type (`RawDataDiff` is used for
HDUs containing non-empty data of an indeterminate type).
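    For example, a sketch (``hdul_a`` and ``hdul_b`` stand in for two
    `HDUList` objects)::
        from astropy.io import fits
        diff = fits.HDUDiff(hdul_a[0], hdul_b[0])
        diff.diff_headers.identical  # header comparison is always performed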
"""
def __init__(self, a, b, ignore_keywords=[], ignore_comments=[],
ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0,
ignore_blanks=True, ignore_blank_cards=True):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.ignore_fields = {k.upper() for k in ignore_fields}
self.rtol = rtol
self.atol = atol
self.numdiffs = numdiffs
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
self.diff_extnames = ()
self.diff_extvers = ()
self.diff_extlevels = ()
self.diff_extension_types = ()
self.diff_headers = None
self.diff_data = None
super().__init__(a, b)
def _diff(self):
if self.a.name != self.b.name:
self.diff_extnames = (self.a.name, self.b.name)
if self.a.ver != self.b.ver:
self.diff_extvers = (self.a.ver, self.b.ver)
if self.a.level != self.b.level:
self.diff_extlevels = (self.a.level, self.b.level)
if self.a.header.get('XTENSION') != self.b.header.get('XTENSION'):
self.diff_extension_types = (self.a.header.get('XTENSION'),
self.b.header.get('XTENSION'))
self.diff_headers = HeaderDiff.fromdiff(self, self.a.header.copy(),
self.b.header.copy())
if self.a.data is None or self.b.data is None:
# TODO: Perhaps have some means of marking this case
pass
elif self.a.is_image and self.b.is_image:
self.diff_data = ImageDataDiff.fromdiff(self, self.a.data,
self.b.data)
# Clean up references to (possibly) memmapped arrays so they can
# be closed by .close()
self.diff_data.a = None
self.diff_data.b = None
elif (isinstance(self.a, _TableLikeHDU) and
isinstance(self.b, _TableLikeHDU)):
# TODO: Replace this if/when _BaseHDU grows a .is_table property
self.diff_data = TableDataDiff.fromdiff(self, self.a.data,
self.b.data)
# Clean up references to (possibly) memmapped arrays so they can
# be closed by .close()
self.diff_data.a = None
self.diff_data.b = None
elif not self.diff_extension_types:
# Don't diff the data for unequal extension types that are not
# recognized image or table types
self.diff_data = RawDataDiff.fromdiff(self, self.a.data,
self.b.data)
# Clean up references to (possibly) memmapped arrays so they can
# be closed by .close()
self.diff_data.a = None
self.diff_data.b = None
def _report(self):
if self.identical:
self._writeln(" No differences found.")
if self.diff_extension_types:
self._writeln(" Extension types differ:\n a: {}\n "
"b: {}".format(*self.diff_extension_types))
if self.diff_extnames:
self._writeln(" Extension names differ:\n a: {}\n "
"b: {}".format(*self.diff_extnames))
if self.diff_extvers:
self._writeln(" Extension versions differ:\n a: {}\n "
"b: {}".format(*self.diff_extvers))
if self.diff_extlevels:
self._writeln(" Extension levels differ:\n a: {}\n "
"b: {}".format(*self.diff_extlevels))
if not self.diff_headers.identical:
self._fileobj.write('\n')
self._writeln(" Headers contain differences:")
self.diff_headers.report(self._fileobj, indent=self._indent + 1)
if self.diff_data is not None and not self.diff_data.identical:
self._fileobj.write('\n')
self._writeln(" Data contains differences:")
self.diff_data.report(self._fileobj, indent=self._indent + 1)
class HeaderDiff(_BaseDiff):
"""
Diff two `Header` objects.
`HeaderDiff` objects have the following diff attributes:
- ``diff_keyword_count``: If the two headers contain a different number of
keywords, this contains a 2-tuple of the keyword count for each header.
- ``diff_keywords``: If either header contains one or more keywords that
don't appear at all in the other header, this contains a 2-tuple
consisting of a list of the keywords only appearing in header a, and a
list of the keywords only appearing in header b.
- ``diff_duplicate_keywords``: If a keyword appears in both headers at
least once, but contains a different number of duplicates (for example, a
different number of HISTORY cards in each header), an item is added to
this dict with the keyword as the key, and a 2-tuple of the different
counts of that keyword as the value. For example::
{'HISTORY': (20, 19)}
means that header a contains 20 HISTORY cards, while header b contains
only 19 HISTORY cards.
    - ``diff_keyword_values``: If any of the common keywords between the two
headers have different values, they appear in this dict. It has a
structure similar to ``diff_duplicate_keywords``, with the keyword as the
key, and a 2-tuple of the different values as the value. For example::
{'NAXIS': (2, 3)}
means that the NAXIS keyword has a value of 2 in header a, and a value of
3 in header b. This excludes any keywords matched by the
``ignore_keywords`` list.
- ``diff_keyword_comments``: Like ``diff_keyword_values``, but contains
differences between keyword comments.
`HeaderDiff` objects also have a ``common_keywords`` attribute that lists
all keywords that appear in both headers.
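    A minimal sketch of typical use (the keyword values are illustrative)::
        from astropy.io import fits
        ha = fits.Header([('NAXIS', 2)])
        hb = fits.Header([('NAXIS', 3)])
        diff = fits.HeaderDiff(ha, hb)
        diff.diff_keyword_values  # -> {'NAXIS': [(2, 3)]}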
"""
def __init__(self, a, b, ignore_keywords=[], ignore_comments=[],
rtol=0.0, atol=0.0, ignore_blanks=True, ignore_blank_cards=True):
"""
Parameters
----------
a : `~astropy.io.fits.Header` or string or bytes
A header.
b : `~astropy.io.fits.Header` or string or bytes
A header to compare to the first header.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.rtol = rtol
self.atol = atol
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
self.ignore_keyword_patterns = set()
self.ignore_comment_patterns = set()
for keyword in list(self.ignore_keywords):
keyword = keyword.upper()
if keyword != '*' and glob.has_magic(keyword):
self.ignore_keywords.remove(keyword)
self.ignore_keyword_patterns.add(keyword)
for keyword in list(self.ignore_comments):
keyword = keyword.upper()
if keyword != '*' and glob.has_magic(keyword):
self.ignore_comments.remove(keyword)
self.ignore_comment_patterns.add(keyword)
# Keywords appearing in each header
self.common_keywords = []
# Set to the number of keywords in each header if the counts differ
self.diff_keyword_count = ()
# Set if the keywords common to each header (excluding ignore_keywords)
# appear in different positions within the header
# TODO: Implement this
self.diff_keyword_positions = ()
# Keywords unique to each header (excluding keywords in
# ignore_keywords)
self.diff_keywords = ()
# Keywords that have different numbers of duplicates in each header
# (excluding keywords in ignore_keywords)
self.diff_duplicate_keywords = {}
# Keywords common to each header but having different values (excluding
# keywords in ignore_keywords)
self.diff_keyword_values = defaultdict(list)
# Keywords common to each header but having different comments
# (excluding keywords in ignore_keywords or in ignore_comments)
self.diff_keyword_comments = defaultdict(list)
if isinstance(a, str):
a = Header.fromstring(a)
if isinstance(b, str):
b = Header.fromstring(b)
if not (isinstance(a, Header) and isinstance(b, Header)):
raise TypeError('HeaderDiff can only diff astropy.io.fits.Header '
'objects or strings containing FITS headers.')
super().__init__(a, b)
# TODO: This doesn't pay much attention to the *order* of the keywords,
# except in the case of duplicate keywords. The order should be checked
# too, or at least it should be an option.
def _diff(self):
if self.ignore_blank_cards:
cardsa = [c for c in self.a.cards if str(c) != BLANK_CARD]
cardsb = [c for c in self.b.cards if str(c) != BLANK_CARD]
else:
cardsa = list(self.a.cards)
cardsb = list(self.b.cards)
# build dictionaries of keyword values and comments
def get_header_values_comments(cards):
values = {}
comments = {}
for card in cards:
value = card.value
if self.ignore_blanks and isinstance(value, str):
value = value.rstrip()
values.setdefault(card.keyword, []).append(value)
comments.setdefault(card.keyword, []).append(card.comment)
return values, comments
valuesa, commentsa = get_header_values_comments(cardsa)
valuesb, commentsb = get_header_values_comments(cardsb)
        # Normalize all keywords to upper-case for comparison's sake;
# TODO: HIERARCH keywords should be handled case-sensitively I think
keywordsa = {k.upper() for k in valuesa}
keywordsb = {k.upper() for k in valuesb}
self.common_keywords = sorted(keywordsa.intersection(keywordsb))
if len(cardsa) != len(cardsb):
self.diff_keyword_count = (len(cardsa), len(cardsb))
# Any other diff attributes should exclude ignored keywords
keywordsa = keywordsa.difference(self.ignore_keywords)
keywordsb = keywordsb.difference(self.ignore_keywords)
if self.ignore_keyword_patterns:
for pattern in self.ignore_keyword_patterns:
keywordsa = keywordsa.difference(fnmatch.filter(keywordsa,
pattern))
keywordsb = keywordsb.difference(fnmatch.filter(keywordsb,
pattern))
if '*' in self.ignore_keywords:
# Any other differences between keywords are to be ignored
return
left_only_keywords = sorted(keywordsa.difference(keywordsb))
right_only_keywords = sorted(keywordsb.difference(keywordsa))
if left_only_keywords or right_only_keywords:
self.diff_keywords = (left_only_keywords, right_only_keywords)
# Compare count of each common keyword
for keyword in self.common_keywords:
if keyword in self.ignore_keywords:
continue
if self.ignore_keyword_patterns:
skip = False
for pattern in self.ignore_keyword_patterns:
if fnmatch.fnmatch(keyword, pattern):
skip = True
break
if skip:
continue
counta = len(valuesa[keyword])
countb = len(valuesb[keyword])
if counta != countb:
self.diff_duplicate_keywords[keyword] = (counta, countb)
# Compare keywords' values and comments
for a, b in zip(valuesa[keyword], valuesb[keyword]):
if diff_values(a, b, rtol=self.rtol, atol=self.atol):
self.diff_keyword_values[keyword].append((a, b))
else:
# If there are duplicate keywords we need to be able to
# index each duplicate; if the values of a duplicate
# are identical use None here
self.diff_keyword_values[keyword].append(None)
if not any(self.diff_keyword_values[keyword]):
# No differences found; delete the array of Nones
del self.diff_keyword_values[keyword]
if '*' in self.ignore_comments or keyword in self.ignore_comments:
continue
if self.ignore_comment_patterns:
skip = False
for pattern in self.ignore_comment_patterns:
if fnmatch.fnmatch(keyword, pattern):
skip = True
break
if skip:
continue
for a, b in zip(commentsa[keyword], commentsb[keyword]):
if diff_values(a, b):
self.diff_keyword_comments[keyword].append((a, b))
else:
self.diff_keyword_comments[keyword].append(None)
if not any(self.diff_keyword_comments[keyword]):
del self.diff_keyword_comments[keyword]
def _report(self):
if self.diff_keyword_count:
self._writeln(' Headers have different number of cards:')
self._writeln(f' a: {self.diff_keyword_count[0]}')
self._writeln(f' b: {self.diff_keyword_count[1]}')
if self.diff_keywords:
for keyword in self.diff_keywords[0]:
if keyword in Card._commentary_keywords:
val = self.a[keyword][0]
else:
val = self.a[keyword]
self._writeln(f' Extra keyword {keyword!r:8} in a: {val!r}')
for keyword in self.diff_keywords[1]:
if keyword in Card._commentary_keywords:
val = self.b[keyword][0]
else:
val = self.b[keyword]
self._writeln(f' Extra keyword {keyword!r:8} in b: {val!r}')
if self.diff_duplicate_keywords:
for keyword, count in sorted(self.diff_duplicate_keywords.items()):
self._writeln(f' Inconsistent duplicates of keyword {keyword!r:8}:')
                self._writeln(' Occurs {} time(s) in a, {} time(s) in b'
.format(*count))
if self.diff_keyword_values or self.diff_keyword_comments:
for keyword in self.common_keywords:
report_diff_keyword_attr(self._fileobj, 'values',
self.diff_keyword_values, keyword,
ind=self._indent)
report_diff_keyword_attr(self._fileobj, 'comments',
self.diff_keyword_comments, keyword,
ind=self._indent)
# TODO: It might be good if there was also a threshold option for percentage of
# different pixels: For example ignore if only 1% of the pixels are different
# within some threshold. There are lots of possibilities here, but hold off
# for now until specific cases come up.
class ImageDataDiff(_BaseDiff):
"""
Diff two image data arrays (really any array from a PRIMARY HDU or an IMAGE
extension HDU, though the data unit is assumed to be "pixels").
`ImageDataDiff` objects have the following diff attributes:
- ``diff_dimensions``: If the two arrays contain either a different number
of dimensions or different sizes in any dimension, this contains a
2-tuple of the shapes of each array. Currently no further comparison is
performed on images that don't have the exact same dimensions.
- ``diff_pixels``: If the two images contain any different pixels, this
contains a list of 2-tuples of the array index where the difference was
found, and another 2-tuple containing the different values. For example,
if the pixel at (0, 0) contains different values this would look like::
[(0, 0), (1.1, 2.2)]
where 1.1 and 2.2 are the values of that pixel in each array. This
array only contains up to ``self.numdiffs`` differences, for storage
efficiency.
- ``diff_total``: The total number of different pixels found between the
arrays. Although ``diff_pixels`` does not necessarily contain all the
different pixel values, this can be used to get a count of the total
number of differences found.
- ``diff_ratio``: Contains the ratio of ``diff_total`` to the total number
of pixels in the arrays.
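    A sketch with small arrays (the values are chosen purely for
    illustration)::
        import numpy as np
        diff = ImageDataDiff(np.zeros((2, 2)), np.ones((2, 2)))
        diff.diff_total  # -> 4
        diff.diff_ratio  # -> 1.0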
"""
def __init__(self, a, b, numdiffs=10, rtol=0.0, atol=0.0):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
"""
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.diff_dimensions = ()
self.diff_pixels = []
self.diff_ratio = 0
        # self.diff_pixels only holds up to numdiffs differing pixels, while
        # self.diff_total stores the total count of differences between
        # the images (but not the differing values themselves)
self.diff_total = 0
super().__init__(a, b)
def _diff(self):
if self.a.shape != self.b.shape:
self.diff_dimensions = (self.a.shape, self.b.shape)
# Don't do any further comparison if the dimensions differ
# TODO: Perhaps we could, however, diff just the intersection
# between the two images
return
# Find the indices where the values are not equal
# If neither a nor b are floating point (or complex), ignore rtol and
# atol
if not (np.issubdtype(self.a.dtype, np.inexact) or
np.issubdtype(self.b.dtype, np.inexact)):
rtol = 0
atol = 0
else:
rtol = self.rtol
atol = self.atol
diffs = where_not_allclose(self.a, self.b, atol=atol, rtol=rtol)
self.diff_total = len(diffs[0])
if self.diff_total == 0:
# Then we're done
return
if self.numdiffs < 0:
numdiffs = self.diff_total
else:
numdiffs = self.numdiffs
self.diff_pixels = [(idx, (self.a[idx], self.b[idx]))
for idx in islice(zip(*diffs), 0, numdiffs)]
self.diff_ratio = float(self.diff_total) / float(len(self.a.flat))
def _report(self):
if self.diff_dimensions:
dimsa = ' x '.join(str(d) for d in
reversed(self.diff_dimensions[0]))
dimsb = ' x '.join(str(d) for d in
reversed(self.diff_dimensions[1]))
self._writeln(' Data dimensions differ:')
self._writeln(f' a: {dimsa}')
self._writeln(f' b: {dimsb}')
# For now we don't do any further comparison if the dimensions
# differ; though in the future it might be nice to be able to
# compare at least where the images intersect
self._writeln(' No further data comparison performed.')
return
if not self.diff_pixels:
return
for index, values in self.diff_pixels:
index = [x + 1 for x in reversed(index)]
self._writeln(f' Data differs at {index}:')
report_diff_values(values[0], values[1], fileobj=self._fileobj,
indent_width=self._indent + 1)
if self.diff_total > self.numdiffs:
self._writeln(' ...')
self._writeln(' {} different pixels found ({:.2%} different).'
.format(self.diff_total, self.diff_ratio))
class RawDataDiff(ImageDataDiff):
"""
`RawDataDiff` is just a special case of `ImageDataDiff` where the images
are one-dimensional, and the data is treated as a 1-dimensional array of
bytes instead of pixel values. This is used to compare the data of two
non-standard extension HDUs that were not recognized as containing image or
table data.
    `RawDataDiff` objects have the following diff attributes:
- ``diff_dimensions``: Same as the ``diff_dimensions`` attribute of
`ImageDataDiff` objects. Though the "dimension" of each array is just an
integer representing the number of bytes in the data.
- ``diff_bytes``: Like the ``diff_pixels`` attribute of `ImageDataDiff`
objects, but renamed to reflect the minor semantic difference that these
are raw bytes and not pixel values. Also the indices are integers
instead of tuples.
- ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
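    A sketch (``hdu_a`` and ``hdu_b`` stand in for two nonstandard HDUs whose
    ``.data`` attributes hold raw bytes)::
        diff = RawDataDiff(hdu_a.data, hdu_b.data)
        diff.diff_total  # count of differing bytes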
"""
def __init__(self, a, b, numdiffs=10):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
"""
self.diff_dimensions = ()
self.diff_bytes = []
super().__init__(a, b, numdiffs=numdiffs)
def _diff(self):
super()._diff()
if self.diff_dimensions:
self.diff_dimensions = (self.diff_dimensions[0][0],
self.diff_dimensions[1][0])
self.diff_bytes = [(x[0], y) for x, y in self.diff_pixels]
del self.diff_pixels
def _report(self):
if self.diff_dimensions:
self._writeln(' Data sizes differ:')
self._writeln(f' a: {self.diff_dimensions[0]} bytes')
self._writeln(f' b: {self.diff_dimensions[1]} bytes')
# For now we don't do any further comparison if the dimensions
# differ; though in the future it might be nice to be able to
# compare at least where the images intersect
self._writeln(' No further data comparison performed.')
return
if not self.diff_bytes:
return
for index, values in self.diff_bytes:
self._writeln(f' Data differs at byte {index}:')
report_diff_values(values[0], values[1], fileobj=self._fileobj,
indent_width=self._indent + 1)
self._writeln(' ...')
self._writeln(' {} different bytes found ({:.2%} different).'
.format(self.diff_total, self.diff_ratio))
class TableDataDiff(_BaseDiff):
"""
Diff two table data arrays. It doesn't matter whether the data originally
came from a binary or ASCII table--the data should be passed in as a
recarray.
`TableDataDiff` objects have the following diff attributes:
- ``diff_column_count``: If the tables being compared have different
numbers of columns, this contains a 2-tuple of the column count in each
table. Even if the tables have different column counts, an attempt is
still made to compare any columns they have in common.
- ``diff_columns``: If either table contains columns unique to that table,
either in name or format, this contains a 2-tuple of lists. The first
element is a list of columns (these are full `Column` objects) that
      appear only in table a. The second element is a list of columns that
appear only in table b. This only lists columns with different column
definitions, and has nothing to do with the data in those columns.
- ``diff_column_names``: This is like ``diff_columns``, but lists only the
names of columns unique to either table, rather than the full `Column`
objects.
- ``diff_column_attributes``: Lists columns that are in both tables but
have different secondary attributes, such as TUNIT or TDISP. The format
is a list of 2-tuples: The first a tuple of the column name and the
attribute, the second a tuple of the different values.
- ``diff_values``: `TableDataDiff` compares the data in each table on a
column-by-column basis. If any different data is found, it is added to
this list. The format of this list is similar to the ``diff_pixels``
attribute on `ImageDataDiff` objects, though the "index" consists of a
(column_name, row) tuple. For example::
[('TARGET', 0), ('NGC1001', 'NGC1002')]
shows that the tables contain different values in the 0-th row of the
'TARGET' column.
- ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
`TableDataDiff` objects also have a ``common_columns`` attribute that lists
the `Column` objects for columns that are identical in both tables, and a
``common_column_names`` attribute which contains a set of the names of
those columns.
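    A sketch using two single-column tables (the column data are
    illustrative)::
        from astropy.io import fits
        col_a = fits.Column(name='TARGET', format='10A', array=['NGC1001'])
        col_b = fits.Column(name='TARGET', format='10A', array=['NGC1002'])
        ta = fits.BinTableHDU.from_columns([col_a]).data
        tb = fits.BinTableHDU.from_columns([col_b]).data
        diff = fits.TableDataDiff(ta, tb)
        diff.diff_values  # -> [(('TARGET', 0), ('NGC1001', 'NGC1002'))]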
"""
def __init__(self, a, b, ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
"""
self.ignore_fields = set(ignore_fields)
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.common_columns = []
self.common_column_names = set()
# self.diff_columns contains columns with different column definitions,
# but not different column data. Column data is only compared in
# columns that have the same definitions
self.diff_rows = ()
self.diff_column_count = ()
self.diff_columns = ()
# If two columns have the same name+format, but other attributes are
# different (such as TUNIT or such) they are listed here
self.diff_column_attributes = []
# Like self.diff_columns, but just contains a list of the column names
# unique to each table, and in the order they appear in the tables
self.diff_column_names = ()
self.diff_values = []
self.diff_ratio = 0
self.diff_total = 0
super().__init__(a, b)
def _diff(self):
# Much of the code for comparing columns is similar to the code for
# comparing headers--consider refactoring
colsa = self.a.columns
colsb = self.b.columns
if len(colsa) != len(colsb):
self.diff_column_count = (len(colsa), len(colsb))
        # Even if the numbers of columns are unequal, we still compare any
        # columns they have in common
colsa = {c.name.lower(): c for c in colsa}
colsb = {c.name.lower(): c for c in colsb}
if '*' in self.ignore_fields:
# If all columns are to be ignored, ignore any further differences
# between the columns
return
# Keep the user's original ignore_fields list for reporting purposes,
# but internally use a case-insensitive version
ignore_fields = {f.lower() for f in self.ignore_fields}
# It might be nice if there were a cleaner way to do this, but for now
# it'll do
for fieldname in ignore_fields:
fieldname = fieldname.lower()
if fieldname in colsa:
del colsa[fieldname]
if fieldname in colsb:
del colsb[fieldname]
colsa_set = set(colsa.values())
colsb_set = set(colsb.values())
self.common_columns = sorted(colsa_set.intersection(colsb_set),
key=operator.attrgetter('name'))
self.common_column_names = {col.name.lower()
for col in self.common_columns}
left_only_columns = {col.name.lower(): col
for col in colsa_set.difference(colsb_set)}
right_only_columns = {col.name.lower(): col
for col in colsb_set.difference(colsa_set)}
if left_only_columns or right_only_columns:
self.diff_columns = (left_only_columns, right_only_columns)
self.diff_column_names = ([], [])
if left_only_columns:
for col in self.a.columns:
if col.name.lower() in left_only_columns:
self.diff_column_names[0].append(col.name)
if right_only_columns:
for col in self.b.columns:
if col.name.lower() in right_only_columns:
self.diff_column_names[1].append(col.name)
# If the tables have a different number of rows, we don't compare the
# columns right now.
# TODO: It might be nice to optionally compare the first n rows where n
# is the minimum of the row counts between the two tables.
if len(self.a) != len(self.b):
self.diff_rows = (len(self.a), len(self.b))
return
# If the tables contain no rows there's no data to compare, so we're
# done at this point. (See ticket #178)
if len(self.a) == len(self.b) == 0:
return
# Like in the old fitsdiff, compare tables on a column by column basis
# The difficulty here is that, while FITS column names are meant to be
# case-insensitive, Astropy still allows, for the sake of flexibility,
# two columns with the same name but different case. When columns are
        # accessed in FITS tables, a case-sensitive match is tried first, and
        # failing that a case-insensitive match is made.
# It's conceivable that the same column could appear in both tables
# being compared, but with different case.
# Though it *may* lead to inconsistencies in these rare cases, this
# just assumes that there are no duplicated column names in either
# table, and that the column names can be treated case-insensitively.
for col in self.common_columns:
name_lower = col.name.lower()
if name_lower in ignore_fields:
continue
cola = colsa[name_lower]
colb = colsb[name_lower]
for attr, _ in _COL_ATTRS:
vala = getattr(cola, attr, None)
valb = getattr(colb, attr, None)
if diff_values(vala, valb):
self.diff_column_attributes.append(
((col.name.upper(), attr), (vala, valb)))
arra = self.a[col.name]
arrb = self.b[col.name]
if (np.issubdtype(arra.dtype, np.floating) and
np.issubdtype(arrb.dtype, np.floating)):
diffs = where_not_allclose(arra, arrb,
rtol=self.rtol,
atol=self.atol)
elif 'P' in col.format:
diffs = ([idx for idx in range(len(arra))
if not np.allclose(arra[idx], arrb[idx],
rtol=self.rtol,
atol=self.atol)],)
else:
diffs = np.where(arra != arrb)
self.diff_total += len(set(diffs[0]))
if self.numdiffs >= 0:
if len(self.diff_values) >= self.numdiffs:
# Don't save any more diff values
continue
# Add no more diff'd values than this
max_diffs = self.numdiffs - len(self.diff_values)
else:
max_diffs = len(diffs[0])
last_seen_idx = None
for idx in islice(diffs[0], 0, max_diffs):
if idx == last_seen_idx:
                    # Skip duplicate indices, which may occur when the column
# data contains multi-dimensional values; we're only
# interested in storing row-by-row differences
continue
last_seen_idx = idx
self.diff_values.append(((col.name, idx),
(arra[idx], arrb[idx])))
total_values = len(self.a) * len(self.a.dtype.fields)
self.diff_ratio = float(self.diff_total) / float(total_values)
def _report(self):
if self.diff_column_count:
self._writeln(' Tables have different number of columns:')
self._writeln(f' a: {self.diff_column_count[0]}')
self._writeln(f' b: {self.diff_column_count[1]}')
if self.diff_column_names:
# Show columns with names unique to either table
for name in self.diff_column_names[0]:
format = self.diff_columns[0][name.lower()].format
self._writeln(f' Extra column {name} of format {format} in a')
for name in self.diff_column_names[1]:
format = self.diff_columns[1][name.lower()].format
self._writeln(f' Extra column {name} of format {format} in b')
col_attrs = dict(_COL_ATTRS)
# Now go through each table again and show columns with common
# names but other property differences...
for col_attr, vals in self.diff_column_attributes:
name, attr = col_attr
self._writeln(f' Column {name} has different {col_attrs[attr]}:')
report_diff_values(vals[0], vals[1], fileobj=self._fileobj,
indent_width=self._indent + 1)
if self.diff_rows:
self._writeln(' Table rows differ:')
self._writeln(f' a: {self.diff_rows[0]}')
self._writeln(f' b: {self.diff_rows[1]}')
self._writeln(' No further data comparison performed.')
return
if not self.diff_values:
return
# Finally, let's go through and report column data differences:
for indx, values in self.diff_values:
self._writeln(' Column {} data differs in row {}:'.format(*indx))
report_diff_values(values[0], values[1], fileobj=self._fileobj,
indent_width=self._indent + 1)
if self.diff_values and self.numdiffs < self.diff_total:
self._writeln(' ...{} additional difference(s) found.'.format(
self.diff_total - self.numdiffs))
if self.diff_total > self.numdiffs:
self._writeln(' ...')
self._writeln(' {} different table data element(s) found '
'({:.2%} different).'
.format(self.diff_total, self.diff_ratio))
def report_diff_keyword_attr(fileobj, attr, diffs, keyword, ind=0):
"""
Write a diff between two header keyword values or comments to the specified
file-like object.
"""
if keyword in diffs:
vals = diffs[keyword]
for idx, val in enumerate(vals):
if val is None:
continue
if idx == 0:
dup = ''
else:
dup = f'[{idx + 1}]'
fileobj.write(
fixed_width_indent(' Keyword {:8}{} has different {}:\n'
.format(keyword, dup, attr), ind))
report_diff_values(val[0], val[1], fileobj=fileobj,
indent_width=ind + 1)
|
8ee71d2c04c5d0ce59ca67f5c2a5292401c55a421e85d5a4e3f2270ceded532a | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import collections
import copy
import itertools
import numbers
import re
import warnings
from .card import Card, _pad, KEYWORD_LENGTH, UNDEFINED
from .file import _File
from .util import (encode_ascii, decode_ascii, fileobj_closed,
fileobj_is_binary, path_like)
from ._utils import parse_header
from astropy.utils import isiterable
from astropy.utils.exceptions import AstropyUserWarning
BLOCK_SIZE = 2880 # the FITS block size
# This regular expression can match a *valid* END card which just consists of
# the string 'END' followed by all spaces, or an *invalid* end card which
# consists of END, followed by any character that is *not* a valid character
# for a valid FITS keyword (that is, this is not a keyword like 'ENDER' which
# starts with 'END' but is not 'END'), followed by any arbitrary bytes. An
# invalid end card may also consist of just 'END' with no trailing bytes.
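# For example, b'END' followed by exactly 77 spaces matches as 'valid', while
# b'END;' (or a bare b'END' at the very end of the data) matches as 'invalid';
# b'ENDER', which merely begins a longer keyword, does not match at all.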
HEADER_END_RE = re.compile(encode_ascii(
r'(?:(?P<valid>END {77}) *)|(?P<invalid>END$|END {0,76}[^A-Z0-9_-])'))
# According to the FITS standard the only characters that may appear in a
# header record are the restricted ASCII chars from 0x20 through 0x7E.
VALID_HEADER_CHARS = set(map(chr, range(0x20, 0x7F)))
END_CARD = 'END' + ' ' * 77
__doctest_skip__ = ['Header', 'Header.comments', 'Header.fromtextfile',
'Header.totextfile', 'Header.set', 'Header.update']
class Header:
"""
FITS header class. This class exposes both a dict-like interface and a
list-like interface to FITS headers.
The header may be indexed by keyword and, like a dict, the associated value
will be returned. When the header contains cards with duplicate keywords,
only the value of the first card with the given keyword will be returned.
It is also possible to use a 2-tuple as the index in the form (keyword,
n)--this returns the n-th value with that keyword, in the case where there
are duplicate keywords.
For example::
>>> header['NAXIS']
0
>>> header[('FOO', 1)] # Return the value of the second FOO keyword
'foo'
The header may also be indexed by card number::
>>> header[0] # Return the value of the first card in the header
'T'
Commentary keywords such as HISTORY and COMMENT are special cases: When
indexing the Header object with either 'HISTORY' or 'COMMENT' a list of all
the HISTORY/COMMENT values is returned::
>>> header['HISTORY']
This is the first history entry in this header.
This is the second history entry in this header.
...
See the Astropy documentation for more details on working with headers.
Notes
-----
Although FITS keywords must be exclusively upper case, retrieving an item
in a `Header` object is case insensitive.
"""
def __init__(self, cards=[], copy=False):
"""
Construct a `Header` from an iterable and/or text file.
Parameters
----------
cards : list of `Card`, optional
The cards to initialize the header with. Also allowed are other
`Header` (or `dict`-like) objects.
.. versionchanged:: 1.2
Allowed ``cards`` to be a `dict`-like object.
copy : bool, optional
If ``True`` copies the ``cards`` if they were another `Header`
instance.
Default is ``False``.
.. versionadded:: 1.3
"""
self.clear()
if isinstance(cards, Header):
if copy:
cards = cards.copy()
cards = cards.cards
elif isinstance(cards, dict):
cards = cards.items()
for card in cards:
self.append(card, end=True)
self._modified = False
def __len__(self):
return len(self._cards)
def __iter__(self):
for card in self._cards:
yield card.keyword
def __contains__(self, keyword):
if keyword in self._keyword_indices or keyword in self._rvkc_indices:
# For the most common case (single, standard form keyword lookup)
# this will work and is an O(1) check. If it fails that doesn't
# guarantee absence, just that we have to perform the full set of
# checks in self._cardindex
return True
try:
self._cardindex(keyword)
except (KeyError, IndexError):
return False
return True
def __getitem__(self, key):
if isinstance(key, slice):
return self.__class__([copy.copy(c) for c in self._cards[key]])
elif self._haswildcard(key):
return self.__class__([copy.copy(self._cards[idx])
for idx in self._wildcardmatch(key)])
elif isinstance(key, str):
key = key.strip()
if key.upper() in Card._commentary_keywords:
key = key.upper()
# Special case for commentary cards
return _HeaderCommentaryCards(self, key)
if isinstance(key, tuple):
keyword = key[0]
else:
keyword = key
card = self._cards[self._cardindex(key)]
if card.field_specifier is not None and keyword == card.rawkeyword:
            # This is an RVKC (record-valued keyword card); if only the
            # top-level keyword was specified, return the raw value, not the
            # parsed-out float value
return card.rawvalue
value = card.value
if value == UNDEFINED:
return None
return value
def __setitem__(self, key, value):
if self._set_slice(key, value, self):
return
if isinstance(value, tuple):
if len(value) > 2:
raise ValueError(
'A Header item may be set with either a scalar value, '
'a 1-tuple containing a scalar value, or a 2-tuple '
'containing a scalar value and comment string.')
if len(value) == 1:
value, comment = value[0], None
if value is None:
value = UNDEFINED
elif len(value) == 2:
value, comment = value
if value is None:
value = UNDEFINED
if comment is None:
comment = ''
else:
comment = None
card = None
if isinstance(key, numbers.Integral):
card = self._cards[key]
elif isinstance(key, tuple):
card = self._cards[self._cardindex(key)]
if value is None:
value = UNDEFINED
if card:
card.value = value
if comment is not None:
card.comment = comment
if card._modified:
self._modified = True
else:
# If we get an IndexError that should be raised; we don't allow
# assignment to non-existing indices
self._update((key, value, comment))
def __delitem__(self, key):
if isinstance(key, slice) or self._haswildcard(key):
# This is very inefficient but it's not a commonly used feature.
# If someone out there complains that they make heavy use of slice
# deletions and it's too slow, well, we can worry about it then
            # [the solution is not too complicated--it would be to wait until
            # all the cards are deleted before updating _keyword_indices,
            # rather than updating it once for each card that gets deleted]
if isinstance(key, slice):
indices = range(*key.indices(len(self)))
# If the slice step is backwards we want to reverse it, because
# it will be reversed in a few lines...
if key.step and key.step < 0:
indices = reversed(indices)
else:
indices = self._wildcardmatch(key)
for idx in reversed(indices):
del self[idx]
return
elif isinstance(key, str):
# delete ALL cards with the same keyword name
key = Card.normalize_keyword(key)
indices = self._keyword_indices
if key not in self._keyword_indices:
indices = self._rvkc_indices
if key not in indices:
                # If the keyword is not present, raise a KeyError. To delete
                # a keyword without caring whether it is present, use
                # Header.remove(keyword, ignore_missing=True) instead.
raise KeyError(f"Keyword '{key}' not found.")
for idx in reversed(indices[key]):
# Have to copy the indices list since it will be modified below
del self[idx]
return
idx = self._cardindex(key)
card = self._cards[idx]
keyword = card.keyword
del self._cards[idx]
keyword = Card.normalize_keyword(keyword)
indices = self._keyword_indices[keyword]
indices.remove(idx)
if not indices:
del self._keyword_indices[keyword]
# Also update RVKC indices if necessary :/
if card.field_specifier is not None:
indices = self._rvkc_indices[card.rawkeyword]
indices.remove(idx)
if not indices:
del self._rvkc_indices[card.rawkeyword]
# We also need to update all other indices
self._updateindices(idx, increment=False)
self._modified = True
def __repr__(self):
return self.tostring(sep='\n', endcard=False, padding=False)
def __str__(self):
return self.tostring()
def __eq__(self, other):
"""
Two Headers are equal only if they have the exact same string
representation.
"""
return str(self) == str(other)
def __add__(self, other):
temp = self.copy(strip=False)
temp.extend(other)
return temp
def __iadd__(self, other):
self.extend(other)
return self
def _ipython_key_completions_(self):
return self.__iter__()
@property
def cards(self):
"""
The underlying physical cards that make up this Header; it can be
looked at, but it should not be modified directly.
"""
return _CardAccessor(self)
@property
def comments(self):
"""
View the comments associated with each keyword, if any.
For example, to see the comment on the NAXIS keyword:
>>> header.comments['NAXIS']
number of data axes
Comments can also be updated through this interface:
>>> header.comments['NAXIS'] = 'Number of data axes'
"""
return _HeaderComments(self)
@property
def _modified(self):
"""
Whether or not the header has been modified; this is a property so that
it can also check each card for modifications--cards may have been
        modified directly without the header that contains them otherwise knowing.
"""
modified_cards = any(c._modified for c in self._cards)
if modified_cards:
# If any cards were modified then by definition the header was
# modified
self.__dict__['_modified'] = True
return self.__dict__['_modified']
@_modified.setter
def _modified(self, val):
self.__dict__['_modified'] = val
@classmethod
def fromstring(cls, data, sep=''):
"""
Creates an HDU header from a byte string containing the entire header
data.
Parameters
----------
data : str or bytes
String or bytes containing the entire header. In the case of bytes
they will be decoded using latin-1 (only plain ASCII characters are
allowed in FITS headers but latin-1 allows us to retain any invalid
bytes that might appear in malformatted FITS files).
sep : str, optional
The string separating cards from each other, such as a newline. By
default there is no card separator (as is the case in a raw FITS
file). In general this is only used in cases where a header was
printed as text (e.g. with newlines after each card) and you want
to create a new `Header` from it by copy/pasting.
Examples
--------
>>> from astropy.io.fits import Header
>>> hdr = Header({'SIMPLE': True})
>>> Header.fromstring(hdr.tostring()) == hdr
True
If you want to create a `Header` from printed text it's not necessary
to have the exact binary structure as it would appear in a FITS file,
with the full 80 byte card length. Rather, each "card" can end in a
newline and does not have to be padded out to a full card length as
long as it "looks like" a FITS header:
>>> hdr = Header.fromstring(\"\"\"\\
... SIMPLE = T / conforms to FITS standard
... BITPIX = 8 / array data type
... NAXIS = 0 / number of array dimensions
... EXTEND = T
... \"\"\", sep='\\n')
>>> hdr['SIMPLE']
True
>>> hdr['BITPIX']
8
>>> len(hdr)
4
Returns
-------
`Header`
A new `Header` instance.
"""
cards = []
# If the card separator contains characters that may validly appear in
# a card, the only way to unambiguously distinguish between cards is to
# require that they be Card.length long. However, if the separator
# contains non-valid characters (namely \n) the cards may be split
# immediately at the separator
require_full_cardlength = set(sep).issubset(VALID_HEADER_CHARS)
if isinstance(data, bytes):
# FITS supports only ASCII, but decode as latin1 and just take all
# bytes for now; if it results in mojibake due to e.g. UTF-8
# encoded data in a FITS header that's OK because it shouldn't be
# there in the first place--accepting it here still gives us the
# opportunity to display warnings later during validation
CONTINUE = b'CONTINUE'
END = b'END'
end_card = END_CARD.encode('ascii')
sep = sep.encode('latin1')
empty = b''
else:
CONTINUE = 'CONTINUE'
END = 'END'
end_card = END_CARD
empty = ''
# Split the header into individual cards
idx = 0
image = []
while idx < len(data):
if require_full_cardlength:
end_idx = idx + Card.length
else:
try:
end_idx = data.index(sep, idx)
except ValueError:
end_idx = len(data)
next_image = data[idx:end_idx]
idx = end_idx + len(sep)
if image:
if next_image[:8] == CONTINUE:
image.append(next_image)
continue
cards.append(Card.fromstring(empty.join(image)))
if require_full_cardlength:
if next_image == end_card:
image = []
break
else:
if next_image.split(sep)[0].rstrip() == END:
image = []
break
image = [next_image]
# Add the last image that was found before the end, if any
if image:
cards.append(Card.fromstring(empty.join(image)))
return cls._fromcards(cards)
@classmethod
def fromfile(cls, fileobj, sep='', endcard=True, padding=True):
"""
Similar to :meth:`Header.fromstring`, but reads the header string from
a given file-like object or filename.
Parameters
----------
fileobj : str, file-like
A filename or an open file-like object from which a FITS header is
to be read. For open file handles the file pointer must be at the
beginning of the header.
sep : str, optional
The string separating cards from each other, such as a newline. By
default there is no card separator (as is the case in a raw FITS
file).
endcard : bool, optional
If True (the default) the header must end with an END card in order
to be considered valid. If an END card is not found an
`OSError` is raised.
padding : bool, optional
If True (the default) the header will be required to be padded out
to a multiple of 2880, the FITS header block size. Otherwise any
padding, or lack thereof, is ignored.
Returns
-------
`Header`
A new `Header` instance.
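        For example, a sketch (``'example.fits'`` is a hypothetical file;
        ``sep`` is only needed when reading a header that was saved as text)::
            hdr = Header.fromfile('example.fits')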
"""
close_file = False
if isinstance(fileobj, path_like):
# If sep is non-empty we are trying to read a header printed to a
# text file, so open in text mode by default to support newline
# handling; if a binary-mode file object is passed in, the user is
# then on their own w.r.t. newline handling.
#
# Otherwise assume we are reading from an actual FITS file and open
# in binary mode.
if sep:
fileobj = open(fileobj, encoding='latin1')
else:
fileobj = open(fileobj, 'rb')
close_file = True
try:
is_binary = fileobj_is_binary(fileobj)
def block_iter(nbytes):
while True:
data = fileobj.read(nbytes)
if data:
yield data
else:
break
return cls._from_blocks(block_iter, is_binary, sep, endcard,
padding)[1]
finally:
if close_file:
fileobj.close()
@classmethod
def _fromcards(cls, cards):
header = cls()
for idx, card in enumerate(cards):
header._cards.append(card)
keyword = Card.normalize_keyword(card.keyword)
header._keyword_indices[keyword].append(idx)
if card.field_specifier is not None:
header._rvkc_indices[card.rawkeyword].append(idx)
header._modified = False
return header
@classmethod
def _from_blocks(cls, block_iter, is_binary, sep, endcard, padding):
"""
The meat of `Header.fromfile`; in a separate method so that
`Header.fromfile` itself is just responsible for wrapping file
handling. Also used by `_BaseHDU.fromstring`.
``block_iter`` should be a callable which, given a block size n
(typically 2880 bytes as used by the FITS standard) returns an iterator
of byte strings of that block size.
``is_binary`` specifies whether the returned blocks are bytes or text
Returns both the entire header *string*, and the `Header` object
returned by Header.fromstring on that string.
"""
actual_block_size = _block_size(sep)
clen = Card.length + len(sep)
blocks = block_iter(actual_block_size)
# Read the first header block.
try:
block = next(blocks)
except StopIteration:
raise EOFError()
if not is_binary:
# TODO: There needs to be error handling at *this* level for
# non-ASCII characters; maybe at this stage decoding latin-1 might
# be safer
block = encode_ascii(block)
read_blocks = []
is_eof = False
end_found = False
# continue reading header blocks until END card or EOF is reached
while True:
# find the END card
end_found, block = cls._find_end_card(block, clen)
read_blocks.append(decode_ascii(block))
if end_found:
break
try:
block = next(blocks)
except StopIteration:
is_eof = True
break
if not block:
is_eof = True
break
if not is_binary:
block = encode_ascii(block)
header_str = ''.join(read_blocks)
_check_padding(header_str, actual_block_size, is_eof,
check_block_size=padding)
if not end_found and is_eof and endcard:
# TODO: Pass this error to validation framework as an ERROR,
# rather than raising an exception
raise OSError('Header missing END card.')
return header_str, cls.fromstring(header_str, sep=sep)
@classmethod
def _find_end_card(cls, block, card_len):
"""
Utility method to search a header block for the END card and handle
invalid END cards.
        This method can also return a modified copy of the input header block
in case an invalid end card needs to be sanitized.
"""
for mo in HEADER_END_RE.finditer(block):
# Ensure the END card was found, and it started on the
# boundary of a new card (see ticket #142)
if mo.start() % card_len != 0:
continue
# This must be the last header block, otherwise the
# file is malformatted
if mo.group('invalid'):
offset = mo.start()
trailing = block[offset + 3:offset + card_len - 3].rstrip()
if trailing:
trailing = repr(trailing).lstrip('ub')
# TODO: Pass this warning up to the validation framework
warnings.warn(
'Unexpected bytes trailing END keyword: {}; these '
'bytes will be replaced with spaces on write.'.format(
trailing), AstropyUserWarning)
else:
# TODO: Pass this warning up to the validation framework
warnings.warn(
'Missing padding to end of the FITS block after the '
'END keyword; additional spaces will be appended to '
'the file upon writing to pad out to {} '
'bytes.'.format(BLOCK_SIZE), AstropyUserWarning)
# Sanitize out invalid END card now that the appropriate
# warnings have been issued
block = (block[:offset] + encode_ascii(END_CARD) +
block[offset + len(END_CARD):])
return True, block
return False, block
def tostring(self, sep='', endcard=True, padding=True):
r"""
Returns a string representation of the header.
By default this uses no separator between cards, adds the END card, and
pads the string with spaces to the next multiple of 2880 bytes. That
is, it returns the header exactly as it would appear in a FITS file.
Parameters
----------
sep : str, optional
The character or string with which to separate cards. By default
there is no separator, but one could use ``'\\n'``, for example, to
separate each card with a new line
endcard : bool, optional
If True (default) adds the END card to the end of the header
string
padding : bool, optional
If True (default) pads the string with spaces out to the next
multiple of 2880 characters
Returns
-------
str
A string representing a FITS header.
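        For example, the default output is always a multiple of the FITS
        block size::
            >>> hdr = Header([('SIMPLE', True), ('BITPIX', 8)])
            >>> len(hdr.tostring())
            2880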
"""
lines = []
for card in self._cards:
s = str(card)
# Cards with CONTINUE cards may be longer than 80 chars; so break
# them into multiple lines
while s:
lines.append(s[:Card.length])
s = s[Card.length:]
s = sep.join(lines)
if endcard:
s += sep + _pad('END')
if padding:
s += ' ' * _pad_length(len(s))
return s
def tofile(self, fileobj, sep='', endcard=True, padding=True,
overwrite=False):
r"""
Writes the header to file or file-like object.
By default this writes the header exactly as it would be written to a
FITS file, with the END card included and padding to the next multiple
of 2880 bytes. However, aspects of this may be controlled.
Parameters
----------
fileobj : path-like or file-like, optional
Either the pathname of a file, or an open file handle or file-like
object.
sep : str, optional
The character or string with which to separate cards. By default
there is no separator, but one could use ``'\\n'``, for example, to
separate each card with a new line
endcard : bool, optional
If `True` (default) adds the END card to the end of the header
string
padding : bool, optional
If `True` (default) pads the string with spaces out to the next
multiple of 2880 characters
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
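        For example (an illustrative sketch; ``'hdr.fits'`` is a
        hypothetical output path)::
            hdr.tofile('hdr.fits', overwrite=True)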
"""
close_file = fileobj_closed(fileobj)
if not isinstance(fileobj, _File):
fileobj = _File(fileobj, mode='ostream', overwrite=overwrite)
try:
blocks = self.tostring(sep=sep, endcard=endcard, padding=padding)
actual_block_size = _block_size(sep)
if padding and len(blocks) % actual_block_size != 0:
raise OSError(
'Header size ({}) is not a multiple of block '
'size ({}).'.format(
len(blocks) - actual_block_size + BLOCK_SIZE,
BLOCK_SIZE))
fileobj.flush()
fileobj.write(blocks.encode('ascii'))
fileobj.flush()
finally:
if close_file:
fileobj.close()
@classmethod
def fromtextfile(cls, fileobj, endcard=False):
"""
Read a header from a simple text file or file-like object.
Equivalent to::
>>> Header.fromfile(fileobj, sep='\\n', endcard=False,
... padding=False)
See Also
--------
fromfile
"""
return cls.fromfile(fileobj, sep='\n', endcard=endcard, padding=False)
def totextfile(self, fileobj, endcard=False, overwrite=False):
"""
Write the header as text to a file or a file-like object.
Equivalent to::
>>> Header.tofile(fileobj, sep='\\n', endcard=False,
... padding=False, overwrite=overwrite)
See Also
--------
tofile
"""
self.tofile(fileobj, sep='\n', endcard=endcard, padding=False,
overwrite=overwrite)
def clear(self):
"""
Remove all cards from the header.
"""
self._cards = []
self._keyword_indices = collections.defaultdict(list)
self._rvkc_indices = collections.defaultdict(list)
def copy(self, strip=False):
"""
Make a copy of the :class:`Header`.
.. versionchanged:: 1.3
`copy.copy` and `copy.deepcopy` on a `Header` will call this
method.
Parameters
----------
strip : bool, optional
If `True`, strip any headers that are specific to one of the
standard HDU types, so that this header can be used in a different
HDU.
Returns
-------
`Header`
A new :class:`Header` instance.
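        For example, modifying the copy leaves the original unchanged::
            >>> hdr = Header([('NAXIS', 2)])
            >>> hdr2 = hdr.copy()
            >>> hdr2['NAXIS'] = 3
            >>> hdr['NAXIS']
            2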
"""
tmp = self.__class__(copy.copy(card) for card in self._cards)
if strip:
tmp.strip()
return tmp
def __copy__(self):
return self.copy()
def __deepcopy__(self, *args, **kwargs):
return self.copy()
@classmethod
def fromkeys(cls, iterable, value=None):
"""
Similar to :meth:`dict.fromkeys`--creates a new `Header` from an
iterable of keywords and an optional default value.
This method is not likely to be particularly useful for creating real
world FITS headers, but it is useful for testing.
Parameters
----------
iterable
Any iterable that returns strings representing FITS keywords.
value : optional
A default value to assign to each keyword; must be a valid type for
FITS keywords.
Returns
-------
`Header`
A new `Header` instance.
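        For example::
            >>> hdr = Header.fromkeys(['SIMPLE', 'BITPIX', 'NAXIS'])
            >>> len(hdr)
            3
            >>> list(hdr.keys())
            ['SIMPLE', 'BITPIX', 'NAXIS']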
"""
d = cls()
if not isinstance(value, tuple):
value = (value,)
for key in iterable:
d.append((key,) + value)
return d
def get(self, key, default=None):
"""
Similar to :meth:`dict.get`--returns the value associated with keyword
in the header, or a default value if the keyword is not found.
Parameters
----------
key : str
A keyword that may or may not be in the header.
default : optional
A default value to return if the keyword is not found in the
header.
Returns
-------
        value : str, number, complex, bool, or ``astropy.io.fits.card.Undefined``
The value associated with the given keyword, or the default value
if the keyword is not in the header.
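        For example::
            >>> hdr = Header([('EXTNAME', 'SCI')])
            >>> hdr.get('EXTNAME')
            'SCI'
            >>> hdr.get('MISSING', 0)
            0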
"""
try:
return self[key]
except (KeyError, IndexError):
return default
def set(self, keyword, value=None, comment=None, before=None, after=None):
"""
Set the value and/or comment and/or position of a specified keyword.
If the keyword does not already exist in the header, a new keyword is
created in the specified position, or appended to the end of the header
if no position is specified.
This method is similar to :meth:`Header.update` prior to Astropy v0.1.
.. note::
It should be noted that ``header.set(keyword, value)`` and
``header.set(keyword, value, comment)`` are equivalent to
``header[keyword] = value`` and
``header[keyword] = (value, comment)`` respectively.
New keywords can also be inserted relative to existing keywords
using, for example::
>>> header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes'))
to insert before an existing keyword, or::
>>> header.insert('NAXIS', ('NAXIS1', 4096), after=True)
to insert after an existing keyword.
The only advantage of using :meth:`Header.set` is that it
easily replaces the old usage of :meth:`Header.update` both
conceptually and in terms of function signature.
Parameters
----------
keyword : str
A header keyword
value : str, optional
The value to set for the given keyword; if None the existing value
is kept, but '' may be used to set a blank value
comment : str, optional
The comment to set for the given keyword; if None the existing
comment is kept, but ``''`` may be used to set a blank comment
before : str, int, optional
Name of the keyword, or index of the `Card` before which this card
should be located in the header. The argument ``before`` takes
precedence over ``after`` if both specified.
after : str, int, optional
Name of the keyword, or index of the `Card` after which this card
should be located in the header.
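        For example::
            >>> hdr = Header()
            >>> hdr.set('OBSERVER', 'Edwin Hubble', 'observer name')
            >>> hdr['OBSERVER']
            'Edwin Hubble'
            >>> hdr.comments['OBSERVER']
            'observer name'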
"""
# Create a temporary card that looks like the one being set; if the
# temporary card turns out to be a RVKC this will make it easier to
# deal with the idiosyncrasies thereof
        # Don't try to make a temporary card though if the keyword looks like
# it might be a HIERARCH card or is otherwise invalid--this step is
# only for validating RVKCs.
if (len(keyword) <= KEYWORD_LENGTH and
Card._keywd_FSC_RE.match(keyword) and
keyword not in self._keyword_indices):
new_card = Card(keyword, value, comment)
new_keyword = new_card.keyword
else:
new_keyword = keyword
if (new_keyword not in Card._commentary_keywords and
new_keyword in self):
if comment is None:
comment = self.comments[keyword]
if value is None:
value = self[keyword]
self[keyword] = (value, comment)
if before is not None or after is not None:
card = self._cards[self._cardindex(keyword)]
self._relativeinsert(card, before=before, after=after,
replace=True)
elif before is not None or after is not None:
self._relativeinsert((keyword, value, comment), before=before,
after=after)
else:
self[keyword] = (value, comment)
def items(self):
"""Like :meth:`dict.items`."""
for card in self._cards:
yield card.keyword, None if card.value == UNDEFINED else card.value
def keys(self):
"""
Like :meth:`dict.keys`--iterating directly over the `Header`
instance has the same behavior.
"""
for card in self._cards:
yield card.keyword
def values(self):
"""Like :meth:`dict.values`."""
for card in self._cards:
yield None if card.value == UNDEFINED else card.value
def pop(self, *args):
"""
Works like :meth:`list.pop` if no arguments or an index argument are
supplied; otherwise works like :meth:`dict.pop`.
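        For example::
            >>> hdr = Header([('A', 1), ('B', 2)])
            >>> hdr.pop('A')
            1
            >>> hdr.pop('MISSING', -1)
            -1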
"""
if len(args) > 2:
raise TypeError(f'Header.pop expected at most 2 arguments, got {len(args)}')
if len(args) == 0:
key = -1
else:
key = args[0]
try:
value = self[key]
except (KeyError, IndexError):
if len(args) == 2:
return args[1]
raise
del self[key]
return value
def popitem(self):
"""Similar to :meth:`dict.popitem`."""
try:
k, v = next(self.items())
except StopIteration:
raise KeyError('Header is empty')
del self[k]
return k, v
def setdefault(self, key, default=None):
"""Similar to :meth:`dict.setdefault`."""
try:
return self[key]
except (KeyError, IndexError):
self[key] = default
return default
def update(self, *args, **kwargs):
"""
Update the Header with new keyword values, updating the values of
existing keywords and appending new keywords otherwise; similar to
`dict.update`.
`update` accepts either a dict-like object or an iterable. In the
former case the keys must be header keywords and the values may be
either scalar values or (value, comment) tuples. In the case of an
iterable the items must be (keyword, value) tuples or (keyword, value,
comment) tuples.
Arbitrary arguments are also accepted, in which case the update() is
called again with the kwargs dict as its only argument. That is,
::
>>> header.update(NAXIS1=100, NAXIS2=100)
is equivalent to::
header.update({'NAXIS1': 100, 'NAXIS2': 100})
.. warning::
As this method works similarly to `dict.update` it is very
different from the ``Header.update()`` method in Astropy v0.1.
Use of the old API was
**deprecated** for a long time and is now removed. Most uses of the
old API can be replaced as follows:
* Replace ::
header.update(keyword, value)
with ::
header[keyword] = value
* Replace ::
header.update(keyword, value, comment=comment)
with ::
header[keyword] = (value, comment)
* Replace ::
header.update(keyword, value, before=before_keyword)
with ::
header.insert(before_keyword, (keyword, value))
* Replace ::
header.update(keyword, value, after=after_keyword)
with ::
header.insert(after_keyword, (keyword, value),
after=True)
See also :meth:`Header.set` which is a new method that provides an
interface similar to the old ``Header.update()`` and may help make
transition a little easier.
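        For example, a dict argument and keyword arguments may be combined::
            >>> hdr = Header()
            >>> hdr.update({'TELESCOP': 'HST'}, INSTRUME='WFC3')
            >>> sorted(hdr.keys())
            ['INSTRUME', 'TELESCOP']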
"""
if args:
other = args[0]
else:
other = None
def update_from_dict(k, v):
if not isinstance(v, tuple):
card = Card(k, v)
elif 0 < len(v) <= 2:
card = Card(*((k,) + v))
else:
raise ValueError(
'Header update value for key %r is invalid; the '
'value must be either a scalar, a 1-tuple '
'containing the scalar value, or a 2-tuple '
'containing the value and a comment string.' % k)
self._update(card)
if other is None:
pass
elif isinstance(other, Header):
for card in other.cards:
self._update(card)
elif hasattr(other, 'items'):
for k, v in other.items():
update_from_dict(k, v)
elif hasattr(other, 'keys'):
for k in other.keys():
update_from_dict(k, other[k])
else:
for idx, card in enumerate(other):
if isinstance(card, Card):
self._update(card)
elif isinstance(card, tuple) and (1 < len(card) <= 3):
self._update(Card(*card))
else:
raise ValueError(
'Header update sequence item #{} is invalid; '
'the item must either be a 2-tuple containing '
'a keyword and value, or a 3-tuple containing '
'a keyword, value, and comment string.'.format(idx))
if kwargs:
self.update(kwargs)
def append(self, card=None, useblanks=True, bottom=False, end=False):
"""
Appends a new keyword+value card to the end of the Header, similar
to `list.append`.
By default if the last cards in the Header have commentary keywords,
this will append the new keyword before the commentary (unless the new
keyword is also commentary).
Also differs from `list.append` in that it can be called with no
arguments: In this case a blank card is appended to the end of the
        Header. In this case all the keyword arguments are ignored.
Parameters
----------
card : str, tuple
A keyword or a (keyword, value, [comment]) tuple representing a
single header card; the comment is optional in which case a
2-tuple may be used
useblanks : bool, optional
If there are blank cards at the end of the Header, replace the
first blank card so that the total number of cards in the Header
does not increase. Otherwise preserve the number of blank cards.
bottom : bool, optional
If True, instead of appending after the last non-commentary card,
append after the last non-blank card.
end : bool, optional
If True, ignore the useblanks and bottom options, and append at the
very end of the Header.
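        For example::
            >>> hdr = Header()
            >>> hdr.append(('DATE-OBS', '2017-08-17', 'observation date'))
            >>> hdr['DATE-OBS']
            '2017-08-17'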
"""
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif card is None:
card = Card()
elif not isinstance(card, Card):
raise ValueError(
'The value appended to a Header must be either a keyword or '
'(keyword, value, [comment]) tuple; got: {!r}'.format(card))
if not end and card.is_blank:
# Blank cards should always just be appended to the end
end = True
if end:
self._cards.append(card)
idx = len(self._cards) - 1
else:
idx = len(self._cards) - 1
while idx >= 0 and self._cards[idx].is_blank:
idx -= 1
if not bottom and card.keyword not in Card._commentary_keywords:
while (idx >= 0 and
self._cards[idx].keyword in Card._commentary_keywords):
idx -= 1
idx += 1
self._cards.insert(idx, card)
self._updateindices(idx)
keyword = Card.normalize_keyword(card.keyword)
self._keyword_indices[keyword].append(idx)
if card.field_specifier is not None:
self._rvkc_indices[card.rawkeyword].append(idx)
if not end:
# If the appended card was a commentary card, and it was appended
# before existing cards with the same keyword, the indices for
# cards with that keyword may have changed
if not bottom and card.keyword in Card._commentary_keywords:
self._keyword_indices[keyword].sort()
            # Finally, if useblanks, delete blank cards from the end
if useblanks and self._countblanks():
                # Don't do this unless there is at least one blank card at the
                # end
# of the header; we need to convert the card to its string
# image to see how long it is. In the vast majority of cases
# this will just be 80 (Card.length) but it may be longer for
# CONTINUE cards
self._useblanks(len(str(card)) // Card.length)
self._modified = True
def extend(self, cards, strip=True, unique=False, update=False,
update_first=False, useblanks=True, bottom=False, end=False):
"""
Appends multiple keyword+value cards to the end of the header, similar
to `list.extend`.
Parameters
----------
cards : iterable
An iterable of (keyword, value, [comment]) tuples; see
`Header.append`.
strip : bool, optional
Remove any keywords that have meaning only to specific types of
HDUs, so that only more general keywords are added from extension
Header or Card list (default: `True`).
unique : bool, optional
If `True`, ensures that no duplicate keywords are appended;
keywords already in this header are simply discarded. The
exception is commentary keywords (COMMENT, HISTORY, etc.): they are
only treated as duplicates if their values match.
update : bool, optional
If `True`, update the current header with the values and comments
from duplicate keywords in the input header. This supersedes the
``unique`` argument. Commentary keywords are treated the same as
if ``unique=True``.
update_first : bool, optional
If the first keyword in the header is 'SIMPLE', and the first
keyword in the input header is 'XTENSION', the 'SIMPLE' keyword is
replaced by the 'XTENSION' keyword. Likewise if the first keyword
in the header is 'XTENSION' and the first keyword in the input
header is 'SIMPLE', the 'XTENSION' keyword is replaced by the
'SIMPLE' keyword. This behavior is otherwise dumb as to whether or
not the resulting header is a valid primary or extension header.
This is mostly provided to support backwards compatibility with the
old ``Header.fromTxtFile`` method, and only applies if
``update=True``.
useblanks, bottom, end : bool, optional
These arguments are passed to :meth:`Header.append` while appending
new cards to the header.
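        For example, with ``unique=True`` keywords already present are
        skipped::
            >>> hdr = Header([('A', 1)])
            >>> hdr.extend([('A', 2), ('B', 3)], unique=True)
            >>> hdr['A'], hdr['B']
            (1, 3)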
"""
temp = self.__class__(cards)
if strip:
temp.strip()
if len(self):
first = self._cards[0].keyword
else:
first = None
# We don't immediately modify the header, because first we need to sift
# out any duplicates in the new header prior to adding them to the
# existing header, but while *allowing* duplicates from the header
# being extended from (see ticket #156)
extend_cards = []
for idx, card in enumerate(temp.cards):
keyword = card.keyword
if keyword not in Card._commentary_keywords:
if unique and not update and keyword in self:
continue
elif update:
if idx == 0 and update_first:
# Dumbly update the first keyword to either SIMPLE or
                    # XTENSION as the case may be, as was the case in
# Header.fromTxtFile
if ((keyword == 'SIMPLE' and first == 'XTENSION') or
(keyword == 'XTENSION' and first == 'SIMPLE')):
del self[0]
self.insert(0, card)
else:
self[keyword] = (card.value, card.comment)
elif keyword in self:
self[keyword] = (card.value, card.comment)
else:
extend_cards.append(card)
else:
extend_cards.append(card)
else:
if (unique or update) and keyword in self:
if card.is_blank:
extend_cards.append(card)
continue
for value in self[keyword]:
if value == card.value:
break
else:
extend_cards.append(card)
else:
extend_cards.append(card)
for card in extend_cards:
self.append(card, useblanks=useblanks, bottom=bottom, end=end)
def count(self, keyword):
"""
Returns the count of the given keyword in the header, similar to
`list.count` if the Header object is treated as a list of keywords.
Parameters
----------
keyword : str
The keyword to count instances of in the header
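        For example::
            >>> hdr = Header()
            >>> hdr['HISTORY'] = 'first entry'
            >>> hdr['HISTORY'] = 'second entry'
            >>> hdr.count('HISTORY')
            2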
"""
keyword = Card.normalize_keyword(keyword)
# We have to look before we leap, since otherwise _keyword_indices,
# being a defaultdict, will create an entry for the nonexistent keyword
if keyword not in self._keyword_indices:
raise KeyError(f"Keyword {keyword!r} not found.")
return len(self._keyword_indices[keyword])
def index(self, keyword, start=None, stop=None):
"""
        Returns the index of the first instance of the given keyword in the
header, similar to `list.index` if the Header object is treated as a
list of keywords.
Parameters
----------
keyword : str
The keyword to look up in the list of all keywords in the header
start : int, optional
The lower bound for the index
stop : int, optional
The upper bound for the index
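        For example::
            >>> hdr = Header([('A', 1), ('B', 2)])
            >>> hdr.index('B')
            1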
"""
if start is None:
start = 0
if stop is None:
stop = len(self._cards)
if stop < start:
step = -1
else:
step = 1
norm_keyword = Card.normalize_keyword(keyword)
for idx in range(start, stop, step):
if self._cards[idx].keyword.upper() == norm_keyword:
return idx
else:
raise ValueError(f'The keyword {keyword!r} is not in the header.')
def insert(self, key, card, useblanks=True, after=False):
"""
Inserts a new keyword+value card into the Header at a given location,
similar to `list.insert`.
Parameters
----------
key : int, str, or tuple
The index into the list of header keywords before which the
new keyword should be inserted, or the name of a keyword before
which the new keyword should be inserted. Can also accept a
(keyword, index) tuple for inserting around duplicate keywords.
card : str, tuple
A keyword or a (keyword, value, [comment]) tuple; see
`Header.append`
useblanks : bool, optional
If there are blank cards at the end of the Header, replace the
first blank card so that the total number of cards in the Header
does not increase. Otherwise preserve the number of blank cards.
after : bool, optional
If set to `True`, insert *after* the specified index or keyword,
rather than before it. Defaults to `False`.
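        For example::
            >>> hdr = Header([('A', 1), ('C', 3)])
            >>> hdr.insert('C', ('B', 2))
            >>> list(hdr.keys())
            ['A', 'B', 'C']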
"""
if not isinstance(key, numbers.Integral):
# Don't pass through ints to _cardindex because it will not take
# kindly to indices outside the existing number of cards in the
# header, which insert needs to be able to support (for example
# when inserting into empty headers)
idx = self._cardindex(key)
else:
idx = key
if after:
if idx == -1:
idx = len(self._cards)
else:
idx += 1
if idx >= len(self._cards):
# This is just an append (Though it must be an append absolutely to
# the bottom, ignoring blanks, etc.--the point of the insert method
# is that you get exactly what you asked for with no surprises)
self.append(card, end=True)
return
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif not isinstance(card, Card):
raise ValueError(
'The value inserted into a Header must be either a keyword or '
'(keyword, value, [comment]) tuple; got: {!r}'.format(card))
self._cards.insert(idx, card)
keyword = card.keyword
# If idx was < 0, determine the actual index according to the rules
# used by list.insert()
if idx < 0:
idx += len(self._cards) - 1
if idx < 0:
idx = 0
# All the keyword indices above the insertion point must be updated
self._updateindices(idx)
keyword = Card.normalize_keyword(keyword)
self._keyword_indices[keyword].append(idx)
count = len(self._keyword_indices[keyword])
if count > 1:
# There were already keywords with this same name
if keyword not in Card._commentary_keywords:
warnings.warn(
'A {!r} keyword already exists in this header. Inserting '
'duplicate keyword.'.format(keyword), AstropyUserWarning)
self._keyword_indices[keyword].sort()
if card.field_specifier is not None:
# Update the index of RVKC as well
rvkc_indices = self._rvkc_indices[card.rawkeyword]
rvkc_indices.append(idx)
rvkc_indices.sort()
if useblanks:
self._useblanks(len(str(card)) // Card.length)
self._modified = True
def remove(self, keyword, ignore_missing=False, remove_all=False):
"""
Removes the first instance of the given keyword from the header similar
to `list.remove` if the Header object is treated as a list of keywords.
Parameters
----------
keyword : str
The keyword of which to remove the first instance in the header.
ignore_missing : bool, optional
When True, ignores missing keywords. Otherwise, if the keyword
is not present in the header a KeyError is raised.
remove_all : bool, optional
When True, all instances of keyword will be removed.
Otherwise only the first instance of the given keyword is removed.
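        For example::
            >>> hdr = Header([('A', 1), ('B', 2)])
            >>> hdr.remove('A')
            >>> 'A' in hdr
            False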
"""
keyword = Card.normalize_keyword(keyword)
if keyword in self._keyword_indices:
del self[self._keyword_indices[keyword][0]]
if remove_all:
while keyword in self._keyword_indices:
del self[self._keyword_indices[keyword][0]]
elif not ignore_missing:
raise KeyError(f"Keyword '{keyword}' not found.")
def rename_keyword(self, oldkeyword, newkeyword, force=False):
"""
Rename a card's keyword in the header.
Parameters
----------
oldkeyword : str or int
Old keyword or card index
newkeyword : str
New keyword
force : bool, optional
When `True`, if the new keyword already exists in the header, force
the creation of a duplicate keyword. Otherwise a
`ValueError` is raised.
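        For example::
            >>> hdr = Header([('OBJCT', 'M31')])
            >>> hdr.rename_keyword('OBJCT', 'OBJECT')
            >>> hdr['OBJECT']
            'M31'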
"""
oldkeyword = Card.normalize_keyword(oldkeyword)
newkeyword = Card.normalize_keyword(newkeyword)
if newkeyword == 'CONTINUE':
raise ValueError('Can not rename to CONTINUE')
if (newkeyword in Card._commentary_keywords or
oldkeyword in Card._commentary_keywords):
if not (newkeyword in Card._commentary_keywords and
oldkeyword in Card._commentary_keywords):
raise ValueError('Regular and commentary keys can not be '
'renamed to each other.')
elif not force and newkeyword in self:
raise ValueError(f'Intended keyword {newkeyword} already exists in header.')
idx = self.index(oldkeyword)
card = self._cards[idx]
del self[idx]
self.insert(idx, (newkeyword, card.value, card.comment))
def add_history(self, value, before=None, after=None):
"""
Add a ``HISTORY`` card.
Parameters
----------
value : str
History text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
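        For example::
            >>> hdr = Header()
            >>> hdr.add_history('Processed with custom pipeline')
            >>> hdr['HISTORY']
            Processed with custom pipeline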
"""
self._add_commentary('HISTORY', value, before=before, after=after)
def add_comment(self, value, before=None, after=None):
"""
Add a ``COMMENT`` card.
Parameters
----------
value : str
Text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary('COMMENT', value, before=before, after=after)
def add_blank(self, value='', before=None, after=None):
"""
Add a blank card.
Parameters
----------
value : str, optional
Text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary('', value, before=before, after=after)
def strip(self):
"""
Strip cards specific to a certain kind of header.
Strip cards like ``SIMPLE``, ``BITPIX``, etc. so the rest of
the header can be used to reconstruct another kind of header.
"""
# TODO: Previously this only deleted some cards specific to an HDU if
# _hdutype matched that type. But it seemed simple enough to just
# delete all desired cards anyways, and just ignore the KeyErrors if
# they don't exist.
# However, it might be desirable to make this extendable somehow--have
# a way for HDU classes to specify some headers that are specific only
# to that type, and should be removed otherwise.
naxis = self.get('NAXIS', 0)
tfields = self.get('TFIELDS', 0)
for idx in range(naxis):
self.remove('NAXIS' + str(idx + 1), ignore_missing=True)
for name in ('TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE',
'TUNIT', 'TDISP', 'TDIM', 'THEAP', 'TBCOL'):
for idx in range(tfields):
self.remove(name + str(idx + 1), ignore_missing=True)
for name in ('SIMPLE', 'XTENSION', 'BITPIX', 'NAXIS', 'EXTEND',
'PCOUNT', 'GCOUNT', 'GROUPS', 'BSCALE', 'BZERO',
'TFIELDS'):
self.remove(name, ignore_missing=True)
@property
def data_size(self):
"""
Return the size (in bytes) of the data portion following the `Header`.
"""
return _hdr_data_size(self)
@property
def data_size_padded(self):
"""
Return the size (in bytes) of the data portion following the `Header`
including padding.
"""
size = self.data_size
return size + _pad_length(size)
def _update(self, card):
"""
The real update code. If keyword already exists, its value and/or
comment will be updated. Otherwise a new card will be appended.
This will not create a duplicate keyword except in the case of
commentary cards. The only other way to force creation of a duplicate
is to use the insert(), append(), or extend() methods.
"""
keyword, value, comment = card
# Lookups for existing/known keywords are case-insensitive
keyword = keyword.strip().upper()
if keyword.startswith('HIERARCH '):
keyword = keyword[9:]
if (keyword not in Card._commentary_keywords and
keyword in self._keyword_indices):
# Easy; just update the value/comment
idx = self._keyword_indices[keyword][0]
existing_card = self._cards[idx]
existing_card.value = value
if comment is not None:
# '' should be used to explicitly blank a comment
existing_card.comment = comment
if existing_card._modified:
self._modified = True
elif keyword in Card._commentary_keywords:
cards = self._splitcommentary(keyword, value)
if keyword in self._keyword_indices:
# Append after the last keyword of the same type
idx = self.index(keyword, start=len(self) - 1, stop=-1)
isblank = not (keyword or value or comment)
for c in reversed(cards):
self.insert(idx + 1, c, useblanks=(not isblank))
else:
for c in cards:
self.append(c, bottom=True)
else:
# A new keyword! self.append() will handle updating _modified
self.append(card)
def _cardindex(self, key):
"""Returns an index into the ._cards list given a valid lookup key."""
# This used to just set key = (key, 0) and then go on to act as if the
# user passed in a tuple, but it's much more common to just be given a
# string as the key, so optimize more for that case
if isinstance(key, str):
keyword = key
n = 0
elif isinstance(key, numbers.Integral):
# If < 0, determine the actual index
if key < 0:
key += len(self._cards)
if key < 0 or key >= len(self._cards):
raise IndexError('Header index out of range.')
return key
elif isinstance(key, slice):
return key
elif isinstance(key, tuple):
if (len(key) != 2 or not isinstance(key[0], str) or
not isinstance(key[1], numbers.Integral)):
raise ValueError(
'Tuple indices must be 2-tuples consisting of a '
'keyword string and an integer index.')
keyword, n = key
else:
raise ValueError(
'Header indices must be either a string, a 2-tuple, or '
'an integer.')
keyword = Card.normalize_keyword(keyword)
# Returns the index into _cards for the n-th card with the given
# keyword (where n is 0-based)
indices = self._keyword_indices.get(keyword, None)
if keyword and not indices:
if len(keyword) > KEYWORD_LENGTH or '.' in keyword:
raise KeyError(f"Keyword {keyword!r} not found.")
else:
# Maybe it's a RVKC?
indices = self._rvkc_indices.get(keyword, None)
if not indices:
raise KeyError(f"Keyword {keyword!r} not found.")
try:
return indices[n]
except IndexError:
raise IndexError('There are only {} {!r} cards in the '
'header.'.format(len(indices), keyword))
def _keyword_from_index(self, idx):
"""
Given an integer index, return the (keyword, repeat) tuple that index
refers to. For most keywords the repeat will always be zero, but it
may be greater than zero for keywords that are duplicated (especially
commentary keywords).
In a sense this is the inverse of self.index, except that it also
supports duplicates.
"""
if idx < 0:
idx += len(self._cards)
keyword = self._cards[idx].keyword
keyword = Card.normalize_keyword(keyword)
repeat = self._keyword_indices[keyword].index(idx)
return keyword, repeat
def _relativeinsert(self, card, before=None, after=None, replace=False):
"""
Inserts a new card before or after an existing card; used to
implement support for the legacy before/after keyword arguments to
Header.update().
If replace=True, move an existing card with the same keyword.
"""
if before is None:
insertionkey = after
else:
insertionkey = before
def get_insertion_idx():
if not (isinstance(insertionkey, numbers.Integral) and
insertionkey >= len(self._cards)):
idx = self._cardindex(insertionkey)
else:
idx = insertionkey
if before is None:
idx += 1
return idx
if replace:
# The card presumably already exists somewhere in the header.
# Check whether or not we actually have to move it; if it does need
# to be moved we just delete it and then it will be reinserted
# below
old_idx = self._cardindex(card.keyword)
insertion_idx = get_insertion_idx()
if (insertion_idx >= len(self._cards) and
old_idx == len(self._cards) - 1):
# The card would be appended to the end, but it's already at
# the end
return
if before is not None:
if old_idx == insertion_idx - 1:
return
elif after is not None and old_idx == insertion_idx:
return
del self[old_idx]
# Even if replace=True, the insertion idx may have changed since the
# old card was deleted
idx = get_insertion_idx()
if card[0] in Card._commentary_keywords:
cards = reversed(self._splitcommentary(card[0], card[1]))
else:
cards = [card]
for c in cards:
self.insert(idx, c)
def _updateindices(self, idx, increment=True):
"""
For all cards with index above idx, increment or decrement its index
value in the keyword_indices dict.
"""
if idx > len(self._cards):
# Save us some effort
return
increment = 1 if increment else -1
for index_sets in (self._keyword_indices, self._rvkc_indices):
for indices in index_sets.values():
for jdx, keyword_index in enumerate(indices):
if keyword_index >= idx:
indices[jdx] += increment
def _countblanks(self):
"""Returns the number of blank cards at the end of the Header."""
for idx in range(1, len(self._cards)):
if not self._cards[-idx].is_blank:
return idx - 1
return 0
def _useblanks(self, count):
for _ in range(count):
if self._cards[-1].is_blank:
del self[-1]
else:
break
def _haswildcard(self, keyword):
"""Return `True` if the input keyword contains a wildcard pattern."""
return (isinstance(keyword, str) and
(keyword.endswith('...') or '*' in keyword or '?' in keyword))
def _wildcardmatch(self, pattern):
"""
Returns a list of indices of the cards matching the given wildcard
pattern.
* '*' matches 0 or more characters
* '?' matches a single character
        * '...' matches 0 or more of any non-whitespace character
        """
"""
pattern = pattern.replace('*', r'.*').replace('?', r'.')
pattern = pattern.replace('...', r'\S*') + '$'
pattern_re = re.compile(pattern, re.I)
return [idx for idx, card in enumerate(self._cards)
if pattern_re.match(card.keyword)]
def _set_slice(self, key, value, target):
"""
Used to implement Header.__setitem__ and CardAccessor.__setitem__.
"""
if isinstance(key, slice) or self._haswildcard(key):
if isinstance(key, slice):
indices = range(*key.indices(len(target)))
else:
indices = self._wildcardmatch(key)
if isinstance(value, str) or not isiterable(value):
value = itertools.repeat(value, len(indices))
for idx, val in zip(indices, value):
target[idx] = val
return True
return False
def _splitcommentary(self, keyword, value):
"""
Given a commentary keyword and value, returns a list of the one or more
cards needed to represent the full value. This is primarily used to
create the multiple commentary cards needed to represent a long value
that won't fit into a single commentary card.
"""
# The maximum value in each card can be the maximum card length minus
        # the maximum key length (which can include spaces if the keyword
        # length is less than 8)
maxlen = Card.length - KEYWORD_LENGTH
valuestr = str(value)
if len(valuestr) <= maxlen:
# The value can fit in a single card
cards = [Card(keyword, value)]
else:
# The value must be split across multiple consecutive commentary
# cards
idx = 0
cards = []
while idx < len(valuestr):
cards.append(Card(keyword, valuestr[idx:idx + maxlen]))
idx += maxlen
return cards
def _add_commentary(self, key, value, before=None, after=None):
"""
Add a commentary card.
If ``before`` and ``after`` are `None`, add to the last occurrence
of cards of the same name (except blank card). If there is no
card (or blank card), append at the end.
"""
if before is not None or after is not None:
self._relativeinsert((key, value), before=before,
after=after)
else:
self[key] = value
collections.abc.MutableSequence.register(Header)
collections.abc.MutableMapping.register(Header)
class _DelayedHeader:
"""
Descriptor used to create the Header object from the header string that
was stored in HDU._header_str when parsing the file.
"""
def __get__(self, obj, owner=None):
try:
return obj.__dict__['_header']
except KeyError:
if obj._header_str is not None:
hdr = Header.fromstring(obj._header_str)
obj._header_str = None
else:
raise AttributeError("'{}' object has no attribute '_header'"
.format(obj.__class__.__name__))
obj.__dict__['_header'] = hdr
return hdr
def __set__(self, obj, val):
obj.__dict__['_header'] = val
def __delete__(self, obj):
del obj.__dict__['_header']
class _BasicHeaderCards:
"""
    This class allows access to cards through the _BasicHeader.cards attribute.
    This is needed because during HDU class detection, some HDUs use
the .cards interface. Cards cannot be modified here as the _BasicHeader
object will be deleted once the HDU object is created.
"""
def __init__(self, header):
self.header = header
def __getitem__(self, key):
# .cards is a list of cards, so key here is an integer.
# get the keyword name from its index.
key = self.header._keys[key]
# then we get the card from the _BasicHeader._cards list, or parse it
# if needed.
try:
return self.header._cards[key]
except KeyError:
cardstr = self.header._raw_cards[key]
card = Card.fromstring(cardstr)
self.header._cards[key] = card
return card
class _BasicHeader(collections.abc.Mapping):
    """This class provides fast header parsing, without all the additional
features of the Header class. Here only standard keywords are parsed, no
support for CONTINUE, HIERARCH, COMMENT, HISTORY, or rvkc.
The raw card images are stored and parsed only if needed. The idea is that
to create the HDU objects, only a small subset of standard cards is needed.
Once a card is parsed, which is deferred to the Card class, the Card object
is kept in a cache. This is useful because a small subset of cards is used
a lot in the HDU creation process (NAXIS, XTENSION, ...).
"""
def __init__(self, cards):
# dict of (keywords, card images)
self._raw_cards = cards
self._keys = list(cards.keys())
# dict of (keyword, Card object) storing the parsed cards
self._cards = {}
        # the _BasicHeaderCards object allows access to Card objects by
        # integer index
self.cards = _BasicHeaderCards(self)
self._modified = False
def __getitem__(self, key):
if isinstance(key, numbers.Integral):
key = self._keys[key]
try:
return self._cards[key].value
except KeyError:
# parse the Card and store it
cardstr = self._raw_cards[key]
self._cards[key] = card = Card.fromstring(cardstr)
return card.value
def __len__(self):
return len(self._raw_cards)
def __iter__(self):
return iter(self._raw_cards)
def index(self, keyword):
return self._keys.index(keyword)
@property
def data_size(self):
"""
Return the size (in bytes) of the data portion following the `Header`.
"""
return _hdr_data_size(self)
@property
def data_size_padded(self):
"""
Return the size (in bytes) of the data portion following the `Header`
including padding.
"""
size = self.data_size
return size + _pad_length(size)
@classmethod
def fromfile(cls, fileobj):
"""The main method to parse a FITS header from a file. The parsing is
done with the parse_header function implemented in Cython."""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'rb')
close_file = True
try:
header_str, cards = parse_header(fileobj)
_check_padding(header_str, BLOCK_SIZE, False)
return header_str, cls(cards)
finally:
if close_file:
fileobj.close()
class _CardAccessor:
"""
This is a generic class for wrapping a Header in such a way that you can
use the header's slice/filtering capabilities to return a subset of cards
and do something with them.
This is sort of the opposite notion of the old CardList class--whereas
Header used to use CardList to get lists of cards, this uses Header to get
lists of cards.
"""
# TODO: Consider giving this dict/list methods like Header itself
def __init__(self, header):
self._header = header
def __repr__(self):
return '\n'.join(repr(c) for c in self._header._cards)
def __len__(self):
return len(self._header._cards)
def __iter__(self):
return iter(self._header._cards)
def __eq__(self, other):
# If the `other` item is a scalar we will still treat it as equal if
# this _CardAccessor only contains one item
if not isiterable(other) or isinstance(other, str):
if len(self) == 1:
other = [other]
else:
return False
for a, b in itertools.zip_longest(self, other):
if a != b:
return False
else:
return True
def __ne__(self, other):
return not (self == other)
def __getitem__(self, item):
if isinstance(item, slice) or self._header._haswildcard(item):
return self.__class__(self._header[item])
idx = self._header._cardindex(item)
return self._header._cards[idx]
def _setslice(self, item, value):
"""
Helper for implementing __setitem__ on _CardAccessor subclasses; slices
should always be handled in this same way.
"""
if isinstance(item, slice) or self._header._haswildcard(item):
if isinstance(item, slice):
indices = range(*item.indices(len(self)))
else:
indices = self._header._wildcardmatch(item)
if isinstance(value, str) or not isiterable(value):
value = itertools.repeat(value, len(indices))
for idx, val in zip(indices, value):
self[idx] = val
return True
return False
class _HeaderComments(_CardAccessor):
"""
A class used internally by the Header class for the Header.comments
attribute access.
This object can be used to display all the keyword comments in the Header,
or look up the comments on specific keywords. It allows all the same forms
of keyword lookup as the Header class itself, but returns comments instead
of values.
"""
def __iter__(self):
for card in self._header._cards:
yield card.comment
def __repr__(self):
"""Returns a simple list of all keywords and their comments."""
keyword_length = KEYWORD_LENGTH
for card in self._header._cards:
keyword_length = max(keyword_length, len(card.keyword))
return '\n'.join('{:>{len}} {}'.format(c.keyword, c.comment,
len=keyword_length)
for c in self._header._cards)
def __getitem__(self, item):
"""
Slices and filter strings return a new _HeaderComments containing the
returned cards. Otherwise the comment of a single card is returned.
"""
item = super().__getitem__(item)
if isinstance(item, _HeaderComments):
# The item key was a slice
return item
return item.comment
def __setitem__(self, item, comment):
"""
Set/update the comment on specified card or cards.
Slice/filter updates work similarly to how Header.__setitem__ works.
"""
if self._header._set_slice(item, comment, self):
return
# In this case, key/index errors should be raised; don't update
# comments of nonexistent cards
idx = self._header._cardindex(item)
value = self._header[idx]
self._header[idx] = (value, comment)
class _HeaderCommentaryCards(_CardAccessor):
"""
This is used to return a list-like sequence over all the values in the
header for a given commentary keyword, such as HISTORY.
"""
def __init__(self, header, keyword=''):
super().__init__(header)
self._keyword = keyword
self._count = self._header.count(self._keyword)
self._indices = slice(self._count).indices(self._count)
# __len__ and __iter__ need to be overridden from the base class due to the
# different approach this class has to take for slicing
def __len__(self):
return len(range(*self._indices))
def __iter__(self):
for idx in range(*self._indices):
yield self._header[(self._keyword, idx)]
def __repr__(self):
return '\n'.join(str(x) for x in self)
def __getitem__(self, idx):
if isinstance(idx, slice):
n = self.__class__(self._header, self._keyword)
n._indices = idx.indices(self._count)
return n
elif not isinstance(idx, numbers.Integral):
raise ValueError(f'{self._keyword} index must be an integer')
idx = list(range(*self._indices))[idx]
return self._header[(self._keyword, idx)]
def __setitem__(self, item, value):
"""
Set the value of a specified commentary card or cards.
Slice/filter updates work similarly to how Header.__setitem__ works.
"""
if self._header._set_slice(item, value, self):
return
        # In this case, key/index errors should be raised; don't update
        # values of nonexistent cards
self._header[(self._keyword, item)] = value
def _block_size(sep):
"""
Determine the size of a FITS header block if a non-blank separator is used
between cards.
"""
return BLOCK_SIZE + (len(sep) * (BLOCK_SIZE // Card.length - 1))
def _pad_length(stringlen):
"""Bytes needed to pad the input stringlen to the next FITS block."""
return (BLOCK_SIZE - (stringlen % BLOCK_SIZE)) % BLOCK_SIZE
def _check_padding(header_str, block_size, is_eof, check_block_size=True):
# Strip any zero-padding (see ticket #106)
if header_str and header_str[-1] == '\0':
if is_eof and header_str.strip('\0') == '':
# TODO: Pass this warning to validation framework
warnings.warn(
'Unexpected extra padding at the end of the file. This '
'padding may not be preserved when saving changes.',
AstropyUserWarning)
raise EOFError()
else:
# Replace the illegal null bytes with spaces as required by
# the FITS standard, and issue a nasty warning
# TODO: Pass this warning to validation framework
warnings.warn(
'Header block contains null bytes instead of spaces for '
'padding, and is not FITS-compliant. Nulls may be '
'replaced with spaces upon writing.', AstropyUserWarning)
            # Note that this only sanitizes the local copy of the string; the
            # durable replacement with spaces happens when the header is
            # rewritten
            header_str = header_str.replace('\0', ' ')
if check_block_size and (len(header_str) % block_size) != 0:
# This error message ignores the length of the separator for
# now, but maybe it shouldn't?
actual_len = len(header_str) - block_size + BLOCK_SIZE
# TODO: Pass this error to validation framework
raise ValueError(f'Header size is not multiple of {BLOCK_SIZE}: {actual_len}')
def _hdr_data_size(header):
"""Calculate the data size (in bytes) following the given `Header`"""
size = 0
naxis = header.get('NAXIS', 0)
if naxis > 0:
size = 1
for idx in range(naxis):
size = size * header['NAXIS' + str(idx + 1)]
bitpix = header['BITPIX']
gcount = header.get('GCOUNT', 1)
pcount = header.get('PCOUNT', 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size
|
9489e393948bd4e6b26e07d827797cfafaf789eba5390cfac49aec584978a22e | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import operator
import warnings
from astropy.utils import indent
from astropy.utils.exceptions import AstropyUserWarning
class VerifyError(Exception):
"""
Verify exception class.
"""
class VerifyWarning(AstropyUserWarning):
"""
Verify warning class.
"""
VERIFY_OPTIONS = ['ignore', 'warn', 'exception', 'fix', 'silentfix',
'fix+ignore', 'fix+warn', 'fix+exception',
'silentfix+ignore', 'silentfix+warn', 'silentfix+exception']
class _Verify:
"""
Shared methods for verification.
"""
def run_option(self, option='warn', err_text='', fix_text='Fixed.',
fix=None, fixable=True):
"""
Execute the verification with selected option.
"""
text = err_text
if option in ['warn', 'exception']:
fixable = False
# fix the value
elif not fixable:
text = f'Unfixable error: {text}'
else:
if fix:
fix()
text += ' ' + fix_text
return (fixable, text)
def verify(self, option='warn'):
"""
Verify all values in the instance.
Parameters
----------
option : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
"""
opt = option.lower()
if opt not in VERIFY_OPTIONS:
raise ValueError(f'Option {option!r} not recognized.')
if opt == 'ignore':
return
errs = self._verify(opt)
# Break the verify option into separate options related to reporting of
# errors, and fixing of fixable errors
if '+' in opt:
fix_opt, report_opt = opt.split('+')
elif opt in ['fix', 'silentfix']:
# The original default behavior for 'fix' and 'silentfix' was to
# raise an exception for unfixable errors
fix_opt, report_opt = opt, 'exception'
else:
fix_opt, report_opt = None, opt
if fix_opt == 'silentfix' and report_opt == 'ignore':
# Fixable errors were fixed, but don't report anything
return
if fix_opt == 'silentfix':
# Don't print out fixable issues; the first element of each verify
# item is a boolean indicating whether or not the issue was fixable
line_filter = lambda x: not x[0]
elif fix_opt == 'fix' and report_opt == 'ignore':
# Don't print *unfixable* issues, but do print fixed issues; this
# is probably not very useful but the option exists for
# completeness
line_filter = operator.itemgetter(0)
else:
line_filter = None
unfixable = False
messages = []
for fixable, message in errs.iter_lines(filter=line_filter):
if fixable is not None:
unfixable = not fixable
messages.append(message)
if messages:
messages.insert(0, 'Verification reported errors:')
messages.append('Note: astropy.io.fits uses zero-based indexing.\n')
if fix_opt == 'silentfix' and not unfixable:
return
elif report_opt == 'warn' or (fix_opt == 'fix' and not unfixable):
for line in messages:
warnings.warn(line, VerifyWarning)
else:
raise VerifyError('\n' + '\n'.join(messages))
class _ErrList(list):
"""
Verification errors list class. It has a nested list structure
constructed by error messages generated by verifications at
different class levels.
"""
def __init__(self, val=(), unit='Element'):
super().__init__(val)
self.unit = unit
def __str__(self):
return '\n'.join(item[1] for item in self.iter_lines())
def iter_lines(self, filter=None, shift=0):
"""
Iterate the nested structure as a list of strings with appropriate
indentations for each level of structure.
"""
element = 0
        # Go through the list twice: the first time, print out all top-level
        # messages
for item in self:
if not isinstance(item, _ErrList):
if filter is None or filter(item):
yield item[0], indent(item[1], shift=shift)
        # The second time, go through the next-level items; each next level
        # must be present, even if it has nothing.
for item in self:
if isinstance(item, _ErrList):
next_lines = item.iter_lines(filter=filter, shift=shift + 1)
try:
first_line = next(next_lines)
except StopIteration:
first_line = None
if first_line is not None:
if self.unit:
# This line is sort of a header for the next level in
# the hierarchy
yield None, indent(f'{self.unit} {element}:',
shift=shift)
yield first_line
yield from next_lines
element += 1
|
6ab28a86a174b5c72e04023010c44a864feed75bb8f142143f9dabb71d7576cd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
""" An extensible ASCII table reader and writer.
core.py:
Core base classes and functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft ([email protected])
"""
import copy
import csv
import functools
import itertools
import operator
import os
import re
import warnings
import inspect
import fnmatch
from collections import OrderedDict
from contextlib import suppress
from io import StringIO
import numpy
from astropy.utils.exceptions import AstropyWarning
from astropy.table import Table
from astropy.utils.data import get_readable_fileobj
from . import connect
from .docs import READ_DOCSTRING, WRITE_DOCSTRING
# Global dictionary mapping format arg to the corresponding Reader class
FORMAT_CLASSES = {}
# Similar dictionary for fast readers
FAST_CLASSES = {}
def _check_multidim_table(table, max_ndim):
"""Check that ``table`` has only columns with ndim <= ``max_ndim``
Currently ECSV is the only built-in format that supports output of arbitrary
N-d columns, but HTML supports 2-d.
"""
# No limit?
if max_ndim is None:
return
# Check for N-d columns
nd_names = [col.info.name for col in table.itercols() if len(col.shape) > max_ndim]
if nd_names:
raise ValueError(f'column(s) with dimension > {max_ndim} '
"cannot be be written with this format, try using 'ecsv' "
"(Enhanced CSV) format")
class CsvWriter:
"""
Internal class to replace the csv writer ``writerow`` and ``writerows``
functions so that in the case of ``delimiter=' '`` and
``quoting=csv.QUOTE_MINIMAL``, the output field value is quoted for empty
fields (when value == '').
This changes the API slightly in that the writerow() and writerows()
methods return the output written string instead of the length of
that string.
Examples
--------
>>> from astropy.io.ascii.core import CsvWriter
>>> writer = CsvWriter(delimiter=' ')
>>> print(writer.writerow(['hello', '', 'world']))
hello "" world
"""
# Random 16-character string that gets injected instead of any
# empty fields and is then replaced post-write with doubled-quotechar.
# Created with:
# ''.join(random.choice(string.printable[:90]) for _ in range(16))
replace_sentinel = '2b=48Av%0-V3p>bX'
def __init__(self, csvfile=None, **kwargs):
self.csvfile = csvfile
# Temporary StringIO for catching the real csv.writer() object output
self.temp_out = StringIO()
self.writer = csv.writer(self.temp_out, **kwargs)
dialect = self.writer.dialect
self.quotechar2 = dialect.quotechar * 2
self.quote_empty = (dialect.quoting == csv.QUOTE_MINIMAL) and (dialect.delimiter == ' ')
def writerow(self, values):
"""
Similar to csv.writer.writerow but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for i, value in enumerate(values):
if value == '':
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerow, values, has_empty)
def writerows(self, values_list):
"""
Similar to csv.writer.writerows but with the custom quoting behavior.
Returns the written string instead of the length of that string.
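        For example (splitting on whitespace to sidestep the CSV line
        terminator)::
            >>> from astropy.io.ascii.core import CsvWriter
            >>> writer = CsvWriter(delimiter=' ')
            >>> writer.writerows([['x', ''], ['', 'y']]).split()
            ['x', '""', '""', 'y']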
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for values in values_list:
for i, value in enumerate(values):
if value == '':
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerows, values_list, has_empty)
def _writerow(self, writerow_func, values, has_empty):
"""
Call ``writerow_func`` (either writerow or writerows) with ``values``.
If it has empty fields that have been replaced then change those
sentinel strings back to quoted empty strings, e.g. ``""``.
"""
# Clear the temporary StringIO buffer that self.writer writes into and
# then call the real csv.writer().writerow or writerows with values.
self.temp_out.seek(0)
self.temp_out.truncate()
writerow_func(values)
row_string = self.temp_out.getvalue()
if self.quote_empty and has_empty:
row_string = re.sub(self.replace_sentinel, self.quotechar2, row_string)
        # If self.csvfile is defined then write the output. In practice the pure
# Python writer calls with csvfile=None, while the fast writer calls with
# a file-like object.
if self.csvfile:
self.csvfile.write(row_string)
return row_string
class MaskedConstant(numpy.ma.core.MaskedConstant):
"""A trivial extension of numpy.ma.masked
We want to be able to put the generic term ``masked`` into a dictionary.
The constant ``numpy.ma.masked`` is not hashable (see
https://github.com/numpy/numpy/issues/4660), so we need to extend it
here with a hash value.
See https://github.com/numpy/numpy/issues/11021 for rationale for
__copy__ and __deepcopy__ methods.
"""
def __hash__(self):
'''All instances of this class shall have the same hash.'''
# Any large number will do.
return 1234567890
def __copy__(self):
"""This is a singleton so just return self."""
return self
def __deepcopy__(self, memo):
return self
masked = MaskedConstant()
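# A minimal usage sketch (illustrative): the fixed ``__hash__`` above makes
# the singleton usable as a dictionary key, which is how ``fill_values``
# stores the replacement string for masked entries.
#
# >>> fill = {masked: '--'}
# >>> masked in fill
# True
# >>> fill[masked]
# '--'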
class InconsistentTableError(ValueError):
"""
Indicates that an input table is inconsistent in some way.
The default behavior of ``BaseReader`` is to throw an instance of
this class if a data row doesn't match the header.
"""
class OptionalTableImportError(ImportError):
"""
Indicates that a dependency for table reading is not present.
An instance of this class is raised whenever an optional reader
with certain required dependencies cannot operate because of
an ImportError.
"""
class ParameterError(NotImplementedError):
"""
Indicates that a reader cannot handle a passed parameter.
The C-based fast readers in ``io.ascii`` raise an instance of
this error class upon encountering a parameter that the
C engine cannot handle.
"""
class FastOptionsError(NotImplementedError):
"""
Indicates that one of the specified options for fast
reading is invalid.
"""
class NoType:
"""
Superclass for ``StrType`` and ``NumType`` classes.
This class is the default type of ``Column`` and provides a base
class for other data types.
"""
class StrType(NoType):
"""
Indicates that a column consists of text data.
"""
class NumType(NoType):
"""
Indicates that a column consists of numerical data.
"""
class FloatType(NumType):
"""
Describes floating-point data.
"""
class BoolType(NoType):
"""
Describes boolean data.
"""
class IntType(NumType):
"""
Describes integer data.
"""
class AllType(StrType, FloatType, IntType):
"""
Subclass of all other data types.
This type is returned by ``convert_numpy`` if the given numpy
type does not match ``StrType``, ``FloatType``, or ``IntType``.
"""
class Column:
"""Table column.
The key attributes of a Column object are:
* **name** : column name
* **type** : column type (NoType, StrType, NumType, FloatType, IntType)
* **dtype** : numpy dtype (optional, overrides **type** if set)
* **str_vals** : list of column values as strings
* **fill_values** : dict of fill values
* **shape** : list of element shape (default [] => scalar)
* **data** : list of converted column values
* **subtype** : actual datatype for columns serialized with JSON
"""
def __init__(self, name):
self.name = name
self.type = NoType # Generic type (Int, Float, Str etc)
self.dtype = None # Numpy dtype if available
self.str_vals = []
self.fill_values = {}
self.shape = []
self.subtype = None
class BaseInputter:
"""
Get the lines from the table input and return a list of lines.
"""
encoding = None
"""Encoding used to read the file"""
def get_lines(self, table, newline=None):
"""
Get the lines from the ``table`` input. The input table can be one of:
* File name
* String (newline separated) with all header and data lines (must have at least 2 lines)
* File-like object with read() method
* List of strings
Parameters
----------
table : str, file-like, list
Can be either a file name, string (newline separated) with all header and data
lines (must have at least 2 lines), a file-like object with a
``read()`` method, or a list of strings.
newline :
Line separator. If `None` use OS default from ``splitlines()``.
Returns
-------
lines : list
List of lines
"""
try:
if (hasattr(table, 'read')
or ('\n' not in table + '' and '\r' not in table + '')):
with get_readable_fileobj(table,
encoding=self.encoding) as fileobj:
table = fileobj.read()
if newline is None:
lines = table.splitlines()
else:
lines = table.split(newline)
except TypeError:
try:
# See if table supports indexing, slicing, and iteration
table[0]
table[0:1]
iter(table)
if len(table) > 1:
lines = table
else:
# treat single entry as if string had been passed directly
if newline is None:
lines = table[0].splitlines()
else:
lines = table[0].split(newline)
except TypeError:
raise TypeError(
'Input "table" must be a string (filename or data) or an iterable')
return self.process_lines(lines)
def process_lines(self, lines):
"""Process lines for subsequent use. In the default case do nothing.
This routine is not generally intended for removing comment lines or
stripping whitespace. These are done (if needed) in the header and
data line processing.
Override this method if something more has to be done to convert raw
input lines to the table rows. For example the
ContinuationLinesInputter derived class accounts for continuation
characters if a row is split into lines."""
return lines
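# A minimal sketch (illustrative) of the input dispatch above: a string
# containing a newline is treated as the table data itself rather than as a
# file name, and a list of strings passes straight through.
#
# >>> inputter = BaseInputter()
# >>> inputter.get_lines('a b\n1 2')
# ['a b', '1 2']
# >>> inputter.get_lines(['a b', '1 2'])
# ['a b', '1 2']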
class BaseSplitter:
"""
Base splitter that uses python's split method to do the work.
This does not handle quoted values. A key feature is the formulation of
__call__ as a generator that returns a list of the split line values at
each iteration.
There are two methods that are intended to be overridden, first
``process_line()`` to do pre-processing on each input line before splitting
and ``process_val()`` to do post-processing on each split string value. By
default these apply the string ``strip()`` function. These can be set to
another function via the instance attribute or be disabled entirely, for
example::
reader.header.splitter.process_val = lambda x: x.lstrip()
reader.data.splitter.process_val = None
"""
delimiter = None
""" one-character string used to separate fields """
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end."""
return line.strip()
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip()
def __call__(self, lines):
if self.process_line:
lines = (self.process_line(x) for x in lines)
for line in lines:
vals = line.split(self.delimiter)
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
if self.delimiter is None:
delimiter = ' '
else:
delimiter = self.delimiter
return delimiter.join(str(x) for x in vals)
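# A minimal usage sketch (illustrative), with a custom delimiter and the
# default strip-based pre/post-processing:
#
# >>> splitter = BaseSplitter()
# >>> splitter.delimiter = ','
# >>> list(splitter([' x , y ', '1,2']))
# [['x', 'y'], ['1', '2']]
# >>> splitter.join(['x', 1])
# 'x,1'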
class DefaultSplitter(BaseSplitter):
"""Default class to split strings into columns using python csv. The class
attributes are taken from the csv Dialect class.
Typical usage::
# lines = ..
splitter = ascii.DefaultSplitter()
for col_vals in splitter(lines):
for col_val in col_vals:
...
"""
delimiter = ' '
""" one-character string used to separate fields. """
quotechar = '"'
""" control how instances of *quotechar* in a field are quoted """
doublequote = True
""" character to remove special meaning from following character """
escapechar = None
""" one-character stringto quote fields containing special characters """
quoting = csv.QUOTE_MINIMAL
""" control when quotes are recognized by the reader """
skipinitialspace = True
""" ignore whitespace immediately following the delimiter """
csv_writer = None
csv_writer_out = StringIO()
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
If splitting on whitespace then replace unquoted tabs with space first"""
if self.delimiter == r'\s':
line = _replace_tab_with_space(line, self.escapechar, self.quotechar)
return line.strip() + '\n'
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip(' \t')
def __call__(self, lines):
"""Return an iterator over the table ``lines``, where each iterator output
is a list of the split line values.
Parameters
----------
lines : list
List of table lines
Yields
------
line : list of str
Each line's split values.
"""
if self.process_line:
lines = [self.process_line(x) for x in lines]
delimiter = ' ' if self.delimiter == r'\s' else self.delimiter
csv_reader = csv.reader(lines,
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
skipinitialspace=self.skipinitialspace
)
for vals in csv_reader:
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
delimiter = ' ' if self.delimiter is None else str(self.delimiter)
if self.csv_writer is None:
self.csv_writer = CsvWriter(delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting)
if self.process_val:
vals = [self.process_val(x) for x in vals]
out = self.csv_writer.writerow(vals).rstrip('\r\n')
return out
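# Unlike BaseSplitter, this splitter handles quoted values via the csv
# module. A minimal sketch (illustrative):
#
# >>> splitter = DefaultSplitter()
# >>> list(splitter(['"hello world" 2', 'x y']))
# [['hello world', '2'], ['x', 'y']]
# >>> splitter.join(['hello world', '2'])
# '"hello world" 2'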
def _replace_tab_with_space(line, escapechar, quotechar):
"""Replace tabs with spaces in given string, preserving quoted substrings
Parameters
----------
line : str
String containing tabs to be replaced with spaces.
escapechar : str
Character in ``line`` used to escape special characters.
quotechar : str
Character in ``line`` indicating the start/end of a substring.
Returns
-------
line : str
A copy of ``line`` with tabs replaced by spaces, preserving quoted substrings.
"""
newline = []
in_quote = False
lastchar = 'NONE'
for char in line:
if char == quotechar and lastchar != escapechar:
in_quote = not in_quote
if char == '\t' and not in_quote:
char = ' '
lastchar = char
newline.append(char)
return ''.join(newline)
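# A minimal sketch (illustrative): tabs outside quotes become spaces while a
# tab inside the quoted substring is preserved.
#
# >>> _replace_tab_with_space('a\t"b\tc"\td', '\\', '"')
# 'a "b\tc" d'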
def _get_line_index(line_or_func, lines):
"""Return the appropriate line index, depending on ``line_or_func`` which
can be either a function, a positive or negative int, or None.
"""
if hasattr(line_or_func, '__call__'):
return line_or_func(lines)
elif line_or_func:
if line_or_func >= 0:
return line_or_func
else:
n_lines = sum(1 for line in lines)
return n_lines + line_or_func
else:
return line_or_func
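# A minimal sketch (illustrative) of the three accepted forms:
#
# >>> _get_line_index(1, ['a', 'b', 'c'])
# 1
# >>> _get_line_index(-1, ['a', 'b', 'c'])  # negative counts from the end
# 2
# >>> _get_line_index(lambda lines: 0, ['a', 'b', 'c'])
# 0
# >>> _get_line_index(None, ['a', 'b', 'c']) is None
# True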
class BaseHeader:
"""
Base table header reader
"""
auto_format = 'col{}'
""" format string for auto-generating column names """
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
names = None
""" list of names corresponding to each data column """
write_comment = False
write_spacer_lines = ['ASCII_TABLE_WRITE_SPACER_LINE']
def __init__(self):
self.splitter = self.splitter_class()
def _set_cols_from_names(self):
self.cols = [Column(name=x) for x in self.names]
def update_meta(self, lines, meta):
"""
Extract any table-level metadata, e.g. keywords, comments, column metadata, from
the table ``lines`` and update the OrderedDict ``meta`` in place. This base
method extracts comment lines and stores them in ``meta`` for output.
"""
if self.comment:
re_comment = re.compile(self.comment)
comment_lines = [x for x in lines if re_comment.match(x)]
else:
comment_lines = []
comment_lines = [re.sub('^' + self.comment, '', x).strip()
for x in comment_lines]
if comment_lines:
meta.setdefault('table', {})['comments'] = comment_lines
def get_cols(self, lines):
"""Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
start_line = _get_line_index(self.start_line, self.process_lines(lines))
if start_line is None:
# No header line so auto-generate names from n_data_cols
# Get the data values from the first line of table data to determine n_data_cols
try:
first_data_vals = next(self.data.get_str_vals())
except StopIteration:
raise InconsistentTableError('No data lines found so cannot autogenerate '
'column names')
n_data_cols = len(first_data_vals)
self.names = [self.auto_format.format(i)
for i in range(1, n_data_cols + 1)]
else:
for i, line in enumerate(self.process_lines(lines)):
if i == start_line:
break
else: # No header line matching
raise ValueError('No header line found in table')
self.names = next(self.splitter([line]))
self._set_cols_from_names()
def process_lines(self, lines):
"""Generator to yield non-blank and non-comment lines"""
re_comment = re.compile(self.comment) if self.comment else None
# Yield non-comment lines
for line in lines:
if line.strip() and (not self.comment or not re_comment.match(line)):
yield line
def write_comments(self, lines, meta):
if self.write_comment not in (False, None):
for comment in meta.get('comments', []):
lines.append(self.write_comment + comment)
def write(self, lines):
if self.start_line is not None:
for i, spacer_line in zip(range(self.start_line),
itertools.cycle(self.write_spacer_lines)):
lines.append(spacer_line)
lines.append(self.splitter.join([x.info.name for x in self.cols]))
@property
def colnames(self):
"""Return the column names of the table"""
return tuple(col.name if isinstance(col, Column) else col.info.name
for col in self.cols)
def remove_columns(self, names):
"""
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
"""
colnames = self.colnames
for name in names:
if name not in colnames:
raise KeyError(f"Column {name} does not exist")
self.cols = [col for col in self.cols if col.name not in names]
def rename_column(self, name, new_name):
"""
Rename a column.
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
"""
try:
idx = self.colnames.index(name)
except ValueError:
raise KeyError(f"Column {name} does not exist")
col = self.cols[idx]
# For writing self.cols can contain cols that are not Column. Raise
# exception in that case.
if isinstance(col, Column):
col.name = new_name
else:
raise TypeError(f'got column type {type(col)} instead of required '
f'{Column}')
def get_type_map_key(self, col):
return col.raw_type
def get_col_type(self, col):
try:
type_map_key = self.get_type_map_key(col)
return self.col_type_map[type_map_key.lower()]
except KeyError:
raise ValueError('Unknown data type "{}" for column "{}"'.format(
col.raw_type, col.name))
def check_column_names(self, names, strict_names, guessing):
"""
Check column names.
This must be done before applying the names transformation
so that guessing will fail appropriately if ``names`` is supplied.
For instance if the basic reader is given a table with no column header
row.
Parameters
----------
names : list
User-supplied list of column names
strict_names : bool
Whether to impose extra requirements on names
guessing : bool
True if this method is being called while guessing the table format
"""
if strict_names:
# Impose strict requirements on column names (normally used in guessing)
bads = [" ", ",", "|", "\t", "'", '"']
for name in self.colnames:
if (_is_number(name) or len(name) == 0
or name[0] in bads or name[-1] in bads):
raise InconsistentTableError(
f'Column name {name!r} does not meet strict name requirements')
# When guessing require at least two columns, except for ECSV which can
# reliably be guessed from the header requirements.
if guessing and len(self.colnames) <= 1 and self.__class__.__name__ != 'EcsvHeader':
raise ValueError('Table format guessing requires at least two columns, got {}'
.format(list(self.colnames)))
if names is not None and len(names) != len(self.colnames):
raise InconsistentTableError(
'Length of names argument ({}) does not match number'
' of table columns ({})'.format(len(names), len(self.colnames)))
class BaseData:
"""
Base table data reader.
"""
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
end_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" Regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
write_spacer_lines = ['ASCII_TABLE_WRITE_SPACER_LINE']
fill_include_names = None
fill_exclude_names = None
fill_values = [(masked, '')]
formats = {}
def __init__(self):
# Need to make sure fill_values list is instance attribute, not class attribute.
# On read, this will be overwritten by the default in the ui.read (thus, in
# the current implementation there can be no different default for different
# Readers). On write, ui.py does not specify a default, so this line here matters.
self.fill_values = copy.copy(self.fill_values)
self.formats = copy.copy(self.formats)
self.splitter = self.splitter_class()
def process_lines(self, lines):
"""
READ: Strip out comment lines and blank lines from list of ``lines``
Parameters
----------
lines : list
All lines in table
Returns
-------
lines : list
List of lines
"""
nonblank_lines = (x for x in lines if x.strip())
if self.comment:
re_comment = re.compile(self.comment)
return [x for x in nonblank_lines if not re_comment.match(x)]
else:
return [x for x in nonblank_lines]
def get_data_lines(self, lines):
"""READ: Set ``data_lines`` attribute to lines slice comprising table data values.
"""
data_lines = self.process_lines(lines)
start_line = _get_line_index(self.start_line, data_lines)
end_line = _get_line_index(self.end_line, data_lines)
if start_line is not None or end_line is not None:
self.data_lines = data_lines[slice(start_line, end_line)]
else: # Don't copy entire data lines unless necessary
self.data_lines = data_lines
def get_str_vals(self):
"""Return a generator that returns a list of column values (as strings)
for each data line."""
return self.splitter(self.data_lines)
def masks(self, cols):
"""READ: Set fill value for each column and then apply that fill value
In the first step it is determined which value from ``fill_values`` applies
to which column, using ``fill_include_names`` and ``fill_exclude_names``.
In the second step all replacements are done for the appropriate columns.
"""
if self.fill_values:
self._set_fill_values(cols)
self._set_masks(cols)
def _set_fill_values(self, cols):
"""READ, WRITE: Set fill values of individual cols based on fill_values of BaseData
``fill_values`` has the following form:
<fill_spec> = (<bad_value>, <fill_value>, <optional col_name>...)
fill_values = <fill_spec> or list of <fill_spec>'s
"""
if self.fill_values:
# when we write tables the columns may be astropy.table.Columns
# which don't carry a fill_values by default
for col in cols:
if not hasattr(col, 'fill_values'):
col.fill_values = {}
# if input is only one <fill_spec>, then make it a list
with suppress(TypeError):
self.fill_values[0] + ''
self.fill_values = [self.fill_values]
# Step 1: Set the default list of columns which are affected by
# fill_values
colnames = set(self.header.colnames)
if self.fill_include_names is not None:
colnames.intersection_update(self.fill_include_names)
if self.fill_exclude_names is not None:
colnames.difference_update(self.fill_exclude_names)
# Step 2a: Find out which columns are affected by this tuple
# iterate over reversed order, so last condition is set first and
# overwritten by earlier conditions
for replacement in reversed(self.fill_values):
if len(replacement) < 2:
raise ValueError("Format of fill_values must be "
"(<bad>, <fill>, <optional col1>, ...)")
elif len(replacement) == 2:
affect_cols = colnames
else:
affect_cols = replacement[2:]
for i, key in ((i, x) for i, x in enumerate(self.header.colnames)
if x in affect_cols):
cols[i].fill_values[replacement[0]] = str(replacement[1])
def _set_masks(self, cols):
"""READ: Replace string values in col.str_vals and set masks"""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
col.mask = numpy.zeros(len(col.str_vals), dtype=bool)
for i, str_val in ((i, x) for i, x in enumerate(col.str_vals)
if x in col.fill_values):
col.str_vals[i] = col.fill_values[str_val]
col.mask[i] = True
def _replace_vals(self, cols):
"""WRITE: replace string values in col.str_vals"""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
for i, str_val in ((i, x) for i, x in enumerate(col.str_vals)
if x in col.fill_values):
col.str_vals[i] = col.fill_values[str_val]
if masked in col.fill_values and hasattr(col, 'mask'):
mask_val = col.fill_values[masked]
for i in col.mask.nonzero()[0]:
col.str_vals[i] = mask_val
def str_vals(self):
"""WRITE: convert all values in table to a list of lists of strings
This sets the fill values and possibly column formats from the input
formats={} keyword, then ends up calling table.pprint._pformat_col_iter()
by a circuitous path. That function does the real work of formatting.
Finally replace anything matching the fill_values.
Returns
-------
values : list of list of str
"""
self._set_fill_values(self.cols)
self._set_col_formats()
for col in self.cols:
col.str_vals = list(col.info.iter_str_vals())
self._replace_vals(self.cols)
return [col.str_vals for col in self.cols]
def write(self, lines):
"""Write ``self.cols`` in place to ``lines``.
Parameters
----------
lines : list
List for collecting output of writing self.cols.
"""
if hasattr(self.start_line, '__call__'):
raise TypeError('Start_line attribute cannot be callable for write()')
else:
data_start_line = self.start_line or 0
spacer_lines = itertools.cycle(self.write_spacer_lines)
while len(lines) < data_start_line:
# Append spacer lines (strings), not the cycle iterator itself.
lines.append(next(spacer_lines))
col_str_iters = self.str_vals()
for vals in zip(*col_str_iters):
lines.append(self.splitter.join(vals))
def _set_col_formats(self):
"""WRITE: set column formats."""
for col in self.cols:
if col.info.name in self.formats:
col.info.format = self.formats[col.info.name]
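# A minimal sketch (illustrative) of the fill_values machinery as seen from
# the public reader interface: '--' entries are replaced and masked.
#
# >>> from astropy.io import ascii
# >>> t = ascii.read(['a b', '1 --', '2 3'], fill_values=[('--', '0')])
# >>> t['b'].mask.tolist()
# [True, False]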
def convert_numpy(numpy_type):
"""Return a tuple containing a function which converts a list into a numpy
array and the type produced by the converter function.
Parameters
----------
numpy_type : numpy data-type
The numpy type required of an array returned by ``converter``. Must be a
valid `numpy type <https://numpy.org/doc/stable/user/basics.types.html>`_
(e.g., numpy.uint, numpy.int8, numpy.int64, numpy.float64) or a python
type covered by a numpy type (e.g., int, float, str, bool).
Returns
-------
converter : callable
``converter`` is a function which accepts a list and converts it to a
numpy array of type ``numpy_type``.
converter_type : type
``converter_type`` tracks the generic data type produced by the
converter function.
Raises
------
ValueError
Raised by ``converter`` if the list elements could not be converted to
the required type.
"""
# Infer converter type from an instance of numpy_type.
type_name = numpy.array([], dtype=numpy_type).dtype.name
if 'int' in type_name:
converter_type = IntType
elif 'float' in type_name:
converter_type = FloatType
elif 'bool' in type_name:
converter_type = BoolType
elif 'str' in type_name:
converter_type = StrType
else:
converter_type = AllType
def bool_converter(vals):
"""
Convert values "False" and "True" to bools. Raise an exception
for any other string values.
"""
if len(vals) == 0:
return numpy.array([], dtype=bool)
# Try a smaller subset first for a long array
if len(vals) > 10000:
svals = numpy.asarray(vals[:1000])
if not numpy.all((svals == 'False')
| (svals == 'True')
| (svals == '0')
| (svals == '1')):
raise ValueError('bool input strings must be False, True, 0, 1, or ""')
vals = numpy.asarray(vals)
trues = (vals == 'True') | (vals == '1')
falses = (vals == 'False') | (vals == '0')
if not numpy.all(trues | falses):
raise ValueError('bool input strings must be only False, True, 0, 1, or ""')
return trues
def generic_converter(vals):
return numpy.array(vals, numpy_type)
converter = bool_converter if converter_type is BoolType else generic_converter
return converter, converter_type
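# A minimal usage sketch (illustrative):
#
# >>> converter, ctype = convert_numpy(int)
# >>> converter(['1', '2'])
# array([1, 2])
# >>> ctype is IntType
# True
# >>> bool_conv, _ = convert_numpy(bool)
# >>> bool_conv(['True', '0'])
# array([ True, False])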
class BaseOutputter:
"""Output table as a dict of column objects keyed on column name. The
table data are stored as plain python lists within the column objects.
"""
# User-defined converters which get set in ascii.ui if a ``converters``
# kwarg is supplied.
converters = {}
# Derived classes must define default_converters and __call__
@staticmethod
def _validate_and_copy(col, converters):
"""Validate the format for the type converters and then copy those
which are valid converters for this column (i.e. converter type is
a subclass of col.type)"""
# Allow specifying a single converter instead of a list of converters.
# The input `converters` must be a ``type`` value that can init np.dtype.
try:
# Don't allow list-like things that dtype accepts
assert type(converters) is type
converters = [numpy.dtype(converters)]
except (AssertionError, TypeError):
pass
converters_out = []
try:
for converter in converters:
try:
converter_func, converter_type = converter
except TypeError as err:
if str(err).startswith('cannot unpack'):
converter_func, converter_type = convert_numpy(converter)
else:
raise
if not issubclass(converter_type, NoType):
raise ValueError('converter_type must be a subclass of NoType')
if issubclass(converter_type, col.type):
converters_out.append((converter_func, converter_type))
except (ValueError, TypeError) as err:
raise ValueError('Error: invalid format for converters, see '
f'documentation\n{converters}: {err}')
return converters_out
def _convert_vals(self, cols):
for col in cols:
for key, converters in self.converters.items():
if fnmatch.fnmatch(col.name, key):
break
else:
if col.dtype is not None:
converters = [convert_numpy(col.dtype)]
else:
converters = self.default_converters
col.converters = self._validate_and_copy(col, converters)
# Catch the last error in order to provide additional information
# in case all attempts at column conversion fail. The initial
# value of last_err will apply if no converters are defined
# and the first col.converters[0] access raises IndexError.
last_err = 'no converters defined'
while not hasattr(col, 'data'):
# Try converters, popping the unsuccessful ones from the list.
# If there are no converters left here then fail.
if not col.converters:
raise ValueError(f'Column {col.name} failed to convert: {last_err}')
converter_func, converter_type = col.converters[0]
if not issubclass(converter_type, col.type):
raise TypeError('converter type does not match column type')
try:
col.data = converter_func(col.str_vals)
col.type = converter_type
except (TypeError, ValueError) as err:
col.converters.pop(0)
last_err = err
except OverflowError as err:
# Overflow during conversion (most likely an int that
# doesn't fit in native C long). Put string at the top of
# the converters list for the next while iteration.
warnings.warn(
"OverflowError converting to {} in column {}, reverting to String."
.format(converter_type.__name__, col.name), AstropyWarning)
# numpy.str was removed in recent NumPy releases; use the builtin str.
col.converters.insert(0, convert_numpy(str))
last_err = err
def _deduplicate_names(names):
"""Ensure there are no duplicates in ``names``
This is done by iteratively adding ``_<N>`` to the name for increasing N
until the name is unique.
"""
new_names = []
existing_names = set()
for name in names:
base_name = name + '_'
i = 1
while name in existing_names:
# Iterate until a unique name is found
name = base_name + str(i)
i += 1
new_names.append(name)
existing_names.add(name)
return new_names
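# A minimal sketch (illustrative):
#
# >>> _deduplicate_names(['a', 'a', 'b', 'a'])
# ['a', 'a_1', 'b', 'a_2']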
class TableOutputter(BaseOutputter):
"""
Output the table as an astropy.table.Table object.
"""
default_converters = [convert_numpy(int),
convert_numpy(float),
convert_numpy(str)]
def __call__(self, cols, meta):
# Sets col.data to numpy array and col.type to io.ascii Type class (e.g.
# FloatType) for each col.
self._convert_vals(cols)
t_cols = [numpy.ma.MaskedArray(x.data, mask=x.mask)
if hasattr(x, 'mask') and numpy.any(x.mask)
else x.data for x in cols]
out = Table(t_cols, names=[x.name for x in cols], meta=meta['table'])
for col, out_col in zip(cols, out.columns.values()):
for attr in ('format', 'unit', 'description'):
if hasattr(col, attr):
setattr(out_col, attr, getattr(col, attr))
if hasattr(col, 'meta'):
out_col.meta.update(col.meta)
return out
class MetaBaseReader(type):
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
format = dct.get('_format_name')
if format is None:
return
fast = dct.get('_fast')
if fast is not None:
FAST_CLASSES[format] = cls
FORMAT_CLASSES[format] = cls
io_formats = ['ascii.' + format] + dct.get('_io_registry_format_aliases', [])
if dct.get('_io_registry_suffix'):
func = functools.partial(connect.io_identify, dct['_io_registry_suffix'])
connect.io_registry.register_identifier(io_formats[0], Table, func)
for io_format in io_formats:
func = functools.partial(connect.io_read, io_format)
header = f"ASCII reader '{io_format}' details\n"
func.__doc__ = (inspect.cleandoc(READ_DOCSTRING).strip() + '\n\n'
+ header + re.sub('.', '=', header) + '\n')
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_reader(io_format, Table, func)
if dct.get('_io_registry_can_write', True):
func = functools.partial(connect.io_write, io_format)
header = f"ASCII writer '{io_format}' details\n"
func.__doc__ = (inspect.cleandoc(WRITE_DOCSTRING).strip() + '\n\n'
+ header + re.sub('.', '=', header) + '\n')
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_writer(io_format, Table, func)
def _is_number(x):
with suppress(ValueError):
x = float(x)
return True
return False
def _apply_include_exclude_names(table, names, include_names, exclude_names):
"""
Apply names, include_names and exclude_names to a table or BaseHeader.
For the latter this relies on BaseHeader implementing ``colnames``,
``rename_column``, and ``remove_columns``.
Parameters
----------
table : `~astropy.table.Table`, `~astropy.io.ascii.BaseHeader`
Input table or BaseHeader subclass instance
names : list
List of names to override those in table (set to None to use existing names)
include_names : list
List of names to include in output
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
"""
def rename_columns(table, names):
# Rename table column names to those passed by user
# Temporarily rename with names that are not in `names` or `table.colnames`.
# This ensures that rename succeeds regardless of existing names.
xxxs = 'x' * max(len(name) for name in list(names) + list(table.colnames))
for ii, colname in enumerate(table.colnames):
table.rename_column(colname, xxxs + str(ii))
for ii, name in enumerate(names):
table.rename_column(xxxs + str(ii), name)
if names is not None:
rename_columns(table, names)
else:
colnames_uniq = _deduplicate_names(table.colnames)
if colnames_uniq != list(table.colnames):
rename_columns(table, colnames_uniq)
names_set = set(table.colnames)
if include_names is not None:
names_set.intersection_update(include_names)
if exclude_names is not None:
names_set.difference_update(exclude_names)
if names_set != set(table.colnames):
remove_names = set(table.colnames) - names_set
table.remove_columns(remove_names)
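# A minimal sketch (illustrative) using an astropy Table; ``exclude_names``
# is applied after ``include_names``:
#
# >>> from astropy.table import Table
# >>> t = Table({'a': [1], 'b': [2], 'c': [3]})
# >>> _apply_include_exclude_names(t, names=None,
# ...                              include_names=['a', 'b'],
# ...                              exclude_names=['b'])
# >>> t.colnames
# ['a']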
class BaseReader(metaclass=MetaBaseReader):
"""Class providing methods to read and write an ASCII table using the specified
header, data, inputter, and outputter instances.
Typical usage is to instantiate a Reader() object and customize the
``header``, ``data``, ``inputter``, and ``outputter`` attributes. Each
of these is an object of the corresponding class.
There is one method ``inconsistent_handler`` that can be used to customize the
behavior of ``read()`` in the event that a data row doesn't match the header.
The default behavior is to raise an InconsistentTableError.
"""
names = None
include_names = None
exclude_names = None
strict_names = False
guessing = False
encoding = None
header_class = BaseHeader
data_class = BaseData
inputter_class = BaseInputter
outputter_class = TableOutputter
# Max column dimension that writer supports for this format. Exceptions
# include ECSV (no limit) and HTML (max_ndim=2).
max_ndim = 1
def __init__(self):
self.header = self.header_class()
self.data = self.data_class()
self.inputter = self.inputter_class()
self.outputter = self.outputter_class()
# Data and Header instances benefit from a little cross-coupling. Header may need to
# know about number of data columns for auto-column name generation and Data may
# need to know about header (e.g. for fixed-width tables where widths are spec'd in header.
self.data.header = self.header
self.header.data = self.data
# Metadata, consisting of table-level meta and column-level meta. The latter
# could include information about column type, description, formatting, etc,
# depending on the table meta format.
self.meta = OrderedDict(table=OrderedDict(),
cols=OrderedDict())
def _check_multidim_table(self, table):
"""Check that the dimensions of columns in ``table`` are acceptable.
The reader class attribute ``max_ndim`` defines the maximum dimension of
columns that can be written using this format. The base value is ``1``,
corresponding to normal scalar columns with just a length.
Parameters
----------
table : `~astropy.table.Table`
Input table.
Raises
------
ValueError
If any column exceeds the number of allowed dimensions
"""
_check_multidim_table(table, self.max_ndim)
def read(self, table):
"""Read the ``table`` and return the results in a format determined by
the ``outputter`` attribute.
The ``table`` parameter is any string or object that can be processed
by the instance ``inputter``. For the base Inputter class ``table`` can be
one of:
* File name
* File-like object
* String (newline separated) with all header and data lines (must have at least 2 lines)
* List of strings
Parameters
----------
table : str, file-like, list
Input table.
Returns
-------
table : `~astropy.table.Table`
Output table
"""
# If ``table`` is a file then store the name in the ``data``
# attribute. The ``table`` is a "file" if it is a string
# without the new line specific to the OS.
with suppress(TypeError):
# Strings only
if os.linesep not in table + '':
self.data.table_name = os.path.basename(table)
# If one of the newline chars is set as field delimiter, only
# accept the other one as line splitter
if self.header.splitter.delimiter == '\n':
newline = '\r'
elif self.header.splitter.delimiter == '\r':
newline = '\n'
else:
newline = None
# Get a list of the lines (rows) in the table
self.lines = self.inputter.get_lines(table, newline=newline)
# Set self.data.data_lines to a slice of lines contain the data rows
self.data.get_data_lines(self.lines)
# Extract table meta values (e.g. keywords, comments, etc). Updates self.meta.
self.header.update_meta(self.lines, self.meta)
# Get the table column definitions
self.header.get_cols(self.lines)
# Make sure columns are valid
self.header.check_column_names(self.names, self.strict_names, self.guessing)
self.cols = cols = self.header.cols
self.data.splitter.cols = cols
n_cols = len(cols)
for i, str_vals in enumerate(self.data.get_str_vals()):
if len(str_vals) != n_cols:
str_vals = self.inconsistent_handler(str_vals, n_cols)
# if str_vals is None, we skip this row
if str_vals is None:
continue
# otherwise, we raise an error only if it is still inconsistent
if len(str_vals) != n_cols:
errmsg = ('Number of header columns ({}) inconsistent with'
' data columns ({}) at data line {}\n'
'Header values: {}\n'
'Data values: {}'.format(
n_cols, len(str_vals), i,
[x.name for x in cols], str_vals))
raise InconsistentTableError(errmsg)
for j, col in enumerate(cols):
col.str_vals.append(str_vals[j])
self.data.masks(cols)
if hasattr(self.header, 'table_meta'):
self.meta['table'].update(self.header.table_meta)
_apply_include_exclude_names(self.header, self.names,
self.include_names, self.exclude_names)
table = self.outputter(self.header.cols, self.meta)
self.cols = self.header.cols
return table
def inconsistent_handler(self, str_vals, ncols):
"""
Adjust or skip data entries if a row is inconsistent with the header.
The default implementation does no adjustment, and hence will always trigger
an exception in read() any time the number of data entries does not match
the header.
Note that this will *not* be called if the row already matches the header.
Parameters
----------
str_vals : list
A list of value strings from the current row of the table.
ncols : int
The expected number of entries from the table header.
Returns
-------
str_vals : list
List of strings to be parsed into data entries in the output table. If
the length of this list does not match ``ncols``, an exception will be
raised in read(). Can also be None, in which case the row will be
skipped.
"""
# an empty list will always trigger an InconsistentTableError in read()
return str_vals
@property
def comment_lines(self):
"""Return lines in the table that match header.comment regexp"""
if not hasattr(self, 'lines'):
raise ValueError('Table must be read prior to accessing the header comment lines')
if self.header.comment:
re_comment = re.compile(self.header.comment)
comment_lines = [x for x in self.lines if re_comment.match(x)]
else:
comment_lines = []
return comment_lines
def update_table_data(self, table):
"""
Update table columns in place if needed.
This is a hook to allow updating the table columns after name
filtering but before setting up to write the data. This is currently
only used by ECSV and is otherwise just a pass-through.
Parameters
----------
table : `astropy.table.Table`
Input table for writing
Returns
-------
table : `astropy.table.Table`
Output table for writing
"""
return table
def write_header(self, lines, meta):
self.header.write_comments(lines, meta)
self.header.write(lines)
def write(self, table):
"""
Write ``table`` as list of strings.
Parameters
----------
table : `~astropy.table.Table`
Input table data.
Returns
-------
lines : list
List of strings corresponding to ASCII table
"""
# Check column names before altering
self.header.cols = list(table.columns.values())
self.header.check_column_names(self.names, self.strict_names, False)
# In-place update of columns in input ``table`` to reflect column
# filtering. Note that ``table`` is guaranteed to be a copy of the
# original user-supplied table.
_apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names)
# This is a hook to allow updating the table columns after name
# filtering but before setting up to write the data. This is currently
# only used by ECSV and is otherwise just a pass-through.
table = self.update_table_data(table)
# Check that table column dimensions are supported by this format class.
# Most formats support only 1-d columns, but some like ECSV support N-d.
self._check_multidim_table(table)
# Now use altered columns
new_cols = list(table.columns.values())
# link information about the columns to the writer object (i.e. self)
self.header.cols = new_cols
self.data.cols = new_cols
self.header.table_meta = table.meta
# Write header and data to lines list
lines = []
self.write_header(lines, table.meta)
self.data.write(lines)
return lines
class ContinuationLinesInputter(BaseInputter):
"""Inputter where lines ending in ``continuation_char`` are joined
with the subsequent line. Example::
col1 col2 col3
1 \
2 3
4 5 \
6
"""
continuation_char = '\\'
replace_char = ' '
# If no_continue is not None then lines matching this regex are not subject
# to line continuation. The initial use case here is Daophot. In this
# case the continuation character is just replaced with replace_char.
no_continue = None
def process_lines(self, lines):
re_no_continue = re.compile(self.no_continue) if self.no_continue else None
parts = []
outlines = []
for line in lines:
if re_no_continue and re_no_continue.match(line):
line = line.replace(self.continuation_char, self.replace_char)
if line.endswith(self.continuation_char):
parts.append(line.replace(self.continuation_char, self.replace_char))
else:
parts.append(line)
outlines.append(''.join(parts))
parts = []
return outlines
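# A minimal sketch (illustrative): a trailing backslash joins a row that was
# split across two physical lines.
#
# >>> inputter = ContinuationLinesInputter()
# >>> inputter.process_lines(['col1 col2 col3', '1 \\', '2 3'])
# ['col1 col2 col3', '1  2 3']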
class WhitespaceSplitter(DefaultSplitter):
def process_line(self, line):
"""Replace tab with space within ``line`` while respecting quoted substrings"""
newline = []
in_quote = False
lastchar = None
for char in line:
if char == self.quotechar and (self.escapechar is None
or lastchar != self.escapechar):
in_quote = not in_quote
if char == '\t' and not in_quote:
char = ' '
lastchar = char
newline.append(char)
return ''.join(newline)
extra_reader_pars = ('Reader', 'Inputter', 'Outputter',
'delimiter', 'comment', 'quotechar', 'header_start',
'data_start', 'data_end', 'converters', 'encoding',
'data_Splitter', 'header_Splitter',
'names', 'include_names', 'exclude_names', 'strict_names',
'fill_values', 'fill_include_names', 'fill_exclude_names')
def _get_reader(Reader, Inputter=None, Outputter=None, **kwargs):
"""Initialize a table reader allowing for common customizations. See ui.get_reader()
for param docs. This routine is for internal (package) use only and is useful
because it depends only on the "core" module.
"""
from .fastbasic import FastBasic
if issubclass(Reader, FastBasic): # Fast readers handle args separately
if Inputter is not None:
kwargs['Inputter'] = Inputter
return Reader(**kwargs)
# If user explicitly passed a fast reader with enable='force'
# (e.g. by passing non-default options), raise an error for slow readers
if 'fast_reader' in kwargs:
if kwargs['fast_reader']['enable'] == 'force':
raise ParameterError('fast_reader required with '
'{}, but this is not a fast C reader: {}'
.format(kwargs['fast_reader'], Reader))
else:
del kwargs['fast_reader'] # Otherwise ignore fast_reader parameter
reader_kwargs = {k: v for k, v in kwargs.items() if k not in extra_reader_pars}
reader = Reader(**reader_kwargs)
if Inputter is not None:
reader.inputter = Inputter()
if Outputter is not None:
reader.outputter = Outputter()
# Issue #855 suggested to set data_start to header_start + default_header_length
# Thus, we need to retrieve this from the class definition before resetting these numbers.
try:
default_header_length = reader.data.start_line - reader.header.start_line
except TypeError: # Start line could be None or an instancemethod
default_header_length = None
# csv.reader is hard-coded to recognise either '\r' or '\n' as end-of-line,
# therefore DefaultSplitter cannot handle these as delimiters.
if 'delimiter' in kwargs:
if kwargs['delimiter'] in ('\n', '\r', '\r\n'):
reader.header.splitter = BaseSplitter()
reader.data.splitter = BaseSplitter()
reader.header.splitter.delimiter = kwargs['delimiter']
reader.data.splitter.delimiter = kwargs['delimiter']
if 'comment' in kwargs:
reader.header.comment = kwargs['comment']
reader.data.comment = kwargs['comment']
if 'quotechar' in kwargs:
reader.header.splitter.quotechar = kwargs['quotechar']
reader.data.splitter.quotechar = kwargs['quotechar']
if 'data_start' in kwargs:
reader.data.start_line = kwargs['data_start']
if 'data_end' in kwargs:
reader.data.end_line = kwargs['data_end']
if 'header_start' in kwargs:
if (reader.header.start_line is not None):
reader.header.start_line = kwargs['header_start']
# For FixedWidthTwoLine the data_start is calculated relative to the position line.
# However, position_line is given as absolute number and not relative to header_start.
# So, ignore this Reader here.
if (('data_start' not in kwargs) and (default_header_length is not None)
and reader._format_name not in ['fixed_width_two_line', 'commented_header']):
reader.data.start_line = reader.header.start_line + default_header_length
elif kwargs['header_start'] is not None:
# User trying to set a None header start to some value other than None
raise ValueError('header_start cannot be modified for this Reader')
if 'converters' in kwargs:
reader.outputter.converters = kwargs['converters']
if 'data_Splitter' in kwargs:
reader.data.splitter = kwargs['data_Splitter']()
if 'header_Splitter' in kwargs:
reader.header.splitter = kwargs['header_Splitter']()
if 'names' in kwargs:
reader.names = kwargs['names']
if None in reader.names:
raise TypeError('Cannot have None for column name')
if len(set(reader.names)) != len(reader.names):
raise ValueError('Duplicate column names')
if 'include_names' in kwargs:
reader.include_names = kwargs['include_names']
if 'exclude_names' in kwargs:
reader.exclude_names = kwargs['exclude_names']
# Strict names is normally set only within the guessing process to
# indicate that column names cannot be numeric or have certain
# characters at the beginning or end. It gets used in
# BaseHeader.check_column_names().
if 'strict_names' in kwargs:
reader.strict_names = kwargs['strict_names']
if 'fill_values' in kwargs:
reader.data.fill_values = kwargs['fill_values']
if 'fill_include_names' in kwargs:
reader.data.fill_include_names = kwargs['fill_include_names']
if 'fill_exclude_names' in kwargs:
reader.data.fill_exclude_names = kwargs['fill_exclude_names']
if 'encoding' in kwargs:
reader.encoding = kwargs['encoding']
reader.inputter.encoding = kwargs['encoding']
return reader
extra_writer_pars = ('delimiter', 'comment', 'quotechar', 'formats',
'strip_whitespace',
'names', 'include_names', 'exclude_names',
'fill_values', 'fill_include_names',
'fill_exclude_names')
def _get_writer(Writer, fast_writer, **kwargs):
"""Initialize a table writer allowing for common customizations. This
routine is for internal (package) use only and is useful because it depends
only on the "core" module."""
from .fastbasic import FastBasic
# A value of None for fill_values implies getting the default string
# representation of masked values (depending on the writer class), but the
# machinery expects a list. The easiest here is to just pop the value off,
# i.e. fill_values=None is the same as not providing it at all.
if 'fill_values' in kwargs and kwargs['fill_values'] is None:
del kwargs['fill_values']
if issubclass(Writer, FastBasic): # Fast writers handle args separately
return Writer(**kwargs)
elif fast_writer and f'fast_{Writer._format_name}' in FAST_CLASSES:
# Switch to fast writer
kwargs['fast_writer'] = fast_writer
return FAST_CLASSES[f'fast_{Writer._format_name}'](**kwargs)
writer_kwargs = {k: v for k, v in kwargs.items() if k not in extra_writer_pars}
writer = Writer(**writer_kwargs)
if 'delimiter' in kwargs:
writer.header.splitter.delimiter = kwargs['delimiter']
writer.data.splitter.delimiter = kwargs['delimiter']
if 'comment' in kwargs:
writer.header.write_comment = kwargs['comment']
writer.data.write_comment = kwargs['comment']
if 'quotechar' in kwargs:
writer.header.splitter.quotechar = kwargs['quotechar']
writer.data.splitter.quotechar = kwargs['quotechar']
if 'formats' in kwargs:
writer.data.formats = kwargs['formats']
if 'strip_whitespace' in kwargs:
if kwargs['strip_whitespace']:
# Restore the default SplitterClass process_val method which strips
# whitespace. This may have been changed in the Writer
# initialization (e.g. Rdb and Tab)
writer.data.splitter.process_val = operator.methodcaller('strip', ' \t')
else:
writer.data.splitter.process_val = None
if 'names' in kwargs:
writer.header.names = kwargs['names']
if 'include_names' in kwargs:
writer.include_names = kwargs['include_names']
if 'exclude_names' in kwargs:
writer.exclude_names = kwargs['exclude_names']
if 'fill_values' in kwargs:
# Prepend user-specified values to the class default.
with suppress(TypeError, IndexError):
# Test if it looks like (match, replace_string, optional_colname),
# in which case make it a list
kwargs['fill_values'][1] + ''
kwargs['fill_values'] = [kwargs['fill_values']]
writer.data.fill_values = kwargs['fill_values'] + writer.data.fill_values
if 'fill_include_names' in kwargs:
writer.data.fill_include_names = kwargs['fill_include_names']
if 'fill_exclude_names' in kwargs:
writer.data.fill_exclude_names = kwargs['fill_exclude_names']
return writer
|
1f2a3caa395400ee63fb60e8654d06c69ca43545becf5dd7c041b38ce31bb451 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
fixedwidth.py:
Read or write a table with fixed width columns.
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
from . import core
from .core import InconsistentTableError, DefaultSplitter
from . import basic
class FixedWidthSplitter(core.BaseSplitter):
"""
Split line based on fixed start and end positions for each ``col`` in
``self.cols``.
This class requires that the Header class will have defined ``col.start``
and ``col.end`` for each column. The reference to the ``header.cols`` gets
put in the splitter object by the base Reader.read() function just in time
for splitting data lines by a ``data`` object.
Note that the ``start`` and ``end`` positions are defined in the pythonic
style so line[start:end] is the desired substring for a column. This splitter
class does not have a hook for ``process_lines`` since that is generally not
useful for fixed-width input.
"""
delimiter_pad = ''
bookend = False
delimiter = '|'
def __call__(self, lines):
for line in lines:
vals = [line[x.start:x.end] for x in self.cols]
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals, widths):
pad = self.delimiter_pad or ''
delimiter = self.delimiter or ''
padded_delim = pad + delimiter + pad
if self.bookend:
bookend_left = delimiter + pad
bookend_right = pad + delimiter
else:
bookend_left = ''
bookend_right = ''
vals = [' ' * (width - len(val)) + val for val, width in zip(vals, widths)]
return bookend_left + padded_delim.join(vals) + bookend_right
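# A minimal sketch (illustrative) of join() with bookends and padding; each
# value is right-justified to its column width:
#
# >>> splitter = FixedWidthSplitter()
# >>> splitter.bookend = True
# >>> splitter.delimiter_pad = ' '
# >>> splitter.join(['1', 'bee'], widths=[4, 5])
# '|    1 |   bee |'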
class FixedWidthHeaderSplitter(DefaultSplitter):
'''Splitter class that splits on ``|``.'''
delimiter = '|'
class FixedWidthHeader(basic.BasicHeader):
"""
Fixed width table header reader.
"""
splitter_class = FixedWidthHeaderSplitter
""" Splitter class for splitting data lines into columns """
position_line = None # secondary header line position
""" row index of line that specifies position (default = 1) """
set_of_position_line_characters = set(r'`~!#$%^&*-_+=\|":' + "'")
def get_line(self, lines, index):
for i, line in enumerate(self.process_lines(lines)):
if i == index:
break
else: # No header line matching
raise InconsistentTableError('No header line found in table')
return line
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
# See "else" clause below for explanation of start_line and position_line
start_line = core._get_line_index(self.start_line, self.process_lines(lines))
position_line = core._get_line_index(self.position_line, self.process_lines(lines))
# If start_line is None then there is no header line. Column positions are
# determined from first data line and column names are either supplied by user
# or auto-generated.
if start_line is None:
if position_line is not None:
raise ValueError("Cannot set position_line without also setting header_start")
# data.data_lines attribute already set via self.data.get_data_lines(lines)
# in BaseReader.read(). This includes slicing for data_start / data_end.
data_lines = self.data.data_lines
if not data_lines:
raise InconsistentTableError(
'No data lines found so cannot autogenerate column names')
vals, starts, ends = self.get_fixedwidth_params(data_lines[0])
self.names = [self.auto_format.format(i)
for i in range(1, len(vals) + 1)]
else:
# This bit of code handles two cases:
# start_line = <index> and position_line = None
# Single header line where that line is used to determine both the
# column positions and names.
# start_line = <index> and position_line = <index2>
# Two header lines where the first line defines the column names and
# the second line defines the column positions
if position_line is not None:
# Define self.col_starts and self.col_ends so that the call to
# get_fixedwidth_params below will use those to find the header
# column names. Note that get_fixedwidth_params returns Python
# slice col_ends but expects inclusive col_ends on input (for
# more intuitive user interface).
line = self.get_line(lines, position_line)
if len(set(line) - {self.splitter.delimiter, ' '}) != 1:
raise InconsistentTableError(
'Position line should only contain delimiters and '
'one other character, e.g. "--- ------- ---".')
# The line above lies. It accepts white space as well.
# We don't want to encourage using three different
# characters, because that can cause ambiguities, but white
# spaces are so common everywhere that practicality beats
# purity here.
charset = self.set_of_position_line_characters.union(
{self.splitter.delimiter, ' '})
if not set(line).issubset(charset):
raise InconsistentTableError(
f'Characters in position line must be part of {charset}')
vals, self.col_starts, col_ends = self.get_fixedwidth_params(line)
self.col_ends = [x - 1 if x is not None else None for x in col_ends]
# Get the header column names and column positions
line = self.get_line(lines, start_line)
vals, starts, ends = self.get_fixedwidth_params(line)
self.names = vals
self._set_cols_from_names()
# Set column start and end positions.
for i, col in enumerate(self.cols):
col.start = starts[i]
col.end = ends[i]
def get_fixedwidth_params(self, line):
"""
Split ``line`` on the delimiter and determine column values and
column start and end positions. This might include null columns with
zero length (e.g. for ``header row = "| col1 || col2 | col3 |"`` or
``header2_row = "----- ------- -----"``). The null columns are
stripped out. Returns the values between delimiters and the
corresponding start and end positions.
Parameters
----------
line : str
Input line
Returns
-------
vals : list
List of values.
starts : list
List of starting indices.
ends : list
List of ending indices.
"""
# If column positions are already specified then just use those.
# If neither column starts or ends are given, figure out positions
# between delimiters. Otherwise, either the starts or the ends have
# been given, so figure out whichever wasn't given.
if self.col_starts is not None and self.col_ends is not None:
starts = list(self.col_starts) # could be any iterable, e.g. np.array
# user supplies inclusive endpoint
ends = [x + 1 if x is not None else None for x in self.col_ends]
if len(starts) != len(ends):
raise ValueError('Fixed width col_starts and col_ends must have the same length')
vals = [line[start:end].strip() for start, end in zip(starts, ends)]
elif self.col_starts is None and self.col_ends is None:
# There might be a cleaner way to do this but it works...
vals = line.split(self.splitter.delimiter)
starts = [0]
ends = []
for val in vals:
if val:
ends.append(starts[-1] + len(val))
starts.append(ends[-1] + 1)
else:
starts[-1] += 1
starts = starts[:-1]
vals = [x.strip() for x in vals if x]
if len(vals) != len(starts) or len(vals) != len(ends):
raise InconsistentTableError('Error parsing fixed width header')
else:
# exactly one of col_starts or col_ends is given...
if self.col_starts is not None:
starts = list(self.col_starts)
ends = starts[1:] + [None] # Assume each col ends where the next starts
else: # self.col_ends is not None
ends = [x + 1 for x in self.col_ends]
starts = [0] + ends[:-1] # Assume each col starts where the last ended
vals = [line[start:end].strip() for start, end in zip(starts, ends)]
return vals, starts, ends
def write(self, lines):
# Header line not written until data are formatted. Until then it is
# not known how wide each column will be for fixed width.
pass
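# A minimal sketch (illustrative) of get_fixedwidth_params on a delimited
# header line; with col_starts/col_ends unset the positions are derived from
# the ``|`` delimiters (ends are pythonic slice indices):
#
# >>> header = FixedWidthHeader()
# >>> header.col_starts = header.col_ends = None
# >>> header.get_fixedwidth_params('| col1 | col2 |')
# (['col1', 'col2'], [1, 8], [7, 14])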
class FixedWidthData(basic.BasicData):
"""
Fixed width table data reader.
"""
splitter_class = FixedWidthSplitter
""" Splitter class for splitting data lines into columns """
def write(self, lines):
vals_list = []
col_str_iters = self.str_vals()
for vals in zip(*col_str_iters):
vals_list.append(vals)
for i, col in enumerate(self.cols):
col.width = max(len(vals[i]) for vals in vals_list)
if self.header.start_line is not None:
col.width = max(col.width, len(col.info.name))
widths = [col.width for col in self.cols]
if self.header.start_line is not None:
lines.append(self.splitter.join([col.info.name for col in self.cols],
widths))
if self.header.position_line is not None:
char = self.header.position_char
if len(char) != 1:
raise ValueError(f'Position_char="{char}" must be a single character')
vals = [char * col.width for col in self.cols]
lines.append(self.splitter.join(vals, widths))
for vals in vals_list:
lines.append(self.splitter.join(vals, widths))
return lines
class FixedWidth(basic.Basic):
"""Fixed width table with single header line defining column names and positions.
Examples::
# Bar delimiter in header and data
| Col1 | Col2 | Col3 |
| 1.2 | hello there | 3 |
| 2.4 | many words | 7 |
# Bar delimiter in header only
Col1 | Col2 | Col3
1.2 hello there 3
2.4 many words 7
# No delimiter with column positions specified as input
Col1 Col2Col3
1.2hello there 3
2.4many words 7
See the :ref:`astropy:fixed_width_gallery` for specific usage examples.
"""
_format_name = 'fixed_width'
_description = 'Fixed width'
header_class = FixedWidthHeader
data_class = FixedWidthData
def __init__(self, col_starts=None, col_ends=None, delimiter_pad=' ', bookend=True):
super().__init__()
self.data.splitter.delimiter_pad = delimiter_pad
self.data.splitter.bookend = bookend
self.header.col_starts = col_starts
self.header.col_ends = col_ends
class FixedWidthNoHeaderHeader(FixedWidthHeader):
    '''Header reader for fixed width tables with no header line'''
start_line = None
class FixedWidthNoHeaderData(FixedWidthData):
'''Data reader for fixed width tables with no header line'''
start_line = 0
class FixedWidthNoHeader(FixedWidth):
"""Fixed width table which has no header line.
When reading, column names are either input (``names`` keyword) or
auto-generated. Column positions are determined either by input
    (``col_starts`` and ``col_ends`` keywords) or by splitting the first data
line. In the latter case a ``delimiter`` is required to split the data
line.
Examples::
# Bar delimiter in header and data
| 1.2 | hello there | 3 |
| 2.4 | many words | 7 |
# Compact table having no delimiter and column positions specified as input
1.2hello there3
2.4many words 7
This class is just a convenience wrapper around the ``FixedWidth`` reader
but with ``header_start=None`` and ``data_start=0``.
See the :ref:`astropy:fixed_width_gallery` for specific usage examples.
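    A minimal usage sketch (file name, column names and positions are illustrative)::
        >>> from astropy.io import ascii
        >>> tbl = ascii.read('table.txt', format='fixed_width_no_header',
        ...                  names=('Col1', 'Col2', 'Col3'),
        ...                  col_starts=(0, 9, 18))  # doctest: +SKIP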
"""
_format_name = 'fixed_width_no_header'
_description = 'Fixed width with no header'
header_class = FixedWidthNoHeaderHeader
data_class = FixedWidthNoHeaderData
def __init__(self, col_starts=None, col_ends=None, delimiter_pad=' ', bookend=True):
super().__init__(col_starts, col_ends, delimiter_pad=delimiter_pad,
bookend=bookend)
class FixedWidthTwoLineHeader(FixedWidthHeader):
'''Header reader for fixed width tables splitting on whitespace.
For fixed width tables with several header lines, there is typically
a white-space delimited format line, so splitting on white space is
needed.
'''
splitter_class = DefaultSplitter
class FixedWidthTwoLineDataSplitter(FixedWidthSplitter):
'''Splitter for fixed width tables splitting on ``' '``.'''
delimiter = ' '
class FixedWidthTwoLineData(FixedWidthData):
    '''Data reader for fixed width tables with two header lines.'''
splitter_class = FixedWidthTwoLineDataSplitter
class FixedWidthTwoLine(FixedWidth):
"""Fixed width table which has two header lines.
The first header line defines the column names and the second implicitly
defines the column positions.
Examples::
# Typical case with column extent defined by ---- under column names.
col1 col2 <== header_start = 0
----- ------------ <== position_line = 1, position_char = "-"
1 bee flies <== data_start = 2
2 fish swims
# Pretty-printed table
+------+------------+
| Col1 | Col2 |
+------+------------+
| 1.2 | "hello" |
| 2.4 | there world|
+------+------------+
See the :ref:`astropy:fixed_width_gallery` for specific usage examples.
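    A minimal usage sketch (``table.txt`` is illustrative)::
        >>> from astropy.io import ascii
        >>> tbl = ascii.read('table.txt', format='fixed_width_two_line')  # doctest: +SKIP
        >>> ascii.write(tbl, format='fixed_width_two_line', position_char='=')  # doctest: +SKIP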
"""
_format_name = 'fixed_width_two_line'
_description = 'Fixed width with second header line'
data_class = FixedWidthTwoLineData
header_class = FixedWidthTwoLineHeader
def __init__(self, position_line=1, position_char='-', delimiter_pad=None, bookend=False):
super().__init__(delimiter_pad=delimiter_pad, bookend=bookend)
self.header.position_line = position_line
self.header.position_char = position_char
self.data.start_line = position_line + 1
|
c924c1936a1ef42a0f20d96fc80c496ecfa4b461b59f3bbed0e540398da5eef0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Classes to read AAS MRT table format
Ref: https://journals.aas.org/mrt-standards
:Copyright: Smithsonian Astrophysical Observatory (2021)
:Author: Tom Aldcroft ([email protected]), \
Suyog Garg ([email protected])
"""
import re
import math
import warnings
import numpy as np
from io import StringIO
from math import floor, ceil
from . import core
from . import fixedwidth, cds
from astropy import units as u
from astropy.table import Table
from astropy.table import Column, MaskedColumn
from string import Template
from textwrap import wrap
MAX_SIZE_README_LINE = 80
MAX_COL_INTLIMIT = 100000
__doctest_skip__ = ['*']
BYTE_BY_BYTE_TEMPLATE = [
"Byte-by-byte Description of file: $file",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
"$bytebybyte",
"--------------------------------------------------------------------------------"]
MRT_TEMPLATE = [
"Title:",
"Authors:",
"Table:",
"================================================================================",
"$bytebybyte",
"Notes:",
"--------------------------------------------------------------------------------"]
class MrtSplitter(fixedwidth.FixedWidthSplitter):
"""
Contains the join function to left align the MRT columns
when writing to a file.
"""
def join(self, vals, widths):
vals = [val + ' ' * (width - len(val)) for val, width in zip(vals, widths)]
return self.delimiter.join(vals)
class MrtHeader(cds.CdsHeader):
_subfmt = 'MRT'
def _split_float_format(self, value):
"""
        Splits a Float string into different parts to find the number
        of digits after the decimal point and check whether the value is
        in Scientific notation.
Parameters
----------
value : str
String containing the float value to split.
Returns
-------
        fmt : (int, int, int, bool, bool)
            Tuple of values describing the Float string:
            (size, ent, dec, sign, exp)
            size, length of the given string.
            ent, number of digits before the decimal point.
            dec, number of digits after the decimal point.
            sign, whether or not the given value is signed.
            exp, whether the value is in Scientific notation.
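        A quick illustration with a hypothetical value (doctests in this
        module are skipped by default)::
            >>> MrtHeader()._split_float_format('-123.45')  # doctest: +SKIP
            (7, 3, 2, True, False)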
"""
regfloat = re.compile(r"""(?P<sign> [+-]*)
(?P<ent> [^eE.]+)
(?P<deciPt> [.]*)
(?P<decimals> [0-9]*)
(?P<exp> [eE]*-*)[0-9]*""",
re.VERBOSE)
mo = regfloat.match(value)
if mo is None:
raise Exception(f'{value} is not a float number')
return (len(value),
len(mo.group('ent')),
len(mo.group('decimals')),
mo.group('sign') != "",
mo.group('exp') != "")
def _set_column_val_limits(self, col):
"""
Sets the ``col.min`` and ``col.max`` column attributes,
taking into account columns with Null values.
"""
col.max = max(col)
col.min = min(col)
        # ``np.ma.masked`` is the singleton instance of ``MaskedConstant``;
        # comparing against the class itself would always be False.
        if col.max is np.ma.masked:
            col.max = None
        if col.min is np.ma.masked:
            col.min = None
def column_float_formatter(self, col):
"""
String formatter function for a column containing Float values.
Checks if the values in the given column are in Scientific notation,
        by splitting the value string. It is assumed that the column either has
float values or Scientific notation.
A ``col.formatted_width`` attribute is added to the column. It is not added
if such an attribute is already present, say when the ``formats`` argument
is passed to the writer. A properly formatted format string is also added as
the ``col.format`` attribute.
Parameters
----------
col : A ``Table.Column`` object.
"""
        # maxsize: maximum length of the string containing the float value.
        # maxent: maximum number of digits before the decimal point.
        # maxdec: maximum number of digits after the decimal point.
        # maxprec: maximum precision of the column values, sum of maxent and maxdec.
maxsize, maxprec, maxent, maxdec = 1, 0, 1, 0
sign = False
fformat = 'F'
# Find maximum sized value in the col
for val in col.str_vals:
# Skip null values
if val is None or val == '':
continue
# Find format of the Float string
fmt = self._split_float_format(val)
# If value is in Scientific notation
if fmt[4] is True:
# if the previous column value was in normal Float format
# set maxsize, maxprec and maxdec to default.
if fformat == 'F':
maxsize, maxprec, maxdec = 1, 0, 0
# Designate the column to be in Scientific notation.
fformat = 'E'
else:
# Move to next column value if
# current value is not in Scientific notation
# but the column is designated as such because
# one of the previous values was.
if fformat == 'E':
continue
if maxsize < fmt[0]:
maxsize = fmt[0]
if maxent < fmt[1]:
maxent = fmt[1]
if maxdec < fmt[2]:
maxdec = fmt[2]
if fmt[3]:
sign = True
if maxprec < fmt[1] + fmt[2]:
maxprec = fmt[1] + fmt[2]
if fformat == 'E':
if getattr(col, 'formatted_width', None) is None: # If ``formats`` not passed.
col.formatted_width = maxsize
if sign:
col.formatted_width += 1
# Number of digits after decimal is replaced by the precision
# for values in Scientific notation, when writing that Format.
col.fortran_format = fformat + str(col.formatted_width) + "." + str(maxprec)
col.format = str(col.formatted_width) + "." + str(maxdec) + "e"
else:
lead = ''
if getattr(col, 'formatted_width', None) is None: # If ``formats`` not passed.
col.formatted_width = maxent + maxdec + 1
if sign:
col.formatted_width += 1
elif col.format.startswith('0'):
# Keep leading zero, if already set in format - primarily for `seconds` columns
# in coordinates; may need extra case if this is to be also supported with `sign`.
lead = '0'
col.fortran_format = fformat + str(col.formatted_width) + "." + str(maxdec)
col.format = lead + col.fortran_format[1:] + "f"
def write_byte_by_byte(self):
"""
Writes the Byte-By-Byte description of the table.
        Columns that are `astropy.coordinates.SkyCoord` or `astropy.timeseries.TimeSeries`
        objects, or columns with values that are such objects, are recognized as such,
        and some predefined labels and descriptions are used for them.
See the Vizier MRT Standard documentation in the link below for more details
on these. An example Byte-By-Byte table is shown here.
See: http://vizier.u-strasbg.fr/doc/catstd-3.1.htx
Example::
--------------------------------------------------------------------------------
Byte-by-byte Description of file: table.dat
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 8 A8 --- names Description of names
10-14 E5.1 --- e [-3160000.0/0.01] Description of e
16-23 F8.5 --- d [22.25/27.25] Description of d
25-31 E7.1 --- s [-9e+34/2.0] Description of s
33-35 I3 --- i [-30/67] Description of i
37-39 F3.1 --- sameF [5.0/5.0] Description of sameF
41-42 I2 --- sameI [20] Description of sameI
44-45 I2 h RAh Right Ascension (hour)
47-48 I2 min RAm Right Ascension (minute)
50-67 F18.15 s RAs Right Ascension (second)
69 A1 --- DE- Sign of Declination
70-71 I2 deg DEd Declination (degree)
73-74 I2 arcmin DEm Declination (arcmin)
76-91 F16.13 arcsec DEs Declination (arcsec)
--------------------------------------------------------------------------------
"""
# Get column widths
vals_list = []
col_str_iters = self.data.str_vals()
for vals in zip(*col_str_iters):
vals_list.append(vals)
for i, col in enumerate(self.cols):
col.width = max(len(vals[i]) for vals in vals_list)
if self.start_line is not None:
col.width = max(col.width, len(col.info.name))
widths = [col.width for col in self.cols]
startb = 1 # Byte count starts at 1.
# Set default width of the Bytes count column of the Byte-By-Byte table.
# This ``byte_count_width`` value helps align byte counts with respect
# to the hyphen using a format string.
byte_count_width = len(str(sum(widths) + len(self.cols) - 1))
# Format string for Start Byte and End Byte
singlebfmt = "{:" + str(byte_count_width) + "d}"
fmtb = singlebfmt + "-" + singlebfmt
# Add trailing single whitespaces to Bytes column for better visibility.
singlebfmt += " "
fmtb += " "
# Set default width of Label and Description Byte-By-Byte columns.
max_label_width, max_descrip_size = 7, 16
bbb = Table(names=['Bytes', 'Format', 'Units', 'Label', 'Explanations'],
dtype=[str] * 5)
# Iterate over the columns to write Byte-By-Byte rows.
for i, col in enumerate(self.cols):
# Check if column is MaskedColumn
col.has_null = isinstance(col, MaskedColumn)
if col.format is not None:
col.formatted_width = max(len(sval) for sval in col.str_vals)
# Set MRTColumn type, size and format.
if np.issubdtype(col.dtype, np.integer):
# Integer formatter
self._set_column_val_limits(col)
if getattr(col, 'formatted_width', None) is None: # If ``formats`` not passed.
col.formatted_width = max(len(str(col.max)), len(str(col.min)))
col.fortran_format = "I" + str(col.formatted_width)
if col.format is None:
col.format = ">" + col.fortran_format[1:]
elif np.issubdtype(col.dtype, np.dtype(float).type):
# Float formatter
self._set_column_val_limits(col)
self.column_float_formatter(col)
else:
# String formatter, ``np.issubdtype(col.dtype, str)`` is ``True``.
dtype = col.dtype.str
if col.has_null:
mcol = col
mcol.fill_value = ""
coltmp = Column(mcol.filled(), dtype=str)
dtype = coltmp.dtype.str
if getattr(col, 'formatted_width', None) is None: # If ``formats`` not passed.
col.formatted_width = int(re.search(r'(\d+)$', dtype).group(1))
col.fortran_format = "A" + str(col.formatted_width)
col.format = str(col.formatted_width) + "s"
endb = col.formatted_width + startb - 1
            # ``mixin`` columns converted to string valued columns will not have a name
            # attribute. In those cases, an ``Unknown`` column label is used, indicating that
            # such columns can be better formatted with some manipulation before calling
            # the MRT writer.
if col.name is None:
col.name = "Unknown"
# Set column description.
if col.description is not None:
description = col.description
else:
description = "Description of " + col.name
# Set null flag in column description
nullflag = ""
if col.has_null:
nullflag = "?"
# Set column unit
if col.unit is not None:
col_unit = col.unit.to_string("cds")
elif col.name.lower().find("magnitude") > -1:
# ``col.unit`` can still be ``None``, if the unit of column values
# is ``Magnitude``, because ``astropy.units.Magnitude`` is actually a class.
# Unlike other units which are instances of ``astropy.units.Unit``,
# application of the ``Magnitude`` unit calculates the logarithm
                # of the values. Thus, the only way to check whether the column values
                # have a ``Magnitude`` unit is to check the column name.
col_unit = "mag"
else:
col_unit = "---"
# Add col limit values to col description
lim_vals = ""
if (col.min and col.max and
not any(x in col.name for x in ['RA', 'DE', 'LON', 'LAT', 'PLN', 'PLT'])):
# No col limit values for coordinate columns.
if col.fortran_format[0] == 'I':
if abs(col.min) < MAX_COL_INTLIMIT and abs(col.max) < MAX_COL_INTLIMIT:
if col.min == col.max:
lim_vals = f"[{col.min}]"
else:
lim_vals = f"[{col.min}/{col.max}]"
elif col.fortran_format[0] in ('E', 'F'):
lim_vals = f"[{floor(col.min * 100) / 100.}/{ceil(col.max * 100) / 100.}]"
if lim_vals != '' or nullflag != '':
description = f"{lim_vals}{nullflag} {description}"
# Find the maximum label and description column widths.
if len(col.name) > max_label_width:
max_label_width = len(col.name)
if len(description) > max_descrip_size:
max_descrip_size = len(description)
# Add a row for the Sign of Declination in the bbb table
if col.name == 'DEd':
bbb.add_row([singlebfmt.format(startb),
"A1", "---", "DE-",
"Sign of Declination"])
col.fortran_format = 'I2'
startb += 1
# Add Byte-By-Byte row to bbb table
bbb.add_row([singlebfmt.format(startb) if startb == endb
else fmtb.format(startb, endb),
"" if col.fortran_format is None else col.fortran_format,
col_unit,
"" if col.name is None else col.name,
description])
startb = endb + 2
# Properly format bbb columns
bbblines = StringIO()
bbb.write(bbblines, format='ascii.fixed_width_no_header',
delimiter=' ', bookend=False, delimiter_pad=None,
formats={'Format': '<6s',
'Units': '<6s',
'Label': '<' + str(max_label_width) + 's',
'Explanations': '' + str(max_descrip_size) + 's'})
# Get formatted bbb lines
bbblines = bbblines.getvalue().splitlines()
# ``nsplit`` is the number of whitespaces to prefix to long description
# lines in order to wrap them. It is the sum of the widths of the
# previous 4 columns plus the number of single spacing between them.
# The hyphen in the Bytes column is also counted.
nsplit = byte_count_width * 2 + 1 + 12 + max_label_width + 4
# Wrap line if it is too long
buff = ""
for newline in bbblines:
if len(newline) > MAX_SIZE_README_LINE:
buff += ("\n").join(wrap(newline,
subsequent_indent=" " * nsplit,
width=MAX_SIZE_README_LINE))
buff += "\n"
else:
buff += newline + "\n"
# Last value of ``endb`` is the sum of column widths after formatting.
self.linewidth = endb
# Remove the last extra newline character from Byte-By-Byte.
buff = buff[:-1]
return buff
def write(self, lines):
"""
Writes the Header of the MRT table, aka ReadMe, which
also contains the Byte-By-Byte description of the table.
"""
from astropy.coordinates import SkyCoord
# Recognised ``SkyCoord.name`` forms with their default column names (helio* require SunPy).
coord_systems = {'galactic': ('GLAT', 'GLON', 'b', 'l'),
'ecliptic': ('ELAT', 'ELON', 'lat', 'lon'), # 'geocentric*ecliptic'
'heliographic': ('HLAT', 'HLON', 'lat', 'lon'), # '_carrington|stonyhurst'
'helioprojective': ('HPLT', 'HPLN', 'Ty', 'Tx')}
eqtnames = ['RAh', 'RAm', 'RAs', 'DEd', 'DEm', 'DEs']
# list to store indices of columns that are modified.
to_pop = []
# For columns that are instances of ``SkyCoord`` and other ``mixin`` columns
# or whose values are objects of these classes.
for i, col in enumerate(self.cols):
# If col is a ``Column`` object but its values are ``SkyCoord`` objects,
# convert the whole column to ``SkyCoord`` object, which helps in applying
# SkyCoord methods directly.
if not isinstance(col, SkyCoord) and isinstance(col[0], SkyCoord):
try:
col = SkyCoord(col)
except (ValueError, TypeError):
# If only the first value of the column is a ``SkyCoord`` object,
# the column cannot be converted to a ``SkyCoord`` object.
# These columns are converted to ``Column`` object and then converted
# to string valued column.
if not isinstance(col, Column):
col = Column(col)
col = Column([str(val) for val in col])
self.cols[i] = col
continue
# Replace single ``SkyCoord`` column by its coordinate components if no coordinate
            # columns of the corresponding type exist yet.
if isinstance(col, SkyCoord):
                # If coordinates are given in RA/DEC, divide each of them into hour/deg,
# minute/arcminute, second/arcsecond columns.
if ('ra' in col.representation_component_names.keys() and
len(set(eqtnames) - set(self.colnames)) == 6):
ra_c, dec_c = col.ra.hms, col.dec.dms
coords = [ra_c.h.round().astype('i1'), ra_c.m.round().astype('i1'), ra_c.s,
dec_c.d.round().astype('i1'), dec_c.m.round().astype('i1'), dec_c.s]
coord_units = [u.h, u.min, u.second,
u.deg, u.arcmin, u.arcsec]
coord_descrip = ['Right Ascension (hour)', 'Right Ascension (minute)',
'Right Ascension (second)', 'Declination (degree)',
'Declination (arcmin)', 'Declination (arcsec)']
for coord, name, coord_unit, descrip in zip(
coords, eqtnames, coord_units, coord_descrip):
# Have Sign of Declination only in the DEd column.
if name in ['DEm', 'DEs']:
coord_col = Column(list(np.abs(coord)), name=name,
unit=coord_unit, description=descrip)
else:
coord_col = Column(list(coord), name=name, unit=coord_unit,
description=descrip)
# Set default number of digits after decimal point for the
# second values, and deg-min to (signed) 2-digit zero-padded integer.
if name == 'RAs':
coord_col.format = '013.10f'
elif name == 'DEs':
coord_col.format = '012.9f'
elif name == 'RAh':
coord_col.format = '2d'
elif name == 'DEd':
coord_col.format = '+03d'
elif name.startswith(('RA', 'DE')):
coord_col.format = '02d'
self.cols.append(coord_col)
to_pop.append(i) # Delete original ``SkyCoord`` column.
                # For all other coordinate types, simply divide into two columns
                # for latitude and longitude respectively, keeping the units as they are.
else:
frminfo = ''
for frame, latlon in coord_systems.items():
if frame in col.name and len(set(latlon[:2]) - set(self.colnames)) == 2:
if frame != col.name:
frminfo = f' ({col.name})'
lon_col = Column(getattr(col, latlon[3]), name=latlon[1],
description=f'{frame.capitalize()} Longitude{frminfo}',
unit=col.representation_component_units[latlon[3]],
format='.12f')
lat_col = Column(getattr(col, latlon[2]), name=latlon[0],
description=f'{frame.capitalize()} Latitude{frminfo}',
unit=col.representation_component_units[latlon[2]],
format='+.12f')
self.cols.append(lon_col)
self.cols.append(lat_col)
to_pop.append(i) # Delete original ``SkyCoord`` column.
# Convert all other ``SkyCoord`` columns that are not in the above three
# representations to string valued columns. Those could either be types not
# supported yet (e.g. 'helioprojective'), or already present and converted.
# If there were any extra ``SkyCoord`` columns of one kind after the first one,
# then their decomposition into their component columns has been skipped.
# This is done in order to not create duplicate component columns.
# Explicit renaming of the extra coordinate component columns by appending some
# suffix to their name, so as to distinguish them, is not yet implemented.
if i not in to_pop:
warnings.warn(f"Coordinate system of type '{col.name}' already stored in table "
f"as CDS/MRT-syle columns or of unrecognized type. So column {i} "
f"is being skipped with designation of a string valued column "
f"`{self.colnames[i]}`.", UserWarning)
self.cols.append(Column(col.to_string(), name=self.colnames[i]))
to_pop.append(i) # Delete original ``SkyCoord`` column.
# Convert all other ``mixin`` columns to ``Column`` objects.
# Parsing these may still lead to errors!
elif not isinstance(col, Column):
col = Column(col)
# If column values are ``object`` types, convert them to string.
if np.issubdtype(col.dtype, np.dtype(object).type):
col = Column([str(val) for val in col])
self.cols[i] = col
# Delete original ``SkyCoord`` columns, if there were any.
for i in to_pop[::-1]:
self.cols.pop(i)
# Check for any left over extra coordinate columns.
if any(x in self.colnames for x in ['RAh', 'DEd', 'ELON', 'GLAT']):
# At this point any extra ``SkyCoord`` columns should have been converted to string
# valued columns, together with issuance of a warning, by the coordinate parser above.
# This test is just left here as a safeguard.
for i, col in enumerate(self.cols):
if isinstance(col, SkyCoord):
self.cols[i] = Column(col.to_string(), name=self.colnames[i])
                    message = ('Table already has coordinate system in CDS/MRT-style columns. '
f'So column {i} should have been replaced already with '
f'a string valued column `{self.colnames[i]}`.')
raise core.InconsistentTableError(message)
# Get Byte-By-Byte description and fill the template
bbb_template = Template('\n'.join(BYTE_BY_BYTE_TEMPLATE))
byte_by_byte = bbb_template.substitute({'file': 'table.dat',
'bytebybyte': self.write_byte_by_byte()})
# Fill up the full ReadMe
rm_template = Template('\n'.join(MRT_TEMPLATE))
readme_filled = rm_template.substitute({'bytebybyte': byte_by_byte})
lines.append(readme_filled)
class MrtData(cds.CdsData):
"""MRT table data reader
"""
_subfmt = 'MRT'
splitter_class = MrtSplitter
def write(self, lines):
self.splitter.delimiter = ' '
fixedwidth.FixedWidthData.write(self, lines)
class Mrt(core.BaseReader):
"""AAS MRT (Machine-Readable Table) format table.
**Reading**
::
>>> from astropy.io import ascii
>>> table = ascii.read('data.mrt', format='mrt')
**Writing**
Use ``ascii.write(table, 'data.mrt', format='mrt')`` to write tables to
Machine Readable Table (MRT) format.
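    A short writing sketch (the table contents and file name are illustrative)::
        >>> from astropy.table import Table
        >>> from astropy.io import ascii
        >>> t = Table({'name': ['a', 'b'], 'flux': [1.0, 2.5]})
        >>> ascii.write(t, 'data.mrt', format='mrt')  # doctest: +SKIP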
Note that the metadata of the table, apart from units, column names and
description, will not be written. These have to be filled in by hand later.
See also: :ref:`cds_mrt_format`.
Caveats:
* The Units and Explanations are available in the column ``unit`` and
``description`` attributes, respectively.
* The other metadata defined by this format is not available in the output table.
"""
_format_name = 'mrt'
_io_registry_format_aliases = ['mrt']
_io_registry_can_write = True
_description = 'MRT format table'
data_class = MrtData
header_class = MrtHeader
def write(self, table=None):
        # Writing empty tables is not yet supported.
if len(table) == 0:
raise NotImplementedError
self.data.header = self.header
self.header.position_line = None
self.header.start_line = None
        # Create a copy of the ``table``, so that only the copy gets modified
        # and written to the file, while the original table remains as it is.
table = table.copy()
return super().write(table)
|
0c87a44b5a6170b1b48b677ed6a8f98139701d828d3bba71618d7333c5e0a8ac | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
ipac.py:
Classes to read IPAC table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from collections import defaultdict, OrderedDict
from textwrap import wrap
from warnings import warn
from . import core
from . import fixedwidth
from . import basic
from astropy.utils.exceptions import AstropyUserWarning
from astropy.table.pprint import get_auto_format_func
class IpacFormatErrorDBMS(Exception):
def __str__(self):
return '{}\nSee {}'.format(
super().__str__(),
'https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/DBMSrestriction.html')
class IpacFormatError(Exception):
def __str__(self):
return '{}\nSee {}'.format(
super().__str__(),
'https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html')
class IpacHeaderSplitter(core.BaseSplitter):
'''Splitter for Ipac Headers.
    This splitter is similar to its parent when reading, but supports a
fixed width format (as required for Ipac table headers) for writing.
'''
process_line = None
process_val = None
delimiter = '|'
delimiter_pad = ''
skipinitialspace = False
comment = r'\s*\\'
write_comment = r'\\'
col_starts = None
col_ends = None
def join(self, vals, widths):
pad = self.delimiter_pad or ''
delimiter = self.delimiter or ''
padded_delim = pad + delimiter + pad
bookend_left = delimiter + pad
bookend_right = pad + delimiter
vals = [' ' * (width - len(val)) + val for val, width in zip(vals, widths)]
return bookend_left + padded_delim.join(vals) + bookend_right
class IpacHeader(fixedwidth.FixedWidthHeader):
"""IPAC table header"""
splitter_class = IpacHeaderSplitter
# Defined ordered list of possible types. Ordering is needed to
# distinguish between "d" (double) and "da" (date) as defined by
# the IPAC standard for abbreviations. This gets used in get_col_type().
col_type_list = (('integer', core.IntType),
('long', core.IntType),
('double', core.FloatType),
('float', core.FloatType),
('real', core.FloatType),
('char', core.StrType),
('date', core.StrType))
definition = 'ignore'
start_line = None
def process_lines(self, lines):
"""Generator to yield IPAC header lines, i.e. those starting and ending with
delimiter character (with trailing whitespace stripped)"""
delim = self.splitter.delimiter
for line in lines:
line = line.rstrip()
if line.startswith(delim) and line.endswith(delim):
yield line.strip(delim)
def update_meta(self, lines, meta):
"""
Extract table-level comments and keywords for IPAC table. See:
https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html#kw
"""
def process_keyword_value(val):
"""
Take a string value and convert to float, int or str, and strip quotes
as needed.
"""
val = val.strip()
try:
val = int(val)
except Exception:
try:
val = float(val)
except Exception:
# Strip leading/trailing quote. The spec says that a matched pair
# of quotes is required, but this code will allow a non-quoted value.
for quote in ('"', "'"):
if val.startswith(quote) and val.endswith(quote):
val = val[1:-1]
break
return val
table_meta = meta['table']
table_meta['comments'] = []
table_meta['keywords'] = OrderedDict()
keywords = table_meta['keywords']
re_keyword = re.compile(r'\\'
r'(?P<name> \w+)'
r'\s* = (?P<value> .+) $',
re.VERBOSE)
for line in lines:
# Keywords and comments start with "\". Once the first non-slash
# line is seen then bail out.
if not line.startswith('\\'):
break
m = re_keyword.match(line)
if m:
name = m.group('name')
val = process_keyword_value(m.group('value'))
# IPAC allows for continuation keywords, e.g.
# \SQL = 'WHERE '
# \SQL = 'SELECT (25 column names follow in next row.)'
if name in keywords and isinstance(val, str):
prev_val = keywords[name]['value']
if isinstance(prev_val, str):
val = prev_val + val
keywords[name] = {'value': val}
else:
# Comment is required to start with "\ "
if line.startswith('\\ '):
val = line[2:].strip()
if val:
table_meta['comments'].append(val)
def get_col_type(self, col):
for (col_type_key, col_type) in self.col_type_list:
if col_type_key.startswith(col.raw_type.lower()):
return col_type
else:
            raise ValueError('Unknown data type "{}" for column "{}"'.format(
                col.raw_type, col.name))
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
header_lines = self.process_lines(lines) # generator returning valid header lines
header_vals = [vals for vals in self.splitter(header_lines)]
if len(header_vals) == 0:
raise ValueError('At least one header line beginning and ending with '
'delimiter required')
elif len(header_vals) > 4:
raise ValueError('More than four header lines were found')
# Generate column definitions
cols = []
start = 1
for i, name in enumerate(header_vals[0]):
col = core.Column(name=name.strip(' -'))
col.start = start
col.end = start + len(name)
if len(header_vals) > 1:
col.raw_type = header_vals[1][i].strip(' -')
col.type = self.get_col_type(col)
if len(header_vals) > 2:
col.unit = header_vals[2][i].strip() or None # Can't strip dashes here
if len(header_vals) > 3:
# The IPAC null value corresponds to the io.ascii bad_value.
# In this case there isn't a fill_value defined, so just put
# in the minimal entry that is sure to convert properly to the
# required type.
#
# Strip spaces but not dashes (not allowed in NULL row per
# https://github.com/astropy/astropy/issues/361)
null = header_vals[3][i].strip()
fillval = '' if issubclass(col.type, core.StrType) else '0'
self.data.fill_values.append((null, fillval, col.name))
start = col.end + 1
cols.append(col)
# Correct column start/end based on definition
if self.ipac_definition == 'right':
col.start -= 1
elif self.ipac_definition == 'left':
col.end += 1
self.names = [x.name for x in cols]
self.cols = cols
def str_vals(self):
if self.DBMS:
IpacFormatE = IpacFormatErrorDBMS
else:
IpacFormatE = IpacFormatError
namelist = self.colnames
if self.DBMS:
countnamelist = defaultdict(int)
for name in self.colnames:
countnamelist[name.lower()] += 1
doublenames = [x for x in countnamelist if countnamelist[x] > 1]
if doublenames != []:
raise IpacFormatE('IPAC DBMS tables are not case sensitive. '
'This causes duplicate column names: {}'.format(doublenames))
for name in namelist:
m = re.match(r'\w+', name)
if m.end() != len(name):
raise IpacFormatE('{} - Only alphanumeric characters and _ '
'are allowed in column names.'.format(name))
            if self.DBMS and not (name[0].isalpha() or name[0] == '_'):
raise IpacFormatE(f'Column name cannot start with numbers: {name}')
if self.DBMS:
if name in ['x', 'y', 'z', 'X', 'Y', 'Z']:
raise IpacFormatE('{} - x, y, z, X, Y, Z are reserved names and '
'cannot be used as column names.'.format(name))
if len(name) > 16:
raise IpacFormatE(
f'{name} - Maximum length for column name is 16 characters')
else:
if len(name) > 40:
raise IpacFormatE(
f'{name} - Maximum length for column name is 40 characters.')
dtypelist = []
unitlist = []
nullist = []
for col in self.cols:
col_dtype = col.info.dtype
col_unit = col.info.unit
col_format = col.info.format
if col_dtype.kind in ['i', 'u']:
if col_dtype.itemsize <= 2:
dtypelist.append('int')
else:
dtypelist.append('long')
elif col_dtype.kind == 'f':
if col_dtype.itemsize <= 4:
dtypelist.append('float')
else:
dtypelist.append('double')
else:
dtypelist.append('char')
if col_unit is None:
unitlist.append('')
else:
unitlist.append(str(col.info.unit))
# This may be incompatible with mixin columns
null = col.fill_values[core.masked]
try:
auto_format_func = get_auto_format_func(col)
format_func = col.info._format_funcs.get(col_format, auto_format_func)
nullist.append((format_func(col_format, null)).strip())
except Exception:
                # It is possible that null and the column values have different
                # data types (e.g. a number vs. null = 'null', i.e. a string).
                # This could cause all kinds of exceptions, so a catch-all
                # block is needed here.
nullist.append(str(null).strip())
return [namelist, dtypelist, unitlist, nullist]
def write(self, lines, widths):
'''Write header.
The width of each column is determined in Ipac.write. Writing the header
must be delayed until that time.
This function is called from there, once the width information is
available.'''
for vals in self.str_vals():
lines.append(self.splitter.join(vals, widths))
return lines
class IpacDataSplitter(fixedwidth.FixedWidthSplitter):
delimiter = ' '
delimiter_pad = ''
bookend = True
class IpacData(fixedwidth.FixedWidthData):
"""IPAC table data reader"""
comment = r'[|\\]'
start_line = 0
splitter_class = IpacDataSplitter
fill_values = [(core.masked, 'null')]
def write(self, lines, widths, vals_list):
""" IPAC writer, modified from FixedWidth writer """
for vals in vals_list:
lines.append(self.splitter.join(vals, widths))
return lines
class Ipac(basic.Basic):
r"""IPAC format table.
See: https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html
Example::
\\name=value
\\ Comment
| column1 | column2 | column3 | column4 | column5 |
| double | double | int | double | char |
| unit | unit | unit | unit | unit |
| null | null | null | null | null |
2.0978 29.09056 73765 2.06000 B8IVpMnHg
Or::
|-----ra---|----dec---|---sao---|------v---|----sptype--------|
2.09708 29.09056 73765 2.06000 B8IVpMnHg
The comments and keywords defined in the header are available via the output
table ``meta`` attribute::
>>> import os
>>> from astropy.io import ascii
>>> filename = os.path.join(ascii.__path__[0], 'tests/data/ipac.dat')
>>> data = ascii.read(filename)
>>> print(data.meta['comments'])
['This is an example of a valid comment']
>>> for name, keyword in data.meta['keywords'].items():
... print(name, keyword['value'])
...
intval 1
floatval 2300.0
date Wed Sp 20 09:48:36 1995
key_continue IPAC keywords can continue across lines
Note that there are different conventions for characters occurring below the
position of the ``|`` symbol in IPAC tables. By default, any character
below a ``|`` will be ignored (since this is the current standard),
but if you need to read files that assume characters below the ``|``
symbols belong to the column before or after the ``|``, you can specify
``definition='left'`` or ``definition='right'`` respectively when reading
the table (the default is ``definition='ignore'``). The following examples
demonstrate the different conventions:
* ``definition='ignore'``::
| ra | dec |
| float | float |
1.2345 6.7890
* ``definition='left'``::
| ra | dec |
| float | float |
1.2345 6.7890
* ``definition='right'``::
| ra | dec |
| float | float |
1.2345 6.7890
IPAC tables can specify a null value in the header that is shown in place
of missing or bad data. On writing, this value defaults to ``null``.
To specify a different null value, use the ``fill_values`` option to
replace masked values with a string or number of your choice as
described in :ref:`astropy:io_ascii_write_parameters`::
>>> from astropy.io.ascii import masked
>>> fill = [(masked, 'N/A', 'ra'), (masked, -999, 'sptype')]
>>> ascii.write(data, format='ipac', fill_values=fill)
\ This is an example of a valid comment
...
| ra| dec| sai| v2| sptype|
| double| double| long| double| char|
| unit| unit| unit| unit| ergs|
| N/A| null| null| null| -999|
N/A 29.09056 null 2.06 -999
2345678901.0 3456789012.0 456789012 4567890123.0 567890123456789012
When writing a table with a column of integers, the data type is output
as ``int`` when the column ``dtype.itemsize`` is less than or equal to 2;
otherwise the data type is ``long``. For a column of floating-point values,
the data type is ``float`` when ``dtype.itemsize`` is less than or equal
to 4; otherwise the data type is ``double``.
Parameters
----------
definition : str, optional
Specify the convention for characters in the data table that occur
directly below the pipe (``|``) symbol in the header column definition:
* 'ignore' - Any character beneath a pipe symbol is ignored (default)
* 'right' - Character is associated with the column to the right
* 'left' - Character is associated with the column to the left
DBMS : bool, optional
If true, this verifies that written tables adhere (semantically)
to the `IPAC/DBMS
<https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/DBMSrestriction.html>`_
definition of IPAC tables. If 'False' it only checks for the (less strict)
`IPAC <https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html>`_
definition.
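    A reading sketch using an explicit convention (``table.tbl`` is illustrative)::
        >>> data = ascii.read('table.tbl', format='ipac', definition='left')  # doctest: +SKIP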
"""
_format_name = 'ipac'
_io_registry_format_aliases = ['ipac']
_io_registry_can_write = True
_description = 'IPAC format table'
data_class = IpacData
header_class = IpacHeader
def __init__(self, definition='ignore', DBMS=False):
super().__init__()
        # Usually the header is not defined in __init__, but here it needs a keyword
if definition in ['ignore', 'left', 'right']:
self.header.ipac_definition = definition
else:
raise ValueError("definition should be one of ignore/left/right")
self.header.DBMS = DBMS
def write(self, table):
"""
Write ``table`` as list of strings.
Parameters
----------
table : `~astropy.table.Table`
Input table data
Returns
-------
lines : list
List of strings corresponding to ASCII table
"""
# Set a default null value for all columns by adding at the end, which
# is the position with the lowest priority.
# We have to do it this late, because the fill_value
# defined in the class can be overwritten by ui.write
self.data.fill_values.append((core.masked, 'null'))
# Check column names before altering
self.header.cols = list(table.columns.values())
self.header.check_column_names(self.names, self.strict_names, self.guessing)
core._apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names)
# Check that table has only 1-d columns.
self._check_multidim_table(table)
# Now use altered columns
new_cols = list(table.columns.values())
# link information about the columns to the writer object (i.e. self)
self.header.cols = new_cols
self.data.cols = new_cols
# Write header and data to lines list
lines = []
# Write meta information
if 'comments' in table.meta:
for comment in table.meta['comments']:
if len(str(comment)) > 78:
warn('Comment string > 78 characters was automatically wrapped.',
AstropyUserWarning)
for line in wrap(str(comment), 80, initial_indent='\\ ', subsequent_indent='\\ '):
lines.append(line)
if 'keywords' in table.meta:
keydict = table.meta['keywords']
for keyword in keydict:
try:
val = keydict[keyword]['value']
lines.append(f'\\{keyword.strip()}={val!r}')
                    # meta is not standardized: catch some common errors.
except TypeError:
warn("Table metadata keyword {0} has been skipped. "
"IPAC metadata must be in the form {{'keywords':"
"{{'keyword': {{'value': value}} }}".format(keyword),
AstropyUserWarning)
ignored_keys = [key for key in table.meta if key not in ('keywords', 'comments')]
if any(ignored_keys):
warn("Table metadata keyword(s) {0} were not written. "
"IPAC metadata must be in the form {{'keywords':"
"{{'keyword': {{'value': value}} }}".format(ignored_keys),
AstropyUserWarning
)
# Usually, this is done in data.write, but since the header is written
# first, we need that here.
self.data._set_fill_values(self.data.cols)
# get header and data as strings to find width of each column
for i, col in enumerate(table.columns.values()):
col.headwidth = max(len(vals[i]) for vals in self.header.str_vals())
# keep data_str_vals because they take some time to make
data_str_vals = []
col_str_iters = self.data.str_vals()
for vals in zip(*col_str_iters):
data_str_vals.append(vals)
for i, col in enumerate(table.columns.values()):
            # ``default`` handles the case of an empty table (no data rows).
            col.width = max((len(vals[i]) for vals in data_str_vals), default=0)
widths = [max(col.width, col.headwidth) for col in table.columns.values()]
# then write table
self.header.write(lines, widths)
self.data.write(lines, widths, data_str_vals)
return lines
|
81f1a040b9ac39b41c64c0475964e173d0e1b45b8dfb53ef8864e2b736f1b1a8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible HTML table reader and writer.
html.py:
Classes to read and write HTML tables
`BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_
must be installed to read HTML tables.
"""
import warnings
from . import core
from astropy.table import Column
from astropy.utils.xml import writer
from copy import deepcopy
class SoupString(str):
"""
Allows for strings to hold BeautifulSoup data.
"""
def __new__(cls, *args, **kwargs):
return str.__new__(cls, *args, **kwargs)
def __init__(self, val):
self.soup = val
class ListWriter:
"""
Allows for XMLWriter to write to a list instead of a file.
"""
def __init__(self, out):
self.out = out
def write(self, data):
self.out.append(data)
def identify_table(soup, htmldict, numtable):
"""
Checks whether the given BeautifulSoup tag is the table
the user intends to process.
"""
if soup is None or soup.name != 'table':
return False # Tag is not a <table>
elif 'table_id' not in htmldict:
return numtable == 1
table_id = htmldict['table_id']
if isinstance(table_id, str):
return 'id' in soup.attrs and soup['id'] == table_id
elif isinstance(table_id, int):
return table_id == numtable
# Return False if an invalid parameter is given
return False
class HTMLInputter(core.BaseInputter):
"""
Input lines of HTML in a valid form.
This requires `BeautifulSoup
<http://www.crummy.com/software/BeautifulSoup/>`_ to be installed.
"""
def process_lines(self, lines):
"""
Convert the given input into a list of SoupString rows
for further processing.
"""
try:
from bs4 import BeautifulSoup
except ImportError:
raise core.OptionalTableImportError('BeautifulSoup must be '
'installed to read HTML tables')
if 'parser' not in self.html:
with warnings.catch_warnings():
# Ignore bs4 parser warning #4550.
warnings.filterwarnings('ignore', '.*no parser was explicitly specified.*')
soup = BeautifulSoup('\n'.join(lines))
else: # use a custom backend parser
soup = BeautifulSoup('\n'.join(lines), self.html['parser'])
tables = soup.find_all('table')
for i, possible_table in enumerate(tables):
if identify_table(possible_table, self.html, i + 1):
table = possible_table # Find the correct table
break
else:
if isinstance(self.html['table_id'], int):
err_descr = f"number {self.html['table_id']}"
else:
err_descr = f"id '{self.html['table_id']}'"
raise core.InconsistentTableError(
f'ERROR: HTML table {err_descr} not found')
# Get all table rows
soup_list = [SoupString(x) for x in table.find_all('tr')]
return soup_list
class HTMLSplitter(core.BaseSplitter):
"""
Split HTML table data.
"""
def __call__(self, lines):
"""
Return HTML data from lines as a generator.
"""
for line in lines:
if not isinstance(line, SoupString):
raise TypeError('HTML lines should be of type SoupString')
soup = line.soup
header_elements = soup.find_all('th')
if header_elements:
# Return multicolumns as tuples for HTMLHeader handling
yield [(el.text.strip(), el['colspan']) if el.has_attr('colspan')
else el.text.strip() for el in header_elements]
data_elements = soup.find_all('td')
if data_elements:
yield [el.text.strip() for el in data_elements]
if len(lines) == 0:
raise core.InconsistentTableError('HTML tables must contain data '
'in a <table> tag')
class HTMLOutputter(core.TableOutputter):
"""
Output the HTML data as an ``astropy.table.Table`` object.
This subclass allows for the final table to contain
multidimensional columns (defined using the colspan attribute
of <th>).
"""
default_converters = [core.convert_numpy(int),
core.convert_numpy(float),
core.convert_numpy(str)]
def __call__(self, cols, meta):
"""
Process the data in multidimensional columns.
"""
new_cols = []
col_num = 0
while col_num < len(cols):
col = cols[col_num]
if hasattr(col, 'colspan'):
# Join elements of spanned columns together into list of tuples
span_cols = cols[col_num:col_num + col.colspan]
new_col = core.Column(col.name)
new_col.str_vals = list(zip(*[x.str_vals for x in span_cols]))
new_cols.append(new_col)
col_num += col.colspan
else:
new_cols.append(col)
col_num += 1
return super().__call__(new_cols, meta)
class HTMLHeader(core.BaseHeader):
splitter_class = HTMLSplitter
def start_line(self, lines):
"""
Return the line number at which header data begins.
"""
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError('HTML lines should be of type SoupString')
soup = line.soup
if soup.th is not None:
return i
return None
def _set_cols_from_names(self):
"""
Set columns from header names, handling multicolumns appropriately.
"""
self.cols = []
new_names = []
for name in self.names:
if isinstance(name, tuple):
col = core.Column(name=name[0])
col.colspan = int(name[1])
self.cols.append(col)
new_names.append(name[0])
for i in range(1, int(name[1])):
# Add dummy columns
self.cols.append(core.Column(''))
new_names.append('')
else:
self.cols.append(core.Column(name=name))
new_names.append(name)
self.names = new_names
class HTMLData(core.BaseData):
splitter_class = HTMLSplitter
def start_line(self, lines):
"""
Return the line number at which table data begins.
"""
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError('HTML lines should be of type SoupString')
soup = line.soup
if soup.td is not None:
if soup.th is not None:
raise core.InconsistentTableError('HTML tables cannot '
'have headings and data in the same row')
return i
raise core.InconsistentTableError('No start line found for HTML data')
def end_line(self, lines):
"""
Return the line number at which table data ends.
"""
last_index = -1
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError('HTML lines should be of type SoupString')
soup = line.soup
if soup.td is not None:
last_index = i
if last_index == -1:
return None
return last_index + 1
class HTML(core.BaseReader):
"""HTML format table.
In order to customize input and output, a dict of parameters may
be passed to this class holding specific customizations.
**htmldict** : Dictionary of parameters for HTML input/output.
* css : Customized styling
If present, this parameter will be included in a <style>
tag and will define stylistic attributes of the output.
* table_id : ID for the input table
If a string, this defines the HTML id of the table to be processed.
If an integer, this specifies the index of the input table in the
available tables. Unless this parameter is given, the reader will
use the first table found in the input file.
* multicol : Use multi-dimensional columns for output
The writer will output tuples as elements of multi-dimensional
columns if this parameter is true, and if not then it will
use the syntax 1.36583e-13 .. 1.36583e-13 for output. If not
present, this parameter will be true by default.
* raw_html_cols : column name or list of names with raw HTML content
This allows one to include raw HTML content in the column output,
for instance to include link references in a table. This option
requires that the bleach package be installed. Only whitelisted
tags are allowed through for security reasons (see the
raw_html_clean_kwargs arg).
* raw_html_clean_kwargs : dict of keyword args controlling HTML cleaning
Raw HTML will be cleaned to prevent unsafe HTML from ending up in
the table output. This is done by calling ``bleach.clean(data,
**raw_html_clean_kwargs)``. For details on the available options
(e.g. tag whitelist) see:
https://bleach.readthedocs.io/en/latest/clean.html
* parser : Specific HTML parsing library to use
If specified, this specifies which HTML parsing library
BeautifulSoup should use as a backend. The options to choose
from are 'html.parser' (the standard library parser), 'lxml'
(the recommended parser), 'xml' (lxml's XML parser), and
'html5lib'. html5lib is a highly lenient parser and therefore
might work correctly for unusual input if a different parser
fails.
* jsfiles : list of js files to include when writing table.
* cssfiles : list of css files to include when writing table.
* js : js script to include in the body when writing table.
* table_class : css class for the table
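    A minimal usage sketch (the file name and dictionary values are illustrative)::
        >>> from astropy.io import ascii
        >>> tbl = ascii.read('data.html', format='html',
        ...                  htmldict={'table_id': 2})  # doctest: +SKIP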
"""
_format_name = 'html'
_io_registry_format_aliases = ['html']
_io_registry_suffix = '.html'
_description = 'HTML table'
header_class = HTMLHeader
data_class = HTMLData
inputter_class = HTMLInputter
max_ndim = 2 # HTML supports writing 2-d columns with shape (n, m)
def __init__(self, htmldict={}):
"""
Initialize classes for HTML reading and writing.
"""
super().__init__()
self.html = deepcopy(htmldict)
if 'multicol' not in htmldict:
self.html['multicol'] = True
if 'table_id' not in htmldict:
self.html['table_id'] = 1
self.inputter.html = self.html
def read(self, table):
"""
Read the ``table`` in HTML format and return a resulting ``Table``.
"""
self.outputter = HTMLOutputter()
return super().read(table)
def write(self, table):
"""
Return data in ``table`` converted to HTML as a list of strings.
"""
        # Check that table has only 1-d or 2-d columns; anything above that fails.
self._check_multidim_table(table)
cols = list(table.columns.values())
self.data.header.cols = cols
self.data.cols = cols
if isinstance(self.data.fill_values, tuple):
self.data.fill_values = [self.data.fill_values]
self.data._set_fill_values(cols)
self.data._set_col_formats()
lines = []
# Set HTML escaping to False for any column in the raw_html_cols input
raw_html_cols = self.html.get('raw_html_cols', [])
if isinstance(raw_html_cols, str):
raw_html_cols = [raw_html_cols] # Allow for a single string as input
cols_escaped = [col.info.name not in raw_html_cols for col in cols]
# Kwargs that get passed on to bleach.clean() if that is available.
raw_html_clean_kwargs = self.html.get('raw_html_clean_kwargs', {})
# Use XMLWriter to output HTML to lines
w = writer.XMLWriter(ListWriter(lines))
with w.tag('html'):
with w.tag('head'):
# Declare encoding and set CSS style for table
with w.tag('meta', attrib={'charset': 'utf-8'}):
pass
with w.tag('meta', attrib={'http-equiv': 'Content-type',
'content': 'text/html;charset=UTF-8'}):
pass
if 'css' in self.html:
with w.tag('style'):
w.data(self.html['css'])
if 'cssfiles' in self.html:
for filename in self.html['cssfiles']:
with w.tag('link', rel="stylesheet", href=filename, type='text/css'):
pass
if 'jsfiles' in self.html:
for filename in self.html['jsfiles']:
with w.tag('script', src=filename):
w.data('') # need this instead of pass to get <script></script>
with w.tag('body'):
if 'js' in self.html:
with w.xml_cleaning_method('none'):
with w.tag('script'):
w.data(self.html['js'])
if isinstance(self.html['table_id'], str):
html_table_id = self.html['table_id']
else:
html_table_id = None
if 'table_class' in self.html:
html_table_class = self.html['table_class']
attrib = {"class": html_table_class}
else:
attrib = {}
with w.tag('table', id=html_table_id, attrib=attrib):
with w.tag('thead'):
with w.tag('tr'):
for col in cols:
if len(col.shape) > 1 and self.html['multicol']:
# Set colspan attribute for multicolumns
w.start('th', colspan=col.shape[1])
else:
w.start('th')
w.data(col.info.name.strip())
w.end(indent=False)
col_str_iters = []
new_cols_escaped = []
# Make a container to hold any new_col objects created
# below for multicolumn elements. This is purely to
# maintain a reference for these objects during
# subsequent iteration to format column values. This
# requires that the weakref info._parent be maintained.
new_cols = []
for col, col_escaped in zip(cols, cols_escaped):
if len(col.shape) > 1 and self.html['multicol']:
span = col.shape[1]
for i in range(span):
# Split up multicolumns into separate columns
new_col = Column([el[i] for el in col])
new_col_iter_str_vals = self.fill_values(
col, new_col.info.iter_str_vals())
col_str_iters.append(new_col_iter_str_vals)
new_cols_escaped.append(col_escaped)
new_cols.append(new_col)
else:
col_iter_str_vals = self.fill_values(col, col.info.iter_str_vals())
col_str_iters.append(col_iter_str_vals)
new_cols_escaped.append(col_escaped)
for row in zip(*col_str_iters):
with w.tag('tr'):
for el, col_escaped in zip(row, new_cols_escaped):
# Potentially disable HTML escaping for column
method = ('escape_xml' if col_escaped else 'bleach_clean')
with w.xml_cleaning_method(method, **raw_html_clean_kwargs):
w.start('td')
w.data(el.strip())
w.end(indent=False)
# Fixes XMLWriter's insertion of unwanted line breaks
return [''.join(lines)]
def fill_values(self, col, col_str_iters):
"""
Return an iterator of the values with replacements based on fill_values
"""
# check if the col is a masked column and has fill values
is_masked_column = hasattr(col, 'mask')
has_fill_values = hasattr(col, 'fill_values')
for idx, col_str in enumerate(col_str_iters):
if is_masked_column and has_fill_values:
if col.mask[idx]:
yield col.fill_values[core.masked]
continue
if has_fill_values:
if col_str in col.fill_values:
yield col.fill_values[col_str]
continue
yield col_str
|
ee8079714be65be23900f9c9ea85e48a75eb4beb25d1f025beb9101affc82416 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
ui.py:
Provides the main user functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft ([email protected])
"""
import re
import os
import sys
import copy
import time
import warnings
import contextlib
import collections
from io import StringIO
import numpy as np
from . import core
from . import basic
from . import cds
from . import mrt
from . import daophot
from . import ecsv
from . import sextractor
from . import ipac
from . import latex
from . import html
from . import rst
from . import fastbasic
from . import cparser
from . import fixedwidth
from .docs import READ_KWARG_TYPES, WRITE_KWARG_TYPES
from astropy.table import Table, MaskedColumn
from astropy.utils.data import get_readable_fileobj
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
_read_trace = []
# Default setting for guess parameter in read()
_GUESS = True
def _probably_html(table, maxchars=100000):
"""
Determine if ``table`` probably contains HTML content. See PR #3693 and issue
#3691 for context.
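    For instance, a string containing ``<table>``, ``<tr>`` and ``<td>`` tags
    is flagged as probable HTML (illustrative)::
        >>> _probably_html('<table><tr><td>1</td></tr></table>')  # doctest: +SKIP
        True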
"""
if not isinstance(table, str):
try:
# If table is an iterable (list of strings) then take the first
# maxchars of these. Make sure this is something with random
# access to exclude a file-like object
table[0]
table[:1]
size = 0
for i, line in enumerate(table):
size += len(line)
if size > maxchars:
table = table[:i + 1]
break
table = os.linesep.join(table)
except Exception:
pass
if isinstance(table, str):
# Look for signs of an HTML table in the first maxchars characters
table = table[:maxchars]
# URL ending in .htm or .html
if re.match(r'( http[s]? | ftp | file ) :// .+ \.htm[l]?$', table,
re.IGNORECASE | re.VERBOSE):
return True
# Filename ending in .htm or .html which exists
if (re.search(r'\.htm[l]?$', table[-5:], re.IGNORECASE) and
os.path.exists(os.path.expanduser(table))):
return True
# Table starts with HTML document type declaration
if re.match(r'\s* <! \s* DOCTYPE \s* HTML', table, re.IGNORECASE | re.VERBOSE):
return True
# Look for <TABLE .. >, <TR .. >, <TD .. > tag openers.
if all(re.search(fr'< \s* {element} [^>]* >', table, re.IGNORECASE | re.VERBOSE)
for element in ('table', 'tr', 'td')):
return True
return False
def set_guess(guess):
"""
Set the default value of the ``guess`` parameter for read()
Parameters
----------
guess : bool
New default ``guess`` value (e.g., True or False)
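    For example, to disable format guessing globally (a minimal sketch)::
        >>> from astropy.io import ascii
        >>> ascii.set_guess(False)  # doctest: +SKIP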
"""
global _GUESS
_GUESS = guess
def get_reader(Reader=None, Inputter=None, Outputter=None, **kwargs):
"""
Initialize a table reader allowing for common customizations. Most of the
default behavior for various parameters is determined by the Reader class.
Parameters
----------
Reader : `~astropy.io.ascii.BaseReader`
Reader class (DEPRECATED). Default is :class:`Basic`.
Inputter : `~astropy.io.ascii.BaseInputter`
Inputter class
Outputter : `~astropy.io.ascii.BaseOutputter`
Outputter class
delimiter : str
Column delimiter string
comment : str
Regular expression defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
header_start : int
Line index for the header line not counting comment or blank lines.
A line with only whitespace is considered blank.
data_start : int
Line index for the start of data not counting comment or blank lines.
A line with only whitespace is considered blank.
data_end : int
Line index for the end of data not counting comment or blank lines.
This value can be negative to count from the end.
converters : dict
Dict of converters.
data_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split data columns.
header_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split header columns.
names : list
List of names corresponding to each data column.
include_names : list, optional
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``).
fill_values : tuple, list of tuple
Specification of fill values for bad or missing table values.
fill_include_names : list
List of names to include in fill_values.
fill_exclude_names : list
List of names to exclude from fill_values (applied after ``fill_include_names``).
Returns
-------
reader : `~astropy.io.ascii.BaseReader` subclass
ASCII format reader instance
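
    Examples
    --------
    A minimal sketch (the pipe-delimited file ``table.dat`` is hypothetical)::

        >>> from astropy.io import ascii
        >>> reader = ascii.get_reader(delimiter='|')
        >>> dat = reader.read('table.dat')  # doctest: +SKIP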
"""
# This function is a light wrapper around core._get_reader to provide a
# public interface with a default Reader.
if Reader is None:
# Default reader is Basic unless fast reader is forced
fast_reader = _get_fast_reader_dict(kwargs)
if fast_reader['enable'] == 'force':
Reader = fastbasic.FastBasic
else:
Reader = basic.Basic
reader = core._get_reader(Reader, Inputter=Inputter, Outputter=Outputter, **kwargs)
return reader
def _get_format_class(format, ReaderWriter, label):
if format is not None and ReaderWriter is not None:
raise ValueError(f'Cannot supply both format and {label} keywords')
if format is not None:
if format in core.FORMAT_CLASSES:
ReaderWriter = core.FORMAT_CLASSES[format]
else:
raise ValueError('ASCII format {!r} not in allowed list {}'
.format(format, sorted(core.FORMAT_CLASSES)))
return ReaderWriter
def _get_fast_reader_dict(kwargs):
"""Convert 'fast_reader' key in kwargs into a dict if not already and make sure
'enable' key is available.
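
    For example, a bare boolean is wrapped in a dict, while a dict input gets
    a forced 'enable' key::

        >>> _get_fast_reader_dict({})
        {'enable': True}
        >>> _get_fast_reader_dict({'fast_reader': {'chunk_size': 100}})
        {'chunk_size': 100, 'enable': 'force'}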
"""
fast_reader = copy.deepcopy(kwargs.get('fast_reader', True))
if isinstance(fast_reader, dict):
fast_reader.setdefault('enable', 'force')
else:
fast_reader = {'enable': fast_reader}
return fast_reader
def _validate_read_write_kwargs(read_write, **kwargs):
"""Validate types of keyword arg inputs to read() or write()."""
def is_ducktype(val, cls):
"""Check if ``val`` is an instance of ``cls`` or "seems" like one:
        ``cls(val) == val`` does not raise an exception and is `True`. In
this way you can pass in ``np.int16(2)`` and have that count as `int`.
This has a special-case of ``cls`` being 'list-like', meaning it is
an iterable but not a string.
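
        For example, ``np.int16(2)`` quacks like an ``int``, a list is
        'list-like', but a string is not::

            >>> is_ducktype(np.int16(2), int)
            True
            >>> is_ducktype(['a', 'b'], 'list-like')
            True
            >>> is_ducktype('abc', 'list-like')
            False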
"""
if cls == 'list-like':
ok = (not isinstance(val, str)
and isinstance(val, collections.abc.Iterable))
else:
ok = isinstance(val, cls)
if not ok:
            # See if ``val`` walks and quacks like a ``cls``.
try:
new_val = cls(val)
assert new_val == val
except Exception:
ok = False
else:
ok = True
return ok
kwarg_types = READ_KWARG_TYPES if read_write == 'read' else WRITE_KWARG_TYPES
for arg, val in kwargs.items():
# Kwarg type checking is opt-in, so kwargs not in the list are considered OK.
# This reflects that some readers allow additional arguments that may not
        # be well-specified, e.g. ``__init__(self, **kwargs)`` is an option.
if arg not in kwarg_types or val is None:
continue
# Single type or tuple of types for this arg (like isinstance())
types = kwarg_types[arg]
err_msg = (f"{read_write}() argument '{arg}' must be a "
f"{types} object, got {type(val)} instead")
# Force `types` to be a tuple for the any() check below
if not isinstance(types, tuple):
types = (types,)
if not any(is_ducktype(val, cls) for cls in types):
raise TypeError(err_msg)
def _expand_user_if_path(argument):
if isinstance(argument, (str, bytes, os.PathLike)):
# For the `read()` method, a `str` input can be either a file path or
# the table data itself. File names for io.ascii cannot have newlines
# in them and io.ascii does not accept table data as `bytes`, so we can
# attempt to detect data strings like this.
is_str_data = (isinstance(argument, str)
and ('\n' in argument or '\r' in argument))
if not is_str_data:
# Remain conservative in expanding the presumed-path
ex_user = os.path.expanduser(argument)
if os.path.exists(ex_user):
argument = ex_user
return argument
def read(table, guess=None, **kwargs):
    # This is the final output from reading. Static analysis indicates the
    # reading logic (which is indeed complex) might not define `dat`, so
    # define it here.
dat = None
# Docstring defined below
del _read_trace[:]
# Downstream readers might munge kwargs
kwargs = copy.deepcopy(kwargs)
_validate_read_write_kwargs('read', **kwargs)
# Convert 'fast_reader' key in kwargs into a dict if not already and make sure
# 'enable' key is available.
fast_reader = _get_fast_reader_dict(kwargs)
kwargs['fast_reader'] = fast_reader
if fast_reader['enable'] and fast_reader.get('chunk_size'):
return _read_in_chunks(table, **kwargs)
if 'fill_values' not in kwargs:
kwargs['fill_values'] = [('', '0')]
# If an Outputter is supplied in kwargs that will take precedence.
if 'Outputter' in kwargs: # user specified Outputter, not supported for fast reading
fast_reader['enable'] = False
format = kwargs.get('format')
# Dictionary arguments are passed by reference per default and thus need
# special protection:
new_kwargs = copy.deepcopy(kwargs)
kwargs['fast_reader'] = copy.deepcopy(fast_reader)
# Get the Reader class based on possible format and Reader kwarg inputs.
Reader = _get_format_class(format, kwargs.get('Reader'), 'Reader')
if Reader is not None:
new_kwargs['Reader'] = Reader
format = Reader._format_name
# Remove format keyword if there, this is only allowed in read() not get_reader()
if 'format' in new_kwargs:
del new_kwargs['format']
if guess is None:
guess = _GUESS
if guess:
# If ``table`` is probably an HTML file then tell guess function to add
# the HTML reader at the top of the guess list. This is in response to
# issue #3691 (and others) where libxml can segfault on a long non-HTML
# file, thus prompting removal of the HTML reader from the default
# guess list.
new_kwargs['guess_html'] = _probably_html(table)
# If `table` is a filename or readable file object then read in the
# file now. This prevents problems in Python 3 with the file object
# getting closed or left at the file end. See #3132, #3013, #3109,
# #2001. If a `readme` arg was passed that implies CDS format, in
# which case the original `table` as the data filename must be left
# intact.
if 'readme' not in new_kwargs:
encoding = kwargs.get('encoding')
try:
table = _expand_user_if_path(table)
with get_readable_fileobj(table, encoding=encoding) as fileobj:
table = fileobj.read()
except ValueError: # unreadable or invalid binary file
raise
except Exception:
pass
else:
# Ensure that `table` has at least one \r or \n in it
# so that the core.BaseInputter test of
# ('\n' not in table and '\r' not in table)
# will fail and so `table` cannot be interpreted there
# as a filename. See #4160.
if not re.search(r'[\r\n]', table):
table = table + os.linesep
# If the table got successfully read then look at the content
# to see if is probably HTML, but only if it wasn't already
# identified as HTML based on the filename.
if not new_kwargs['guess_html']:
new_kwargs['guess_html'] = _probably_html(table)
# Get the table from guess in ``dat``. If ``dat`` comes back as None
# then there was just one set of kwargs in the guess list so fall
# through below to the non-guess way so that any problems result in a
# more useful traceback.
dat = _guess(table, new_kwargs, format, fast_reader)
if dat is None:
guess = False
if not guess:
if format is None:
reader = get_reader(**new_kwargs)
format = reader._format_name
table = _expand_user_if_path(table)
# Try the fast reader version of `format` first if applicable. Note that
# if user specified a fast format (e.g. format='fast_basic') this test
# will fail and the else-clause below will be used.
if fast_reader['enable'] and f'fast_{format}' in core.FAST_CLASSES:
fast_kwargs = copy.deepcopy(new_kwargs)
fast_kwargs['Reader'] = core.FAST_CLASSES[f'fast_{format}']
fast_reader_rdr = get_reader(**fast_kwargs)
try:
dat = fast_reader_rdr.read(table)
_read_trace.append({'kwargs': copy.deepcopy(fast_kwargs),
'Reader': fast_reader_rdr.__class__,
'status': 'Success with fast reader (no guessing)'})
except (core.ParameterError, cparser.CParserError, UnicodeEncodeError) as err:
# special testing value to avoid falling back on the slow reader
if fast_reader['enable'] == 'force':
raise core.InconsistentTableError(
f'fast reader {fast_reader_rdr.__class__} exception: {err}')
# If the fast reader doesn't work, try the slow version
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(new_kwargs),
'Reader': reader.__class__,
'status': 'Success with slow reader after failing'
' with fast (no guessing)'})
else:
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(new_kwargs),
'Reader': reader.__class__,
'status': 'Success with specified Reader class '
'(no guessing)'})
# Static analysis (pyright) indicates `dat` might be left undefined, so just
# to be sure define it at the beginning and check here.
if dat is None:
raise RuntimeError('read() function failed due to code logic error, '
'please report this bug on github')
return dat
read.__doc__ = core.READ_DOCSTRING
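# A minimal usage sketch of read() (the file name here is hypothetical):
#
#     >>> from astropy.io import ascii
#     >>> dat = ascii.read('table.dat', format='basic', guess=False)
#
# With guess=True (the default) the machinery below cycles through candidate
# formats until one reads the table successfully.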
def _guess(table, read_kwargs, format, fast_reader):
"""
Try to read the table using various sets of keyword args. Start with the
standard guess list and filter to make it unique and consistent with
user-supplied read keyword args. Finally, if none of those work then
try the original user-supplied keyword args.
Parameters
----------
table : str, file-like, list
Input table as a file name, file-like object, list of strings, or
single newline-separated string.
read_kwargs : dict
Keyword arguments from user to be supplied to reader
format : str
Table format
fast_reader : dict
Options for the C engine fast reader. See read() function for details.
Returns
-------
dat : `~astropy.table.Table` or None
Output table or None if only one guess format was available
"""
# Keep a trace of all failed guesses kwarg
failed_kwargs = []
# Get an ordered list of read() keyword arg dicts that will be cycled
# through in order to guess the format.
full_list_guess = _get_guess_kwargs_list(read_kwargs)
# If a fast version of the reader is available, try that before the slow version
if (fast_reader['enable'] and format is not None and f'fast_{format}' in
core.FAST_CLASSES):
fast_kwargs = copy.deepcopy(read_kwargs)
fast_kwargs['Reader'] = core.FAST_CLASSES[f'fast_{format}']
full_list_guess = [fast_kwargs] + full_list_guess
else:
fast_kwargs = None
# Filter the full guess list so that each entry is consistent with user kwarg inputs.
# This also removes any duplicates from the list.
filtered_guess_kwargs = []
fast_reader = read_kwargs.get('fast_reader')
for guess_kwargs in full_list_guess:
# If user specified slow reader then skip all fast readers
if (fast_reader['enable'] is False
and guess_kwargs['Reader'] in core.FAST_CLASSES.values()):
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'Reader': guess_kwargs['Reader'].__class__,
'status': 'Disabled: reader only available in fast version',
'dt': f'{0.0:.3f} ms'})
continue
# If user required a fast reader then skip all non-fast readers
if (fast_reader['enable'] == 'force'
and guess_kwargs['Reader'] not in core.FAST_CLASSES.values()):
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'Reader': guess_kwargs['Reader'].__class__,
'status': 'Disabled: no fast version of reader available',
'dt': f'{0.0:.3f} ms'})
continue
guess_kwargs_ok = True # guess_kwargs are consistent with user_kwargs?
for key, val in read_kwargs.items():
# Do guess_kwargs.update(read_kwargs) except that if guess_args has
# a conflicting key/val pair then skip this guess entirely.
if key not in guess_kwargs:
guess_kwargs[key] = copy.deepcopy(val)
elif val != guess_kwargs[key] and guess_kwargs != fast_kwargs:
guess_kwargs_ok = False
break
if not guess_kwargs_ok:
# User-supplied kwarg is inconsistent with the guess-supplied kwarg, e.g.
# user supplies delimiter="|" but the guess wants to try delimiter=" ",
# so skip the guess entirely.
continue
# Add the guess_kwargs to filtered list only if it is not already there.
if guess_kwargs not in filtered_guess_kwargs:
filtered_guess_kwargs.append(guess_kwargs)
# If there are not at least two formats to guess then return no table
# (None) to indicate that guessing did not occur. In that case the
# non-guess read() will occur and any problems will result in a more useful
# traceback.
if len(filtered_guess_kwargs) <= 1:
return None
# Define whitelist of exceptions that are expected from readers when
# processing invalid inputs. Note that OSError must fall through here
# so one cannot simply catch any exception.
guess_exception_classes = (core.InconsistentTableError, ValueError, TypeError,
AttributeError, core.OptionalTableImportError,
core.ParameterError, cparser.CParserError)
# Now cycle through each possible reader and associated keyword arguments.
# Try to read the table using those args, and if an exception occurs then
# keep track of the failed guess and move on.
for guess_kwargs in filtered_guess_kwargs:
t0 = time.time()
try:
            # If guessing will try all Readers then use strict requirements on column names
if 'Reader' not in read_kwargs:
guess_kwargs['strict_names'] = True
reader = get_reader(**guess_kwargs)
reader.guessing = True
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'Reader': reader.__class__,
'status': 'Success (guessing)',
'dt': f'{(time.time() - t0) * 1000:.3f} ms'})
return dat
except guess_exception_classes as err:
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'status': f'{err.__class__.__name__}: {str(err)}',
'dt': f'{(time.time() - t0) * 1000:.3f} ms'})
failed_kwargs.append(guess_kwargs)
else:
# Failed all guesses, try the original read_kwargs without column requirements
try:
reader = get_reader(**read_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(read_kwargs),
'Reader': reader.__class__,
'status': 'Success with original kwargs without strict_names '
'(guessing)'})
return dat
except guess_exception_classes as err:
_read_trace.append({'kwargs': copy.deepcopy(read_kwargs),
'status': f'{err.__class__.__name__}: {str(err)}'})
failed_kwargs.append(read_kwargs)
lines = ['\nERROR: Unable to guess table format with the guesses listed below:']
for kwargs in failed_kwargs:
sorted_keys = sorted(x for x in sorted(kwargs) if x not in ('Reader', 'Outputter'))
reader_repr = repr(kwargs.get('Reader', basic.Basic))
keys_vals = ['Reader:' + re.search(r"\.(\w+)'>", reader_repr).group(1)]
kwargs_sorted = ((key, kwargs[key]) for key in sorted_keys)
keys_vals.extend([f'{key}: {val!r}' for key, val in kwargs_sorted])
lines.append(' '.join(keys_vals))
msg = ['',
'************************************************************************',
'** ERROR: Unable to guess table format with the guesses listed above. **',
'** **',
'** To figure out why the table did not read, use guess=False and **',
'** fast_reader=False, along with any appropriate arguments to read(). **',
'** In particular specify the format and any known attributes like the **',
'** delimiter. **',
'************************************************************************']
lines.extend(msg)
raise core.InconsistentTableError('\n'.join(lines))
def _get_guess_kwargs_list(read_kwargs):
"""
Get the full list of reader keyword argument dicts that are the basis
for the format guessing process. The returned full list will then be:
- Filtered to be consistent with user-supplied kwargs
- Cleaned to have only unique entries
- Used one by one to try reading the input table
Note that the order of the guess list has been tuned over years of usage.
Maintainers need to be very careful about any adjustments as the
reasoning may not be immediately evident in all cases.
This list can (and usually does) include duplicates. This is a result
of the order tuning, but these duplicates get removed later.
Parameters
----------
read_kwargs : dict
User-supplied read keyword args
Returns
-------
guess_kwargs_list : list
List of read format keyword arg dicts
"""
guess_kwargs_list = []
# If the table is probably HTML based on some heuristics then start with the
# HTML reader.
if read_kwargs.pop('guess_html', None):
guess_kwargs_list.append(dict(Reader=html.HTML))
# Start with ECSV because an ECSV file will be read by Basic. This format
# has very specific header requirements and fails out quickly.
guess_kwargs_list.append(dict(Reader=ecsv.Ecsv))
# Now try readers that accept the user-supplied keyword arguments
# (actually include all here - check for compatibility of arguments later).
# FixedWidthTwoLine would also be read by Basic, so it needs to come first;
# same for RST.
for reader in (fixedwidth.FixedWidthTwoLine, rst.RST,
fastbasic.FastBasic, basic.Basic,
fastbasic.FastRdb, basic.Rdb,
fastbasic.FastTab, basic.Tab,
cds.Cds, mrt.Mrt, daophot.Daophot, sextractor.SExtractor,
ipac.Ipac, latex.Latex, latex.AASTex):
guess_kwargs_list.append(dict(Reader=reader))
# Cycle through the basic-style readers using all combinations of delimiter
# and quotechar.
for Reader in (fastbasic.FastCommentedHeader, basic.CommentedHeader,
fastbasic.FastBasic, basic.Basic,
fastbasic.FastNoHeader, basic.NoHeader):
for delimiter in ("|", ",", " ", r"\s"):
for quotechar in ('"', "'"):
guess_kwargs_list.append(dict(
Reader=Reader, delimiter=delimiter, quotechar=quotechar))
return guess_kwargs_list
def _read_in_chunks(table, **kwargs):
"""
For fast_reader read the ``table`` in chunks and vstack to create
a single table, OR return a generator of chunk tables.
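
    A minimal sketch of how this path is reached from ``read()`` (the file
    name is hypothetical)::

        >>> from astropy.io import ascii
        >>> dat = ascii.read('big_table.csv', format='csv',
        ...                  fast_reader={'chunk_size': 100_000_000})  # doctest: +SKIP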
"""
fast_reader = kwargs['fast_reader']
chunk_size = fast_reader.pop('chunk_size')
chunk_generator = fast_reader.pop('chunk_generator', False)
fast_reader['parallel'] = False # No parallel with chunks
tbl_chunks = _read_in_chunks_generator(table, chunk_size, **kwargs)
if chunk_generator:
return tbl_chunks
tbl0 = next(tbl_chunks)
masked = tbl0.masked
# Numpy won't allow resizing the original so make a copy here.
out_cols = {col.name: col.data.copy() for col in tbl0.itercols()}
str_kinds = ('S', 'U')
for tbl in tbl_chunks:
masked |= tbl.masked
for name, col in tbl.columns.items():
# Concatenate current column data and new column data
# If one of the inputs is string-like and the other is not, then
# convert the non-string to a string. In a perfect world this would
# be handled by numpy, but as of numpy 1.13 this results in a string
# dtype that is too long (https://github.com/numpy/numpy/issues/10062).
col1, col2 = out_cols[name], col.data
if col1.dtype.kind in str_kinds and col2.dtype.kind not in str_kinds:
col2 = np.array(col2.tolist(), dtype=col1.dtype.kind)
elif col2.dtype.kind in str_kinds and col1.dtype.kind not in str_kinds:
col1 = np.array(col1.tolist(), dtype=col2.dtype.kind)
# Choose either masked or normal concatenation
concatenate = np.ma.concatenate if masked else np.concatenate
out_cols[name] = concatenate([col1, col2])
# Make final table from numpy arrays, converting dict to list
out_cols = [out_cols[name] for name in tbl0.colnames]
out = tbl0.__class__(out_cols, names=tbl0.colnames, meta=tbl0.meta,
copy=False)
return out
def _read_in_chunks_generator(table, chunk_size, **kwargs):
"""
For fast_reader read the ``table`` in chunks and return a generator
of tables for each chunk.
"""
@contextlib.contextmanager
def passthrough_fileobj(fileobj, encoding=None):
"""Stub for get_readable_fileobj, which does not seem to work in Py3
for input file-like object, see #6460"""
yield fileobj
# Set up to coerce `table` input into a readable file object by selecting
# an appropriate function.
# Convert table-as-string to a File object. Finding a newline implies
# that the string is not a filename.
if (isinstance(table, str) and ('\n' in table or '\r' in table)):
table = StringIO(table)
fileobj_context = passthrough_fileobj
elif hasattr(table, 'read') and hasattr(table, 'seek'):
fileobj_context = passthrough_fileobj
else:
# string filename or pathlib
fileobj_context = get_readable_fileobj
# Set up for iterating over chunks
kwargs['fast_reader']['return_header_chars'] = True
header = '' # Table header (up to start of data)
prev_chunk_chars = '' # Chars from previous chunk after last newline
first_chunk = True # True for the first chunk, False afterward
with fileobj_context(table, encoding=kwargs.get('encoding')) as fh:
while True:
chunk = fh.read(chunk_size)
# Got fewer chars than requested, must be end of file
final_chunk = len(chunk) < chunk_size
# If this is the last chunk and there is only whitespace then break
if final_chunk and not re.search(r'\S', chunk):
break
# Step backwards from last character in chunk and find first newline
for idx in range(len(chunk) - 1, -1, -1):
if final_chunk or chunk[idx] == '\n':
break
else:
raise ValueError('no newline found in chunk (chunk_size too small?)')
            # Prepend the header to the chunk content up to (and including) the
            # last newline. Make sure the small strings are concatenated first.
complete_chunk = (header + prev_chunk_chars) + chunk[:idx + 1]
prev_chunk_chars = chunk[idx + 1:]
# Now read the chunk as a complete table
tbl = read(complete_chunk, guess=False, **kwargs)
# For the first chunk pop the meta key which contains the header
# characters (everything up to the start of data) then fix kwargs
# so it doesn't return that in meta any more.
if first_chunk:
header = tbl.meta.pop('__ascii_fast_reader_header_chars__')
first_chunk = False
yield tbl
if final_chunk:
break
extra_writer_pars = ('delimiter', 'comment', 'quotechar', 'formats',
'names', 'include_names', 'exclude_names', 'strip_whitespace')
def get_writer(Writer=None, fast_writer=True, **kwargs):
"""
Initialize a table writer allowing for common customizations. Most of the
default behavior for various parameters is determined by the Writer class.
Parameters
----------
Writer : ``Writer``
Writer class (DEPRECATED). Defaults to :class:`Basic`.
delimiter : str
Column delimiter string
comment : str
String defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
formats : dict
Dictionary of format specifiers or formatting functions
strip_whitespace : bool
Strip surrounding whitespace from column values.
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fast_writer : bool
Whether to use the fast Cython writer.
Returns
-------
writer : `~astropy.io.ascii.BaseReader` subclass
ASCII format writer instance
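
    Examples
    --------
    A minimal sketch (``tbl`` stands for any `~astropy.table.Table`)::

        >>> from astropy.io import ascii
        >>> writer = ascii.get_writer(delimiter=',')
        >>> lines = writer.write(tbl)  # doctest: +SKIP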
"""
if Writer is None:
Writer = basic.Basic
if 'strip_whitespace' not in kwargs:
kwargs['strip_whitespace'] = True
writer = core._get_writer(Writer, fast_writer, **kwargs)
# Handle the corner case of wanting to disable writing table comments for the
# commented_header format. This format *requires* a string for `write_comment`
# because that is used for the header column row, so it is not possible to
# set the input `comment` to None. Without adding a new keyword or assuming
# a default comment character, there is no other option but to tell user to
# simply remove the meta['comments'].
if (isinstance(writer, (basic.CommentedHeader, fastbasic.FastCommentedHeader))
and not isinstance(kwargs.get('comment', ''), str)):
raise ValueError("for the commented_header writer you must supply a string\n"
"value for the `comment` keyword. In order to disable writing\n"
"table comments use `del t.meta['comments']` prior to writing.")
return writer
def write(table, output=None, format=None, Writer=None, fast_writer=True, *,
overwrite=False, **kwargs):
# Docstring inserted below
_validate_read_write_kwargs('write', format=format, fast_writer=fast_writer,
overwrite=overwrite, **kwargs)
if isinstance(output, (str, bytes, os.PathLike)):
output = os.path.expanduser(output)
if not overwrite and os.path.lexists(output):
raise OSError(NOT_OVERWRITING_MSG.format(output))
if output is None:
output = sys.stdout
# Ensure that `table` is a Table subclass.
names = kwargs.get('names')
if isinstance(table, Table):
        # While we are only going to read data from columns, we may need
        # to adjust info attributes such as format, so we make a shallow copy.
table = table.__class__(table, names=names, copy=False)
else:
# Otherwise, create a table from the input.
table = Table(table, names=names, copy=False)
table0 = table[:0].copy()
core._apply_include_exclude_names(table0, kwargs.get('names'),
kwargs.get('include_names'), kwargs.get('exclude_names'))
diff_format_with_names = set(kwargs.get('formats', [])) - set(table0.colnames)
if diff_format_with_names:
warnings.warn(
'The key(s) {} specified in the formats argument do not match a column name.'
.format(diff_format_with_names), AstropyWarning)
if table.has_mixin_columns:
fast_writer = False
Writer = _get_format_class(format, Writer, 'Writer')
writer = get_writer(Writer=Writer, fast_writer=fast_writer, **kwargs)
if writer._format_name in core.FAST_CLASSES:
writer.write(table, output)
return
lines = writer.write(table)
# Write the lines to output
outstr = os.linesep.join(lines)
if not hasattr(output, 'write'):
# NOTE: we need to specify newline='', otherwise the default
# behavior is for Python to translate \r\n (which we write because
# of os.linesep) into \r\r\n. Specifying newline='' disables any
# auto-translation.
output = open(output, 'w', newline='')
output.write(outstr)
output.write(os.linesep)
output.close()
else:
output.write(outstr)
output.write(os.linesep)
write.__doc__ = core.WRITE_DOCSTRING
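# A minimal usage sketch of write() (the table and file name are hypothetical):
#
#     >>> from astropy.io import ascii
#     >>> ascii.write(dat, 'out.csv', format='csv', overwrite=True)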
def get_read_trace():
"""
Return a traceback of the attempted read formats for the last call to
`~astropy.io.ascii.read` where guessing was enabled. This is primarily for
debugging.
The return value is a list of dicts, where each dict includes the keyword
args ``kwargs`` used in the read call and the returned ``status``.
Returns
-------
trace : list of dict
Ordered list of format guesses and status
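
    Examples
    --------
    A minimal sketch (the file name is hypothetical)::

        >>> from astropy.io import ascii
        >>> dat = ascii.read('table.dat')  # doctest: +SKIP
        >>> for entry in ascii.get_read_trace():  # doctest: +SKIP
        ...     print(entry['status'])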
"""
return copy.deepcopy(_read_trace)
|
f32408958614a66f9b6dfbce30ea352f819b54ce0713398a74ee520ff08a7a27 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
An extensible ASCII table reader and writer.
Classes to read DAOphot table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
import numpy as np
import itertools as itt
from collections import defaultdict, OrderedDict
from . import core
from . import fixedwidth
from .misc import first_true_index, first_false_index, groupmore
class DaophotHeader(core.BaseHeader):
"""
Read the header from a file produced by the IRAF DAOphot routine.
"""
comment = r'\s*#K'
# Regex for extracting the format strings
re_format = re.compile(r'%-?(\d+)\.?\d?[sdfg]')
re_header_keyword = re.compile(r'[#]K'
r'\s+ (?P<name> \w+)'
r'\s* = (?P<stuff> .+) $',
re.VERBOSE)
aperture_values = ()
def __init__(self):
core.BaseHeader.__init__(self)
def parse_col_defs(self, grouped_lines_dict):
"""
Parse a series of column definition lines like below. There may be several
such blocks in a single file (where continuation characters have already been
stripped).
#N ID XCENTER YCENTER MAG MERR MSKY NITER
#U ## pixels pixels magnitudes magnitudes counts ##
#F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d
"""
line_ids = ('#N', '#U', '#F')
coldef_dict = defaultdict(list)
# Function to strip identifier lines
stripper = lambda s: s[2:].strip(' \\')
for defblock in zip(*map(grouped_lines_dict.get, line_ids)):
for key, line in zip(line_ids, map(stripper, defblock)):
coldef_dict[key].append(line.split())
# Save the original columns so we can use it later to reconstruct the
# original header for writing
if self.data.is_multiline:
# Database contains multi-aperture data.
# Autogen column names, units, formats from last row of column headers
last_names, last_units, last_formats = list(zip(*map(coldef_dict.get, line_ids)))[-1]
N_multiline = len(self.data.first_block)
for i in np.arange(1, N_multiline + 1).astype('U2'):
# extra column names eg. RAPERT2, SUM2 etc...
extended_names = list(map(''.join, zip(last_names, itt.repeat(i))))
if i == '1': # Enumerate the names starting at 1
coldef_dict['#N'][-1] = extended_names
else:
coldef_dict['#N'].append(extended_names)
coldef_dict['#U'].append(last_units)
coldef_dict['#F'].append(last_formats)
# Get column widths from column format specifiers
get_col_width = lambda s: int(self.re_format.search(s).groups()[0])
col_widths = [[get_col_width(f) for f in formats]
for formats in coldef_dict['#F']]
# original data format might be shorter than 80 characters and filled with spaces
row_widths = np.fromiter(map(sum, col_widths), int)
row_short = Daophot.table_width - row_widths
# fix last column widths
for w, r in zip(col_widths, row_short):
w[-1] += r
self.col_widths = col_widths
# merge the multi-line header data into single line data
coldef_dict = {k: sum(v, []) for (k, v) in coldef_dict.items()}
return coldef_dict
def update_meta(self, lines, meta):
"""
Extract table-level keywords for DAOphot table. These are indicated by
a leading '#K ' prefix.
"""
table_meta = meta['table']
# self.lines = self.get_header_lines(lines)
Nlines = len(self.lines)
if Nlines > 0:
            # Group the header lines according to their line identifiers (#K,
            # #N, #U, #F or just # for a spacer line). First define a helper
            # function that grabs the line identifier.
get_line_id = lambda s: s.split(None, 1)[0]
# Group lines by the line identifier ('#N', '#U', '#F', '#K') and
# capture line index
gid, groups = zip(*groupmore(get_line_id, self.lines, range(Nlines)))
# Groups of lines and their indices
grouped_lines, gix = zip(*groups)
# Dict of line groups keyed by line identifiers
grouped_lines_dict = dict(zip(gid, grouped_lines))
# Update the table_meta keywords if necessary
if '#K' in grouped_lines_dict:
keywords = OrderedDict(map(self.extract_keyword_line, grouped_lines_dict['#K']))
table_meta['keywords'] = keywords
coldef_dict = self.parse_col_defs(grouped_lines_dict)
line_ids = ('#N', '#U', '#F')
for name, unit, fmt in zip(*map(coldef_dict.get, line_ids)):
meta['cols'][name] = {'unit': unit,
'format': fmt}
self.meta = meta
self.names = coldef_dict['#N']
def extract_keyword_line(self, line):
"""
Extract info from a header keyword line (#K)
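
        For example, given a keyword line like the ones in the ``Daophot``
        class docstring::

            >>> DaophotHeader().extract_keyword_line('#K USER = davis name %-23s')
            ('USER', {'units': 'name', 'format': '%-23s', 'value': 'davis'})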
"""
m = self.re_header_keyword.match(line)
if m:
vals = m.group('stuff').strip().rsplit(None, 2)
keyword_dict = {'units': vals[-2],
'format': vals[-1],
'value': (vals[0] if len(vals) > 2 else "")}
return m.group('name'), keyword_dict
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines`` for a DAOphot
header. The DAOphot header is specialized so that we just copy the entire BaseHeader
get_cols routine and modify as needed.
Parameters
----------
lines : list
List of table lines
Returns
-------
col : list
List of table Columns
"""
if not self.names:
raise core.InconsistentTableError('No column names found in DAOphot header')
# Create the list of io.ascii column objects
self._set_cols_from_names()
# Set unit and format as needed.
coldefs = self.meta['cols']
for col in self.cols:
unit, fmt = map(coldefs[col.name].get, ('unit', 'format'))
if unit != '##':
col.unit = unit
if fmt != '##':
col.format = fmt
# Set column start and end positions.
col_width = sum(self.col_widths, [])
ends = np.cumsum(col_width)
starts = ends - col_width
for i, col in enumerate(self.cols):
col.start, col.end = starts[i], ends[i]
col.span = col.end - col.start
if hasattr(col, 'format'):
if any(x in col.format for x in 'fg'):
col.type = core.FloatType
elif 'd' in col.format:
col.type = core.IntType
elif 's' in col.format:
col.type = core.StrType
# INDEF is the missing value marker
self.data.fill_values.append(('INDEF', '0'))
class DaophotData(core.BaseData):
splitter_class = fixedwidth.FixedWidthSplitter
start_line = 0
comment = r'\s*#'
def __init__(self):
core.BaseData.__init__(self)
self.is_multiline = False
def get_data_lines(self, lines):
# Special case for multiline daophot databases. Extract the aperture
# values from the first multiline data block
if self.is_multiline:
# Grab the first column of the special block (aperture values) and
# recreate the aperture description string
aplist = next(zip(*map(str.split, self.first_block)))
self.header.aperture_values = tuple(map(float, aplist))
        # Set self.data.data_lines to a slice of lines containing the data rows
core.BaseData.get_data_lines(self, lines)
class DaophotInputter(core.ContinuationLinesInputter):
continuation_char = '\\'
multiline_char = '*'
replace_char = ' '
re_multiline = re.compile(r'(#?)[^\\*#]*(\*?)(\\*) ?$')
def search_multiline(self, lines, depth=150):
"""
Search lines for special continuation character to determine number of
continued rows in a datablock. For efficiency, depth gives the upper
limit of lines to search.
"""
# The list of apertures given in the #K APERTURES keyword may not be
# complete!! This happens if the string description of the aperture
# list is longer than the field width of the #K APERTURES field. In
# this case we have to figure out how many apertures there are based on
# the file structure.
comment, special, cont = zip(*(self.re_multiline.search(line).groups()
for line in lines[:depth]))
# Find first non-comment line
data_start = first_false_index(comment)
# No data in lines[:depth]. This may be because there is no data in
# the file, or because the header is really huge. If the latter,
# increasing the search depth should help
if data_start is None:
return None, None, lines[:depth]
header_lines = lines[:data_start]
# Find first line ending on special row continuation character '*'
# indexed relative to data_start
first_special = first_true_index(special[data_start:depth])
if first_special is None: # no special lines
return None, None, header_lines
        # last line ending on special '*', but not on line continuation '\'
last_special = first_false_index(special[data_start + first_special:depth])
# index relative to first_special
# if first_special is None: #no end of special lines within search
# depth! increase search depth return self.search_multiline( lines,
# depth=2*depth )
# indexing now relative to line[0]
markers = np.cumsum([data_start, first_special, last_special])
# multiline portion of first data block
multiline_block = lines[markers[1]:markers[-1]]
return markers, multiline_block, header_lines
def process_lines(self, lines):
markers, block, header = self.search_multiline(lines)
self.data.is_multiline = markers is not None
self.data.markers = markers
self.data.first_block = block
        # Set the header lines returned by the search as an attribute of the header
self.data.header.lines = header
if markers is not None:
lines = lines[markers[0]:]
continuation_char = self.continuation_char
multiline_char = self.multiline_char
replace_char = self.replace_char
parts = []
outlines = []
for i, line in enumerate(lines):
mo = self.re_multiline.search(line)
if mo:
comment, special, cont = mo.groups()
if comment or cont:
line = line.replace(continuation_char, replace_char)
if special:
line = line.replace(multiline_char, replace_char)
if cont and not comment:
parts.append(line)
if not cont:
parts.append(line)
outlines.append(''.join(parts))
parts = []
else:
raise core.InconsistentTableError('multiline re could not match line '
'{}: {}'.format(i, line))
return outlines
class Daophot(core.BaseReader):
"""
DAOphot format table.
Example::
#K MERGERAD = INDEF scaleunit %-23.7g
#K IRAF = NOAO/IRAFV2.10EXPORT version %-23s
#K USER = davis name %-23s
#K HOST = tucana computer %-23s
#
#N ID XCENTER YCENTER MAG MERR MSKY NITER \\
#U ## pixels pixels magnitudes magnitudes counts ## \\
#F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d
#
#N SHARPNESS CHI PIER PERROR \\
#U ## ## ## perrors \\
#F %-23.3f %-12.3f %-6d %-13s
#
14 138.538 INDEF 15.461 0.003 34.85955 4 \\
-0.032 0.802 0 No_error
The keywords defined in the #K records are available via the output table
``meta`` attribute::
>>> import os
>>> from astropy.io import ascii
>>> filename = os.path.join(ascii.__path__[0], 'tests/data/daophot.dat')
>>> data = ascii.read(filename)
>>> for name, keyword in data.meta['keywords'].items():
... print(name, keyword['value'], keyword['units'], keyword['format'])
...
MERGERAD INDEF scaleunit %-23.7g
IRAF NOAO/IRAFV2.10EXPORT version %-23s
USER name %-23s
...
The unit and formats are available in the output table columns::
>>> for colname in data.colnames:
... col = data[colname]
... print(colname, col.unit, col.format)
...
ID None %-9d
XCENTER pixels %-10.3f
YCENTER pixels %-10.3f
...
Any column values of INDEF are interpreted as a missing value and will be
masked out in the resultant table.
In case of multi-aperture daophot files containing repeated entries for the last
row of fields, extra unique column names will be created by suffixing
corresponding field names with numbers starting from 2 to N (where N is the
total number of apertures).
For example,
first aperture radius will be RAPERT and corresponding magnitude will be MAG,
second aperture radius will be RAPERT2 and corresponding magnitude will be MAG2,
third aperture radius will be RAPERT3 and corresponding magnitude will be MAG3,
and so on.
"""
_format_name = 'daophot'
_io_registry_format_aliases = ['daophot']
_io_registry_can_write = False
_description = 'IRAF DAOphot format table'
header_class = DaophotHeader
data_class = DaophotData
inputter_class = DaophotInputter
table_width = 80
def __init__(self):
core.BaseReader.__init__(self)
# The inputter needs to know about the data (see DaophotInputter.process_lines)
self.inputter.data = self.data
def write(self, table=None):
raise NotImplementedError
|
e34f42f7c6e900f584e06a09544c607143069dc67212d65202f74e5af3ed5296 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains functions for reading and writing HDF5 tables that are
not meant to be used directly, but instead are available as readers/writers in
`astropy.table`. See :ref:`astropy:table_io` for more details.
"""
import os
import warnings
import numpy as np
# NOTE: Do not import anything from astropy.table here.
# https://github.com/astropy/astropy/issues/6604
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
HDF5_SIGNATURE = b'\x89HDF\r\n\x1a\n'
META_KEY = '__table_column_meta__'
__all__ = ['read_table_hdf5', 'write_table_hdf5']
def meta_path(path):
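    # e.g. meta_path('data') -> 'data.__table_column_meta__'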
return path + '.' + META_KEY
def _find_all_structured_arrays(handle):
"""
Find all structured arrays in an HDF5 file
"""
import h5py
structured_arrays = []
def append_structured_arrays(name, obj):
if isinstance(obj, h5py.Dataset) and obj.dtype.kind == 'V':
structured_arrays.append(name)
handle.visititems(append_structured_arrays)
return structured_arrays
def is_hdf5(origin, filepath, fileobj, *args, **kwargs):
if fileobj is not None:
loc = fileobj.tell()
try:
signature = fileobj.read(8)
finally:
fileobj.seek(loc)
return signature == HDF5_SIGNATURE
elif filepath is not None:
return filepath.endswith(('.hdf5', '.h5'))
try:
import h5py
except ImportError:
return False
else:
return isinstance(args[0], (h5py.File, h5py.Group, h5py.Dataset))
def read_table_hdf5(input, path=None, character_as_bytes=True):
"""
Read a Table object from an HDF5 file
This requires `h5py <http://www.h5py.org/>`_ to be installed. If more than one
table is present in the HDF5 file or group, the first table is read in and
a warning is displayed.
Parameters
----------
input : str or :class:`h5py.File` or :class:`h5py.Group` or
            :class:`h5py.Dataset`
        If a string, the filename to read the table from. If an h5py object,
        either the file or the group object to read the table from.
path : str
The path from which to read the table inside the HDF5 file.
This should be relative to the input file or group.
character_as_bytes : bool
If `True` then Table columns are left as bytes.
If `False` then Table columns are converted to unicode.
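
    Examples
    --------
    A minimal sketch (the file name and group path are hypothetical)::

        >>> from astropy.io.misc.hdf5 import read_table_hdf5
        >>> t = read_table_hdf5('observations.hdf5', path='group/data')  # doctest: +SKIP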
"""
try:
import h5py
except ImportError:
raise Exception("h5py is required to read and write HDF5 files")
    # This function is recursive, and only gets to actually reading the table
    # when the input is an hdf5 Dataset. Moreover, the input variable is
    # changed in place.
# Here, we save its value to be used at the end when the conditions are
# right.
input_save = input
if isinstance(input, (h5py.File, h5py.Group)):
# If a path was specified, follow the path
if path is not None:
try:
input = input[path]
except (KeyError, ValueError):
raise OSError(f"Path {path} does not exist")
# `input` is now either a group or a dataset. If it is a group, we
# will search for all structured arrays inside the group, and if there
# is one we can proceed otherwise an error is raised. If it is a
# dataset, we just proceed with the reading.
if isinstance(input, h5py.Group):
# Find all structured arrays in group
arrays = _find_all_structured_arrays(input)
if len(arrays) == 0:
raise ValueError(f"no table found in HDF5 group {path}")
elif len(arrays) > 0:
path = arrays[0] if path is None else path + '/' + arrays[0]
if len(arrays) > 1:
warnings.warn("path= was not specified but multiple tables"
" are present, reading in first available"
" table (path={})".format(path),
AstropyUserWarning)
            return read_table_hdf5(input, path=path, character_as_bytes=character_as_bytes)
elif not isinstance(input, h5py.Dataset):
# If a file object was passed, then we need to extract the filename
# because h5py cannot properly read in file objects.
if hasattr(input, 'read'):
try:
input = input.name
except AttributeError:
raise TypeError("h5py can only open regular files")
# Open the file for reading, and recursively call read_table_hdf5 with
# the file object and the path.
f = h5py.File(input, 'r')
try:
return read_table_hdf5(f, path=path, character_as_bytes=character_as_bytes)
finally:
f.close()
# If we are here, `input` should be a Dataset object, which we can now
# convert to a Table.
# Create a Table object
from astropy.table import Table, meta, serialize
table = Table(np.array(input))
# Read the meta-data from the file. For back-compatibility, we can read
# the old file format where the serialized metadata were saved in the
# attributes of the HDF5 dataset.
    # In the new format, instead, metadata are stored in a new dataset in the
    # same file. This was introduced in Astropy 3.0.
old_version_meta = META_KEY in input.attrs
new_version_meta = path is not None and meta_path(path) in input_save
if old_version_meta or new_version_meta:
if new_version_meta:
header = meta.get_header_from_yaml(
h.decode('utf-8') for h in input_save[meta_path(path)])
else:
# Must be old_version_meta is True. if (A or B) and not A then B is True
header = meta.get_header_from_yaml(
h.decode('utf-8') for h in input.attrs[META_KEY])
if 'meta' in list(header.keys()):
table.meta = header['meta']
header_cols = {x['name']: x for x in header['datatype']}
for col in table.columns.values():
for attr in ('description', 'format', 'unit', 'meta'):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
# Construct new table with mixins, using tbl.meta['__serialized_columns__']
# as guidance.
table = serialize._construct_mixins_from_columns(table)
else:
# Read the meta-data from the file
table.meta.update(input.attrs)
if not character_as_bytes:
table.convert_bytestring_to_unicode()
return table
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from astropy.table import serialize
from astropy import units as u
from astropy.utils.data_info import serialize_context_as
# Convert the table to one with no mixins, only Column objects. This adds
# meta data which is extracted with meta.get_yaml_from_table.
with serialize_context_as('hdf5'):
encode_tbl = serialize.represent_mixins_as_columns(tbl)
return encode_tbl
def write_table_hdf5(table, output, path=None, compression=False,
append=False, overwrite=False, serialize_meta=False,
**create_dataset_kwargs):
"""
Write a Table object to an HDF5 file
This requires `h5py <http://www.h5py.org/>`_ to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or :class:`h5py.File` or :class:`h5py.Group`
If a string, the filename to write the table to. If an h5py object,
either the file or the group object to write the table to.
path : str
The path to which to write the table inside the HDF5 file.
This should be relative to the input file or group.
If not specified, defaults to ``__astropy_table__``.
compression : bool or str or int
Whether to compress the table inside the HDF5 file. If set to `True`,
``'gzip'`` compression is used. If a string is specified, it should be
one of ``'gzip'``, ``'szip'``, or ``'lzf'``. If an integer is
specified (in the range 0-9), ``'gzip'`` compression is used, and the
integer denotes the compression level.
append : bool
Whether to append the table to an existing HDF5 file.
overwrite : bool
Whether to overwrite any existing file without warning.
If ``append=True`` and ``overwrite=True`` then only the dataset will be
replaced; the file/group will not be overwritten.
serialize_meta : bool
Whether to serialize rich table meta-data when writing the HDF5 file, in
particular such data required to write and read back mixin columns like
``Time``, ``SkyCoord``, or ``Quantity`` to the file.
**create_dataset_kwargs
Additional keyword arguments are passed to
``h5py.File.create_dataset()`` or ``h5py.Group.create_dataset()``.
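
    Examples
    --------
    A minimal sketch (the table ``t`` and file name are hypothetical)::

        >>> from astropy.io.misc.hdf5 import write_table_hdf5
        >>> write_table_hdf5(t, 'observations.hdf5', path='group/data',
        ...                  serialize_meta=True, overwrite=True)  # doctest: +SKIP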
"""
from astropy.table import meta
try:
import h5py
except ImportError:
raise Exception("h5py is required to read and write HDF5 files")
if path is None:
        # '__astropy_table__' is just an arbitrary, hardcoded default name.
path = '__astropy_table__'
elif path.endswith('/'):
raise ValueError("table path should end with table name, not /")
if '/' in path:
group, name = path.rsplit('/', 1)
else:
group, name = None, path
if isinstance(output, (h5py.File, h5py.Group)):
if len(list(output.keys())) > 0 and name == '__astropy_table__':
raise ValueError("table path should always be set via the "
"path= argument when writing to existing "
"files")
elif name == '__astropy_table__':
warnings.warn("table path was not set via the path= argument; "
"using default path {}".format(path))
if group:
try:
output_group = output[group]
except (KeyError, ValueError):
output_group = output.create_group(group)
else:
output_group = output
elif isinstance(output, str):
if os.path.exists(output) and not append:
if overwrite and not append:
os.remove(output)
else:
raise OSError(NOT_OVERWRITING_MSG.format(output))
# Open the file for appending or writing
f = h5py.File(output, 'a' if append else 'w')
# Recursively call the write function
try:
return write_table_hdf5(table, f, path=path,
compression=compression, append=append,
overwrite=overwrite,
serialize_meta=serialize_meta)
finally:
f.close()
else:
raise TypeError('output should be a string or an h5py File or '
'Group object')
# Check whether table already exists
if name in output_group:
if append and overwrite:
# Delete only the dataset itself
del output_group[name]
if serialize_meta and name + '.__table_column_meta__' in output_group:
del output_group[name + '.__table_column_meta__']
else:
raise OSError(f"Table {path} already exists")
# Encode any mixin columns as plain columns + appropriate metadata
table = _encode_mixins(table)
# Table with numpy unicode strings can't be written in HDF5 so
# to write such a table a copy of table is made containing columns as
# bytestrings. Now this copy of the table can be written in HDF5.
if any(col.info.dtype.kind == 'U' for col in table.itercols()):
table = table.copy(copy_data=False)
table.convert_unicode_to_bytestring()
# Warn if information will be lost when serialize_meta=False. This is
# hardcoded to the set difference between column info attributes and what
# HDF5 can store natively (name, dtype) with no meta.
if serialize_meta is False:
for col in table.itercols():
for attr in ('unit', 'format', 'description', 'meta'):
if getattr(col.info, attr, None) not in (None, {}):
warnings.warn("table contains column(s) with defined 'unit', 'format',"
" 'description', or 'meta' info attributes. These will"
" be dropped since serialize_meta=False.",
AstropyUserWarning)
# Write the table to the file
if compression:
if compression is True:
compression = 'gzip'
dset = output_group.create_dataset(name, data=table.as_array(),
compression=compression,
**create_dataset_kwargs)
else:
dset = output_group.create_dataset(name, data=table.as_array(),
**create_dataset_kwargs)
if serialize_meta:
header_yaml = meta.get_yaml_from_table(table)
header_encoded = np.array([h.encode('utf-8') for h in header_yaml])
output_group.create_dataset(meta_path(name),
data=header_encoded)
else:
# Write the Table meta dict key:value pairs to the file as HDF5
# attributes. This works only for a limited set of scalar data types
# like numbers, strings, etc., but not any complex types. This path
# also ignores column meta like unit or format.
for key in table.meta:
val = table.meta[key]
try:
dset.attrs[key] = val
except TypeError:
warnings.warn("Attribute `{}` of type {} cannot be written to "
"HDF5 files - skipping. (Consider specifying "
"serialize_meta=True to write all meta data)".format(key, type(val)),
AstropyUserWarning)
def register_hdf5():
"""
Register HDF5 with Unified I/O.
"""
from astropy.io import registry as io_registry
from astropy.table import Table
io_registry.register_reader('hdf5', Table, read_table_hdf5)
io_registry.register_writer('hdf5', Table, write_table_hdf5)
io_registry.register_identifier('hdf5', Table, is_hdf5)
|
868efc68aa5f62dc295bfcaea9a8e8cdc9e17ce3eedf605f1dd2d244efd86134 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains functions for reading and writing Parquet
tables that are not meant to be used directly, but instead are
available as readers/writers in `astropy.table`. See
:ref:`astropy:table_io` for more details.
"""
import os
import warnings
import numpy as np
# NOTE: Do not import anything from astropy.table here.
# https://github.com/astropy/astropy/issues/6604
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
from astropy.utils import minversion
PARQUET_SIGNATURE = b'PAR1'
__all__ = [] # nothing is publicly scoped
def parquet_identify(origin, filepath, fileobj, *args, **kwargs):
"""Checks if input is in the Parquet format.
Parameters
----------
origin : Any
filepath : str or None
fileobj : `~pyarrow.NativeFile` or None
*args, **kwargs
Returns
-------
is_parquet : bool
True if 'fileobj' is not None and is a pyarrow file, or if
'filepath' is a string ending with '.parquet' or '.parq'.
False otherwise.
"""
if fileobj is not None:
try: # safely test if pyarrow file
pos = fileobj.tell() # store current stream position
except AttributeError:
return False
signature = fileobj.read(4) # read first 4 bytes
fileobj.seek(pos) # return to original location
return signature == PARQUET_SIGNATURE
elif filepath is not None:
return filepath.endswith(('.parquet', '.parq'))
else:
return False
def read_table_parquet(input, include_names=None, exclude_names=None,
schema_only=False, filters=None):
"""
Read a Table object from a Parquet file.
This requires `pyarrow <https://arrow.apache.org/docs/python/>`_
to be installed.
The ``filters`` parameter consists of predicates that are expressed
in disjunctive normal form (DNF), like ``[[('x', '=', 0), ...], ...]``.
DNF allows arbitrary boolean logical combinations of single column
predicates. The innermost tuples each describe a single column predicate.
The list of inner predicates is interpreted as a conjunction (AND),
forming a more selective and multiple column predicate. Finally, the most
outer list combines these filters as a disjunction (OR).
Predicates may also be passed as List[Tuple]. This form is interpreted
as a single conjunction. To express OR in predicates, one must
use the (preferred) List[List[Tuple]] notation.
Each tuple has format: (``key``, ``op``, ``value``) and compares the
``key`` with the ``value``.
The supported ``op`` are: ``=`` or ``==``, ``!=``, ``<``, ``>``, ``<=``,
``>=``, ``in`` and ``not in``. If the ``op`` is ``in`` or ``not in``, the
``value`` must be a collection such as a ``list``, a ``set`` or a
``tuple``.
Examples:
.. code-block:: python
('x', '=', 0)
('y', 'in', ['a', 'b', 'c'])
('z', 'not in', {'a','b'})
Parameters
----------
input : str or path-like or file-like object
If a string or path-like object, the filename to read the table from.
If a file-like object, the stream to read data.
include_names : list [str], optional
List of names to include in output. If not supplied, then
include all columns.
exclude_names : list [str], optional
List of names to exclude from output (applied after ``include_names``).
If not supplied then no columns are excluded.
schema_only : bool, optional
Only read the schema/metadata with table information.
filters : list [tuple] or list [list [tuple] ] or None, optional
Rows which do not match the filter predicate will be removed from
scanned data. See `pyarrow.parquet.read_table()` for details.
Returns
-------
table : `~astropy.table.Table`
Table will have zero rows and only metadata information
if schema_only is True.
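
    Examples
    --------
    A minimal sketch (the file and column names are hypothetical)::

        >>> t = read_table_parquet('catalog.parquet',
        ...                        include_names=['ra', 'dec'],
        ...                        filters=[('mag', '<', 20.0)])  # doctest: +SKIP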
"""
pa, parquet, _ = get_pyarrow()
if not isinstance(input, (str, os.PathLike)):
# The 'read' attribute is the key component of a generic
# file-like object.
if not hasattr(input, 'read'):
raise TypeError("pyarrow can only open path-like or file-like objects.")
schema = parquet.read_schema(input)
# Pyarrow stores all metadata as byte-strings, so we convert
# to UTF-8 strings here.
if schema.metadata is not None:
md = {k.decode('UTF-8'): v.decode('UTF-8') for k, v in schema.metadata.items()}
else:
md = {}
from astropy.table import Table, meta, serialize
# parse metadata from table yaml
meta_dict = {}
if 'table_meta_yaml' in md:
meta_yaml = md.pop('table_meta_yaml').split('\n')
meta_hdr = meta.get_header_from_yaml(meta_yaml)
if 'meta' in meta_hdr:
meta_dict = meta_hdr['meta']
else:
meta_hdr = None
# parse and set serialized columns
full_table_columns = {name: name for name in schema.names}
has_serialized_columns = False
if '__serialized_columns__' in meta_dict:
has_serialized_columns = True
serialized_columns = meta_dict['__serialized_columns__']
for scol in serialized_columns:
for name in _get_names(serialized_columns[scol]):
full_table_columns[name] = scol
use_names = set(full_table_columns.values())
# Apply include_names before exclude_names
if include_names is not None:
use_names.intersection_update(include_names)
if exclude_names is not None:
use_names.difference_update(exclude_names)
# Preserve column ordering via list, and use this dict trick
# to remove duplicates and preserve ordering (for mixin columns)
use_names = list(dict.fromkeys([x for x in full_table_columns.values() if x in use_names]))
# names_to_read is a list of actual serialized column names, where
# e.g. the requested name 'time' becomes ['time.jd1', 'time.jd2']
names_to_read = []
for name in use_names:
names = [n for n, col in full_table_columns.items() if name == col]
names_to_read.extend(names)
if not names_to_read:
raise ValueError("No include_names specified were found in the table.")
# We need to pop any unread serialized columns out of the meta_dict.
if has_serialized_columns:
for scol in list(meta_dict['__serialized_columns__'].keys()):
if scol not in use_names:
meta_dict['__serialized_columns__'].pop(scol)
# whether to return the whole table or a formatted empty table.
if not schema_only:
# Read the pyarrow table, specifying columns and filters.
pa_table = parquet.read_table(input, columns=names_to_read, filters=filters)
num_rows = pa_table.num_rows
else:
num_rows = 0
# Now need to convert parquet table to Astropy
dtype = []
for name in names_to_read:
# Pyarrow string and byte columns do not have native length information
# so we must determine those here.
if schema.field(name).type not in (pa.string(), pa.binary()):
# Convert the pyarrow type into a numpy dtype (which is returned
# by the to_pandas_type() method).
dtype.append(schema.field(name).type.to_pandas_dtype())
continue
# Special-case for string and binary columns
md_name = f'table::len::{name}'
if md_name in md:
# String/bytes length from header.
strlen = int(md[md_name])
        elif schema_only:
            # Choose an arbitrary string length since we
            # are not reading in the table.
            strlen = 10
            warnings.warn(f"No {md_name} found in metadata. "
                          f"Guessing {strlen} for schema.",
                          AstropyUserWarning)
        else:
            # Find the maximum string length by scanning the column data.
            strlen = max(len(row.as_py()) for row in pa_table[name])
            warnings.warn(f"No {md_name} found in metadata. "
                          f"Using longest string ({strlen} characters).",
                          AstropyUserWarning)
dtype.append(f'U{strlen}' if schema.field(name).type == pa.string() else f'|S{strlen}')
# Create the empty numpy record array to store the pyarrow data.
data = np.zeros(num_rows, dtype=list(zip(names_to_read, dtype)))
if not schema_only:
# Convert each column in the pyarrow table to a numpy array
for name in names_to_read:
data[name][:] = pa_table[name].to_numpy()
table = Table(data=data, meta=meta_dict)
if meta_hdr is not None:
# Set description, format, unit, meta from the column
# metadata that was serialized with the table.
header_cols = {x['name']: x for x in meta_hdr['datatype']}
for col in table.columns.values():
for attr in ('description', 'format', 'unit', 'meta'):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
# Convert all compound columns to astropy objects
# (e.g. time.jd1, time.jd2 into a single time column)
table = serialize._construct_mixins_from_columns(table)
return table
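# Example usage (a sketch, not part of the original module; assumes
# pyarrow is installed and that 'data.parquet' is a hypothetical file
# written by ``write_table_parquet`` below):
#
#     >>> t = read_table_parquet('data.parquet',
#     ...                        include_names=['a'],
#     ...                        filters=[('a', '>', 1)])
#
# ``filters`` is handed straight to ``pyarrow.parquet.read_table``, so
# row filtering happens before the data are converted to numpy.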
def write_table_parquet(table, output, overwrite=False):
"""
    Write a Table object to a Parquet file.
    This requires `pyarrow <https://arrow.apache.org/docs/python/>`_
    to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or path-like
The filename to write the table to.
overwrite : bool, optional
Whether to overwrite any existing file without warning. Default `False`.
"""
from astropy.table import meta, serialize
from astropy.utils.data_info import serialize_context_as
pa, parquet, writer_version = get_pyarrow()
if not isinstance(output, (str, os.PathLike)):
raise TypeError(f'`output` should be a string or path-like, not {output}')
# Convert all compound columns into serialized column names, where
# e.g. 'time' becomes ['time.jd1', 'time.jd2'].
with serialize_context_as('parquet'):
encode_table = serialize.represent_mixins_as_columns(table)
# We store the encoded serialization metadata as a yaml string.
meta_yaml = meta.get_yaml_from_table(encode_table)
meta_yaml_str = '\n'.join(meta_yaml)
metadata = {}
for name, col in encode_table.columns.items():
# Parquet will retain the datatypes of columns, but string and
# byte column length is lost. Therefore, we special-case these
# types to record the length for precise round-tripping.
if col.dtype.type is np.str_:
metadata[f'table::len::{name}'] = str(col.dtype.itemsize//4)
elif col.dtype.type is np.bytes_:
metadata[f'table::len::{name}'] = str(col.dtype.itemsize)
metadata['table_meta_yaml'] = meta_yaml_str
# Pyarrow stores all metadata as byte strings, so we explicitly encode
# our unicode strings in metadata as UTF-8 byte strings here.
metadata_encode = {k.encode('UTF-8'): v.encode('UTF-8') for k, v in metadata.items()}
# Build the pyarrow schema by converting from the numpy dtype of each
# column to an equivalent pyarrow type with from_numpy_dtype()
type_list = [(name, pa.from_numpy_dtype(encode_table.dtype[name].type))
for name in encode_table.dtype.names]
schema = pa.schema(type_list, metadata=metadata_encode)
if os.path.exists(output):
if overwrite:
# We must remove the file prior to writing below.
os.remove(output)
else:
raise OSError(NOT_OVERWRITING_MSG.format(output))
    # We require writer version >= 2.0 ('2.4' with pyarrow 6 or later)
    # for full support of datatypes, including uint32.
with parquet.ParquetWriter(output, schema, version=writer_version) as writer:
# Convert each Table column to a pyarrow array
arrays = [pa.array(col) for col in encode_table.itercols()]
# Create a pyarrow table from the list of arrays and the schema
pa_table = pa.Table.from_arrays(arrays, schema=schema)
# Write the pyarrow table to a file
writer.write_table(pa_table)
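# Example usage (a sketch, not part of the original module;
# 'data.parquet' is a hypothetical path):
#
#     >>> from astropy.table import Table
#     >>> t = Table({'name': ['a', 'bc'], 'x': [1.0, 2.0]})
#     >>> write_table_parquet(t, 'data.parquet', overwrite=True)
#
# The string column 'name' gets a ``table::len::name`` metadata entry
# recording its character length, which lets ``read_table_parquet``
# reconstruct the fixed-width numpy dtype exactly.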
def _get_names(_dict):
"""Recursively find the names in a serialized column dictionary.
Parameters
----------
_dict : `dict`
Dictionary from astropy __serialized_columns__
Returns
-------
all_names : `list` [`str`]
All the column names mentioned in _dict and sub-dicts.
"""
all_names = []
for k, v in _dict.items():
if isinstance(v, dict):
all_names.extend(_get_names(v))
elif k == 'name':
all_names.append(v)
return all_names
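# Example (a sketch): for a serialized-column entry shaped like
#
#     {'__class__': 'astropy.time.core.Time',
#      'jd1': {'__class__': 'astropy.table.column.Column', 'name': 'time.jd1'},
#      'jd2': {'__class__': 'astropy.table.column.Column', 'name': 'time.jd2'}}
#
# ``_get_names`` returns ['time.jd1', 'time.jd2'].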
def register_parquet():
"""
Register Parquet with Unified I/O.
"""
from astropy.io import registry as io_registry
from astropy.table import Table
io_registry.register_reader('parquet', Table, read_table_parquet)
io_registry.register_writer('parquet', Table, write_table_parquet)
io_registry.register_identifier('parquet', Table, parquet_identify)
def get_pyarrow():
try:
import pyarrow as pa
from pyarrow import parquet
except ImportError:
raise Exception("pyarrow is required to read and write parquet files")
if minversion(pa, '6.0.0'):
writer_version = '2.4'
else:
writer_version = '2.0'
return pa, parquet, writer_version
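# Example (a sketch): callers unpack the module handles and the parquet
# writer version in a single call:
#
#     >>> pa, parquet, writer_version = get_pyarrow()
#     >>> writer_version in ('2.0', '2.4')
#     True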
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TODO: Test FITS parsing
# STDLIB
import io
import re
import gzip
import base64
import codecs
import urllib.request
import warnings
# THIRD-PARTY
import numpy as np
from numpy import ma
# LOCAL
from astropy.io import fits
from astropy import __version__ as astropy_version
from astropy.utils.collections import HomogeneousList
from astropy.utils.xml.writer import XMLWriter
from astropy.utils.exceptions import AstropyDeprecationWarning
from . import converters
from .exceptions import (warn_or_raise, vo_warn, vo_raise, vo_reraise,
warn_unknown_attrs, W06, W07, W08, W09, W10, W11, W12,
W13, W15, W17, W18, W19, W20, W21, W22, W26, W27, W28,
W29, W32, W33, W35, W36, W37, W38, W40, W41, W42, W43,
W44, W45, W50, W52, W53, W54, E06, E08, E09, E10, E11,
E12, E13, E15, E16, E17, E18, E19, E20, E21, E22, E23,
E25)
from . import ucd as ucd_mod
from . import util
from . import xmlutil
try:
from . import tablewriter
_has_c_tabledata_writer = True
except ImportError:
_has_c_tabledata_writer = False
__all__ = [
'Link', 'Info', 'Values', 'Field', 'Param', 'CooSys', 'TimeSys',
'FieldRef', 'ParamRef', 'Group', 'Table', 'Resource',
'VOTableFile', 'Element'
]
# The default number of rows to read in each chunk before converting
# to an array.
DEFAULT_CHUNK_SIZE = 256
RESIZE_AMOUNT = 1.5
######################################################################
# FACTORY FUNCTIONS
def _resize(masked, new_size):
"""
    Masked arrays cannot be resized in place, and `np.resize` and
    `ma.resize` are both incompatible with structured arrays.
    Therefore, we copy the data into a freshly-allocated masked array
    of the requested size.
"""
new_array = ma.zeros((new_size,), dtype=masked.dtype)
length = min(len(masked), new_size)
new_array[:length] = masked[:length]
return new_array
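# Example (a sketch): growing a masked structured array while keeping
# the rows that already exist.
#
#     >>> arr = ma.zeros((2,), dtype=[('a', int)])
#     >>> arr['a'] = [1, 2]
#     >>> bigger = _resize(arr, 4)
#     >>> len(bigger), bigger['a'][:2].tolist()
#     (4, [1, 2])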
def _lookup_by_attr_factory(attr, unique, iterator, element_name, doc):
"""
Creates a function useful for looking up an element by a given
attribute.
Parameters
----------
attr : str
The attribute name
unique : bool
Should be `True` if the attribute is unique and therefore this
        should return only one value.  Otherwise, the lookup yields all
        matching values as a generator.
iterator : generator
A generator that iterates over some arbitrary set of elements
element_name : str
The XML element name of the elements being iterated over (used
for error messages only).
doc : str
A docstring to apply to the generated function.
Returns
-------
factory : function
A function that looks up an element by the given attribute.
"""
def lookup_by_attr(self, ref, before=None):
"""
Given a string *ref*, finds the first element in the iterator
where the given attribute == *ref*. If *before* is provided,
will stop searching at the object *before*. This is
important, since "forward references" are not allowed in the
VOTABLE format.
"""
for element in getattr(self, iterator)():
if element is before:
if getattr(element, attr, None) == ref:
vo_raise(
f"{element_name} references itself",
element._config, element._pos, KeyError)
break
if getattr(element, attr, None) == ref:
yield element
def lookup_by_attr_unique(self, ref, before=None):
for element in lookup_by_attr(self, ref, before=before):
return element
raise KeyError(
"No {} with {} '{}' found before the referencing {}".format(
element_name, attr, ref, element_name))
if unique:
lookup_by_attr_unique.__doc__ = doc
return lookup_by_attr_unique
else:
lookup_by_attr.__doc__ = doc
return lookup_by_attr
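# Example (a sketch of how this factory is consumed later in the
# module): classes bind the generated function as a method, e.g.
#
#     get_field_by_id = _lookup_by_attr_factory(
#         'ID', True, 'iter_fields_and_params', 'FIELD',
#         """Looks up a FIELD element by the given ID.""")
#
# so that ``table.get_field_by_id('col1')`` walks
# ``iter_fields_and_params()`` and returns the first FIELD whose ID
# matches, raising `KeyError` if none is found.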
def _lookup_by_id_or_name_factory(iterator, element_name, doc):
"""
Like `_lookup_by_attr_factory`, but looks in both the "ID" and
"name" attributes.
"""
def lookup_by_id_or_name(self, ref, before=None):
"""
        Given a key *ref*, finds the first element in the iterator
with the attribute ID == *ref* or name == *ref*. If *before*
is provided, will stop searching at the object *before*. This
is important, since "forward references" are not allowed in
the VOTABLE format.
"""
for element in getattr(self, iterator)():
if element is before:
if ref in (element.ID, element.name):
vo_raise(
f"{element_name} references itself",
element._config, element._pos, KeyError)
break
if ref in (element.ID, element.name):
return element
raise KeyError(
"No {} with ID or name '{}' found before the referencing {}".format(
element_name, ref, element_name))
lookup_by_id_or_name.__doc__ = doc
return lookup_by_id_or_name
def _get_default_unit_format(config):
"""
Get the default unit format as specified in the VOTable spec.
"""
# The unit format changed between VOTable versions 1.3 and 1.4,
# see issue #10791.
if config['version_1_4_or_later']:
return 'vounit'
else:
return 'cds'
def _get_unit_format(config):
"""
Get the unit format based on the configuration.
"""
if config.get('unit_format') is None:
format = _get_default_unit_format(config)
else:
format = config['unit_format']
return format
######################################################################
# ATTRIBUTE CHECKERS
def check_astroyear(year, field, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*year* is not a valid astronomical year as defined by the VOTABLE
standard.
Parameters
----------
year : str
An astronomical year string
field : str
The name of the field this year was found in (used for error
message)
config, pos : optional
Information about the source of the value
"""
if (year is not None and
re.match(r"^[JB]?[0-9]+([.][0-9]*)?$", year) is None):
warn_or_raise(W07, W07, (field, year), config, pos)
return False
return True
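# Example (a sketch): an astronomical year is an optionally J/B-prefixed
# decimal, matching ``^[JB]?[0-9]+([.][0-9]*)?$``.
#
#     >>> check_astroyear('J2000', 'equinox')
#     True
#     >>> check_astroyear('2000.5', 'equinox')
#     True
#     >>> check_astroyear('MJD2000', 'equinox')  # emits W07
#     False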
def check_string(string, attr_name, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*string* is not a string or Unicode string.
Parameters
----------
    string : str
        The string to check
    attr_name : str
        The name of the attribute the string was found in (used for
        error message)
config, pos : optional
Information about the source of the value
"""
if string is not None and not isinstance(string, str):
warn_or_raise(W08, W08, attr_name, config, pos)
return False
return True
def resolve_id(ID, id, config=None, pos=None):
if ID is None and id is not None:
warn_or_raise(W09, W09, (), config, pos)
return id
return ID
def check_ucd(ucd, config=None, pos=None):
"""
Warns or raises a
`~astropy.io.votable.exceptions.VOTableSpecError` if *ucd* is not
a valid `unified content descriptor`_ string as defined by the
VOTABLE standard.
Parameters
----------
ucd : str
A UCD string.
config, pos : optional
Information about the source of the value
"""
if config is None:
config = {}
if config.get('version_1_1_or_later'):
try:
ucd_mod.parse_ucd(
ucd,
check_controlled_vocabulary=config.get(
'version_1_2_or_later', False),
has_colon=config.get('version_1_2_or_later', False))
except ValueError as e:
# This weird construction is for Python 3 compatibility
if config.get('verify', 'ignore') == 'exception':
vo_raise(W06, (ucd, str(e)), config, pos)
elif config.get('verify', 'ignore') == 'warn':
vo_warn(W06, (ucd, str(e)), config, pos)
return False
else:
return False
return True
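# Example (a sketch): pre-1.1 configs skip the check entirely and report
# False; for 1.1+ a parseable UCD passes.
#
#     >>> check_ucd('pos.eq.ra;meta.main', {'version_1_1_or_later': True})
#     True
#     >>> check_ucd('pos.eq.ra', {})
#     False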
######################################################################
# PROPERTY MIXINS
class _IDProperty:
@property
def ID(self):
"""
The XML ID_ of the element. May be `None` or a string
conforming to XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
xmlutil.check_id(ID, 'ID', self._config, self._pos)
self._ID = ID
@ID.deleter
def ID(self):
self._ID = None
class _NameProperty:
@property
def name(self):
"""An optional name for the element."""
return self._name
@name.setter
def name(self, name):
xmlutil.check_token(name, 'name', self._config, self._pos)
self._name = name
@name.deleter
def name(self):
self._name = None
class _XtypeProperty:
@property
def xtype(self):
"""Extended data type information."""
return self._xtype
@xtype.setter
def xtype(self, xtype):
if xtype is not None and not self._config.get('version_1_2_or_later'):
warn_or_raise(
W28, W28, ('xtype', self._element_name, '1.2'),
self._config, self._pos)
check_string(xtype, 'xtype', self._config, self._pos)
self._xtype = xtype
@xtype.deleter
def xtype(self):
self._xtype = None
class _UtypeProperty:
_utype_in_v1_2 = False
@property
def utype(self):
"""The usage-specific or `unique type`_ of the element."""
return self._utype
@utype.setter
def utype(self, utype):
if (self._utype_in_v1_2 and
utype is not None and
not self._config.get('version_1_2_or_later')):
warn_or_raise(
W28, W28, ('utype', self._element_name, '1.2'),
self._config, self._pos)
check_string(utype, 'utype', self._config, self._pos)
self._utype = utype
@utype.deleter
def utype(self):
self._utype = None
class _UcdProperty:
_ucd_in_v1_2 = False
@property
def ucd(self):
"""The `unified content descriptor`_ for the element."""
return self._ucd
@ucd.setter
def ucd(self, ucd):
if ucd is not None and ucd.strip() == '':
ucd = None
if ucd is not None:
if (self._ucd_in_v1_2 and
not self._config.get('version_1_2_or_later')):
warn_or_raise(
W28, W28, ('ucd', self._element_name, '1.2'),
self._config, self._pos)
check_ucd(ucd, self._config, self._pos)
self._ucd = ucd
@ucd.deleter
def ucd(self):
self._ucd = None
class _DescriptionProperty:
@property
def description(self):
"""
An optional string describing the element. Corresponds to the
DESCRIPTION_ element.
"""
return self._description
@description.setter
def description(self, description):
self._description = description
@description.deleter
def description(self):
self._description = None
######################################################################
# ELEMENT CLASSES
class Element:
"""
A base class for all classes that represent XML elements in the
VOTABLE file.
"""
_element_name = ''
_attr_list = []
def _add_unknown_tag(self, iterator, tag, data, config, pos):
warn_or_raise(W10, W10, tag, config, pos)
def _ignore_add(self, iterator, tag, data, config, pos):
warn_unknown_attrs(tag, data.keys(), config, pos)
def _add_definitions(self, iterator, tag, data, config, pos):
if config.get('version_1_1_or_later'):
warn_or_raise(W22, W22, (), config, pos)
warn_unknown_attrs(tag, data.keys(), config, pos)
def parse(self, iterator, config):
"""
For internal use. Parse the XML content of the children of the
element.
Parameters
----------
iterator : xml iterable
An iterator over XML elements as returned by
`~astropy.utils.xml.iterparser.get_xml_iterator`.
config : dict
The configuration dictionary that affects how certain
elements are read.
Returns
-------
self : `~astropy.io.votable.tree.Element`
Returns self as a convenience.
"""
raise NotImplementedError()
def to_xml(self, w, **kwargs):
"""
For internal use. Output the element to XML.
Parameters
----------
w : astropy.utils.xml.writer.XMLWriter object
An XML writer to write to.
**kwargs : dict
Any configuration parameters to control the output.
"""
raise NotImplementedError()
class SimpleElement(Element):
"""
A base class for simple elements, such as FIELD, PARAM and INFO
that don't require any special parsing or outputting machinery.
"""
def __init__(self):
Element.__init__(self)
def __repr__(self):
buff = io.StringIO()
SimpleElement.to_xml(self, XMLWriter(buff))
return buff.getvalue().strip()
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start and tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
elif tag == self._element_name:
break
return self
def to_xml(self, w, **kwargs):
w.element(self._element_name,
attrib=w.object_attrs(self, self._attr_list))
class SimpleElementWithContent(SimpleElement):
"""
A base class for simple elements, such as FIELD, PARAM and INFO
that don't require any special parsing or outputting machinery.
"""
def __init__(self):
SimpleElement.__init__(self)
self._content = None
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start and tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
elif tag == self._element_name:
if data:
self.content = data
break
return self
def to_xml(self, w, **kwargs):
w.element(self._element_name, self._content,
attrib=w.object_attrs(self, self._attr_list))
@property
def content(self):
"""The content of the element."""
return self._content
@content.setter
def content(self, content):
check_string(content, 'content', self._config, self._pos)
self._content = content
@content.deleter
def content(self):
self._content = None
class Link(SimpleElement, _IDProperty):
"""
LINK_ elements: used to reference external documents and servers through a URI.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = ['ID', 'content_role', 'content_type', 'title', 'value',
'href', 'action']
_element_name = 'LINK'
def __init__(self, ID=None, title=None, value=None, href=None, action=None,
id=None, config=None, pos=None, **kwargs):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
content_role = kwargs.get('content-role') or kwargs.get('content_role')
content_type = kwargs.get('content-type') or kwargs.get('content_type')
if 'gref' in kwargs:
warn_or_raise(W11, W11, (), config, pos)
self.ID = resolve_id(ID, id, config, pos)
self.content_role = content_role
self.content_type = content_type
self.title = title
self.value = value
self.href = href
self.action = action
warn_unknown_attrs(
'LINK', kwargs.keys(), config, pos,
['content-role', 'content_role', 'content-type', 'content_type',
'gref'])
@property
def content_role(self):
"""
Defines the MIME role of the referenced object. Must be one of:
None, 'query', 'hints', 'doc', 'location' or 'type'
"""
return self._content_role
@content_role.setter
def content_role(self, content_role):
if ((content_role == 'type' and
not self._config['version_1_3_or_later']) or
content_role not in
             (None, 'query', 'hints', 'doc', 'location', 'type')):
vo_warn(W45, (content_role,), self._config, self._pos)
self._content_role = content_role
@content_role.deleter
def content_role(self):
self._content_role = None
@property
def content_type(self):
"""Defines the MIME content type of the referenced object."""
return self._content_type
@content_type.setter
def content_type(self, content_type):
xmlutil.check_mime_content_type(content_type, self._config, self._pos)
self._content_type = content_type
@content_type.deleter
def content_type(self):
self._content_type = None
@property
def href(self):
"""
A URI to an arbitrary protocol. The vo package only supports
http and anonymous ftp.
"""
return self._href
@href.setter
def href(self, href):
xmlutil.check_anyuri(href, self._config, self._pos)
self._href = href
@href.deleter
def href(self):
self._href = None
def to_table_column(self, column):
meta = {}
for key in self._attr_list:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
column.meta.setdefault('links', [])
column.meta['links'].append(meta)
@classmethod
def from_table_column(cls, d):
return cls(**d)
class Info(SimpleElementWithContent, _IDProperty, _XtypeProperty,
_UtypeProperty):
"""
INFO_ elements: arbitrary key-value pairs for extensions to the standard.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_element_name = 'INFO'
_attr_list_11 = ['ID', 'name', 'value']
_attr_list_12 = _attr_list_11 + ['xtype', 'ref', 'unit', 'ucd', 'utype']
_utype_in_v1_2 = True
def __init__(self, ID=None, name=None, value=None, id=None, xtype=None,
ref=None, unit=None, ucd=None, utype=None,
config=None, pos=None, **extra):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElementWithContent.__init__(self)
self.ID = (resolve_id(ID, id, config, pos) or
xmlutil.fix_id(name, config, pos))
self.name = name
self.value = value
self.xtype = xtype
self.ref = ref
self.unit = unit
self.ucd = ucd
self.utype = utype
if config.get('version_1_2_or_later'):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if xtype is not None:
warn_unknown_attrs('INFO', ['xtype'], config, pos)
if ref is not None:
warn_unknown_attrs('INFO', ['ref'], config, pos)
if unit is not None:
warn_unknown_attrs('INFO', ['unit'], config, pos)
if ucd is not None:
warn_unknown_attrs('INFO', ['ucd'], config, pos)
if utype is not None:
warn_unknown_attrs('INFO', ['utype'], config, pos)
warn_unknown_attrs('INFO', extra.keys(), config, pos)
@property
def name(self):
"""[*required*] The key of the key-value pair."""
return self._name
@name.setter
def name(self, name):
if name is None:
            warn_or_raise(W35, W35, ('name',), self._config, self._pos)
xmlutil.check_token(name, 'name', self._config, self._pos)
self._name = name
@property
def value(self):
"""
[*required*] The value of the key-value pair. (Always stored
as a string or unicode string).
"""
return self._value
@value.setter
def value(self, value):
if value is None:
            warn_or_raise(W35, W35, ('value',), self._config, self._pos)
check_string(value, 'value', self._config, self._pos)
self._value = value
@property
def content(self):
"""The content inside the INFO element."""
return self._content
@content.setter
def content(self, content):
check_string(content, 'content', self._config, self._pos)
self._content = content
@content.deleter
def content(self):
self._content = None
@property
def ref(self):
"""
Refer to another INFO_ element by ID_, defined previously in
the document.
"""
return self._ref
@ref.setter
def ref(self, ref):
if ref is not None and not self._config.get('version_1_2_or_later'):
warn_or_raise(W28, W28, ('ref', 'INFO', '1.2'),
self._config, self._pos)
xmlutil.check_id(ref, 'ref', self._config, self._pos)
# TODO: actually apply the reference
# if ref is not None:
# try:
# other = self._votable.get_values_by_id(ref, before=self)
# except KeyError:
# vo_raise(
# "VALUES ref='%s', which has not already been defined." %
# self.ref, self._config, self._pos, KeyError)
# self.null = other.null
# self.type = other.type
# self.min = other.min
# self.min_inclusive = other.min_inclusive
# self.max = other.max
# self.max_inclusive = other.max_inclusive
# self._options[:] = other.options
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def unit(self):
"""A string specifying the units_ for the INFO_."""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
return
from astropy import units as u
if not self._config.get('version_1_2_or_later'):
warn_or_raise(W28, W28, ('unit', 'INFO', '1.2'),
self._config, self._pos)
# First, parse the unit in the default way, so that we can
# still emit a warning if the unit is not to spec.
default_format = _get_default_unit_format(self._config)
unit_obj = u.Unit(
unit, format=default_format, parse_strict='silent')
if isinstance(unit_obj, u.UnrecognizedUnit):
warn_or_raise(W50, W50, (unit,),
self._config, self._pos)
format = _get_unit_format(self._config)
if format != default_format:
unit_obj = u.Unit(
unit, format=format, parse_strict='silent')
self._unit = unit_obj
@unit.deleter
def unit(self):
self._unit = None
def to_xml(self, w, **kwargs):
attrib = w.object_attrs(self, self._attr_list)
if 'unit' in attrib:
attrib['unit'] = self.unit.to_string('cds')
w.element(self._element_name, self._content,
attrib=attrib)
class Values(Element, _IDProperty):
"""
VALUES_ element: used within FIELD_ and PARAM_ elements to define the domain of values.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(self, votable, field, ID=None, null=None, ref=None,
type="legal", id=None, config=None, pos=None, **extras):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._votable = votable
self._field = field
self.ID = resolve_id(ID, id, config, pos)
self.null = null
self._ref = ref
self.type = type
self.min = None
self.max = None
self.min_inclusive = True
self.max_inclusive = True
self._options = []
warn_unknown_attrs('VALUES', extras.keys(), config, pos)
def __repr__(self):
buff = io.StringIO()
self.to_xml(XMLWriter(buff))
return buff.getvalue().strip()
@property
def null(self):
"""
For integral datatypes, *null* is used to define the value
used for missing values.
"""
return self._null
@null.setter
def null(self, null):
if null is not None and isinstance(null, str):
try:
null_val = self._field.converter.parse_scalar(
null, self._config, self._pos)[0]
except Exception:
warn_or_raise(W36, W36, null, self._config, self._pos)
null_val = self._field.converter.parse_scalar(
'0', self._config, self._pos)[0]
else:
null_val = null
self._null = null_val
@null.deleter
def null(self):
self._null = None
@property
def type(self):
"""
[*required*] Defines the applicability of the domain defined
by this VALUES_ element. Must be one of the following
strings:
- 'legal': The domain of this column applies in general to
this datatype. (default)
- 'actual': The domain of this column applies only to the
data enclosed in the parent table.
"""
return self._type
@type.setter
def type(self, type):
if type not in ('legal', 'actual'):
vo_raise(E08, type, self._config, self._pos)
self._type = type
@property
def ref(self):
"""
Refer to another VALUES_ element by ID_, defined previously in
the document, for MIN/MAX/OPTION information.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
if ref is not None:
try:
other = self._votable.get_values_by_id(ref, before=self)
except KeyError:
warn_or_raise(W43, W43, ('VALUES', self.ref), self._config,
self._pos)
ref = None
else:
self.null = other.null
self.type = other.type
self.min = other.min
self.min_inclusive = other.min_inclusive
self.max = other.max
self.max_inclusive = other.max_inclusive
self._options[:] = other.options
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def min(self):
"""
The minimum value of the domain. See :attr:`min_inclusive`.
"""
return self._min
@min.setter
def min(self, min):
if hasattr(self._field, 'converter') and min is not None:
self._min = self._field.converter.parse(min)[0]
else:
self._min = min
@min.deleter
def min(self):
self._min = None
@property
def min_inclusive(self):
"""When `True`, the domain includes the minimum value."""
return self._min_inclusive
@min_inclusive.setter
def min_inclusive(self, inclusive):
if inclusive == 'yes':
self._min_inclusive = True
elif inclusive == 'no':
self._min_inclusive = False
else:
self._min_inclusive = bool(inclusive)
@min_inclusive.deleter
def min_inclusive(self):
self._min_inclusive = True
@property
def max(self):
"""
The maximum value of the domain. See :attr:`max_inclusive`.
"""
return self._max
@max.setter
def max(self, max):
if hasattr(self._field, 'converter') and max is not None:
self._max = self._field.converter.parse(max)[0]
else:
self._max = max
@max.deleter
def max(self):
self._max = None
@property
def max_inclusive(self):
"""When `True`, the domain includes the maximum value."""
return self._max_inclusive
@max_inclusive.setter
def max_inclusive(self, inclusive):
if inclusive == 'yes':
self._max_inclusive = True
elif inclusive == 'no':
self._max_inclusive = False
else:
self._max_inclusive = bool(inclusive)
@max_inclusive.deleter
def max_inclusive(self):
self._max_inclusive = True
@property
def options(self):
"""
A list of string key-value tuples defining other OPTION
elements for the domain. All options are ignored -- they are
stored for round-tripping purposes only.
"""
return self._options
def parse(self, iterator, config):
if self.ref is not None:
for start, tag, data, pos in iterator:
if start:
warn_or_raise(W44, W44, tag, config, pos)
else:
if tag != 'VALUES':
warn_or_raise(W44, W44, tag, config, pos)
break
else:
for start, tag, data, pos in iterator:
if start:
if tag == 'MIN':
if 'value' not in data:
vo_raise(E09, 'MIN', config, pos)
self.min = data['value']
self.min_inclusive = data.get('inclusive', 'yes')
warn_unknown_attrs(
'MIN', data.keys(), config, pos,
['value', 'inclusive'])
elif tag == 'MAX':
if 'value' not in data:
vo_raise(E09, 'MAX', config, pos)
self.max = data['value']
self.max_inclusive = data.get('inclusive', 'yes')
warn_unknown_attrs(
'MAX', data.keys(), config, pos,
['value', 'inclusive'])
elif tag == 'OPTION':
if 'value' not in data:
vo_raise(E09, 'OPTION', config, pos)
xmlutil.check_token(
data.get('name'), 'name', config, pos)
self.options.append(
(data.get('name'), data.get('value')))
warn_unknown_attrs(
'OPTION', data.keys(), config, pos,
['value', 'name'])
elif tag == 'VALUES':
break
return self
def is_defaults(self):
"""
        Are the settings on this ``VALUES`` element all the same as the
XML defaults?
"""
# If there's nothing meaningful or non-default to write,
# don't write anything.
return (self.ref is None and self.null is None and self.ID is None and
self.max is None and self.min is None and self.options == [])
def to_xml(self, w, **kwargs):
def yes_no(value):
if value:
return 'yes'
return 'no'
if self.is_defaults():
return
if self.ref is not None:
w.element('VALUES', attrib=w.object_attrs(self, ['ref']))
else:
with w.tag('VALUES',
attrib=w.object_attrs(
self, ['ID', 'null', 'ref'])):
if self.min is not None:
w.element(
'MIN',
value=self._field.converter.output(self.min, False),
inclusive=yes_no(self.min_inclusive))
if self.max is not None:
w.element(
'MAX',
value=self._field.converter.output(self.max, False),
inclusive=yes_no(self.max_inclusive))
for name, value in self.options:
w.element(
'OPTION',
name=name,
value=value)
def to_table_column(self, column):
# Have the ref filled in here
meta = {}
for key in ['ID', 'null']:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
if self.min is not None:
meta['min'] = {
'value': self.min,
'inclusive': self.min_inclusive}
if self.max is not None:
meta['max'] = {
'value': self.max,
'inclusive': self.max_inclusive}
if len(self.options):
meta['options'] = dict(self.options)
column.meta['values'] = meta
def from_table_column(self, column):
if column.info.meta is None or 'values' not in column.info.meta:
return
meta = column.info.meta['values']
for key in ['ID', 'null']:
val = meta.get(key, None)
if val is not None:
setattr(self, key, val)
if 'min' in meta:
self.min = meta['min']['value']
self.min_inclusive = meta['min']['inclusive']
if 'max' in meta:
self.max = meta['max']['value']
self.max_inclusive = meta['max']['inclusive']
if 'options' in meta:
self._options = list(meta['options'].items())
class Field(SimpleElement, _IDProperty, _NameProperty, _XtypeProperty,
_UtypeProperty, _UcdProperty):
"""
FIELD_ element: describes the datatype of a particular column of data.
The keyword arguments correspond to setting members of the same
name, documented below.
If *ID* is provided, it is used for the column name in the
resulting recarray of the table. If no *ID* is provided, *name*
is used instead. If neither is provided, an exception will be
raised.
"""
_attr_list_11 = ['ID', 'name', 'datatype', 'arraysize', 'ucd',
'unit', 'width', 'precision', 'utype', 'ref']
_attr_list_12 = _attr_list_11 + ['xtype']
_element_name = 'FIELD'
def __init__(self, votable, ID=None, name=None, datatype=None,
arraysize=None, ucd=None, unit=None, width=None,
precision=None, utype=None, ref=None, type=None, id=None,
xtype=None,
config=None, pos=None, **extra):
if config is None:
if hasattr(votable, '_get_version_checks'):
config = votable._get_version_checks()
else:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
if config.get('version_1_2_or_later'):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if xtype is not None:
warn_unknown_attrs(self._element_name, ['xtype'], config, pos)
# TODO: REMOVE ME ----------------------------------------
# This is a terrible hack to support Simple Image Access
# Protocol results from https://astroarchive.noirlab.edu/ . It creates a field
# for the coordinate projection type of type "double", which
# actually contains character data. We have to hack the field
# to store character data, or we can't read it in. A warning
# will be raised when this happens.
if (config.get('verify', 'ignore') != 'exception' and name == 'cprojection' and
ID == 'cprojection' and ucd == 'VOX:WCS_CoordProjection' and
datatype == 'double'):
datatype = 'char'
arraysize = '3'
vo_warn(W40, (), config, pos)
# ----------------------------------------
self.description = None
self._votable = votable
self.ID = (resolve_id(ID, id, config, pos) or
xmlutil.fix_id(name, config, pos))
self.name = name
if name is None:
if (self._element_name == 'PARAM' and
not config.get('version_1_1_or_later')):
pass
else:
warn_or_raise(W15, W15, self._element_name, config, pos)
self.name = self.ID
if self._ID is None and name is None:
vo_raise(W12, self._element_name, config, pos)
datatype_mapping = {
'string': 'char',
'unicodeString': 'unicodeChar',
'int16': 'short',
'int32': 'int',
'int64': 'long',
'float32': 'float',
'float64': 'double',
# The following appear in some Vizier tables
'unsignedInt': 'long',
'unsignedShort': 'int'
}
datatype_mapping.update(config.get('datatype_mapping', {}))
if datatype in datatype_mapping:
warn_or_raise(W13, W13, (datatype, datatype_mapping[datatype]),
config, pos)
datatype = datatype_mapping[datatype]
self.ref = ref
self.datatype = datatype
self.arraysize = arraysize
self.ucd = ucd
self.unit = unit
self.width = width
self.precision = precision
self.utype = utype
self.type = type
self._links = HomogeneousList(Link)
self.title = self.name
self.values = Values(self._votable, self)
self.xtype = xtype
self._setup(config, pos)
warn_unknown_attrs(self._element_name, extra.keys(), config, pos)
@classmethod
def uniqify_names(cls, fields):
"""
Make sure that all names and titles in a list of fields are
unique, by appending numbers if necessary.
"""
unique = {}
for field in fields:
i = 2
new_id = field.ID
while new_id in unique:
new_id = field.ID + f"_{i:d}"
i += 1
if new_id != field.ID:
vo_warn(W32, (field.ID, new_id), field._config, field._pos)
field.ID = new_id
unique[new_id] = field.ID
for field in fields:
i = 2
if field.name is None:
new_name = field.ID
implicit = True
else:
new_name = field.name
implicit = False
if new_name != field.ID:
while new_name in unique:
new_name = field.name + f" {i:d}"
i += 1
if (not implicit and
new_name != field.name):
vo_warn(W33, (field.name, new_name), field._config, field._pos)
field._unique_name = new_name
unique[new_name] = field.name
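    # Example (a sketch): if two fields both arrive with ID='flux', the
    # second becomes 'flux_2' (with a W32 warning) so that the resulting
    # recarray has unique column names; a colliding display name is
    # likewise disambiguated to e.g. 'flux 2' and stored as
    # ``_unique_name``.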
def _setup(self, config, pos):
if self.values._ref is not None:
self.values.ref = self.values._ref
self.converter = converters.get_converter(self, config, pos)
@property
def datatype(self):
"""
[*required*] The datatype of the column. Valid values (as
defined by the spec) are:
'boolean', 'bit', 'unsignedByte', 'short', 'int', 'long',
'char', 'unicodeChar', 'float', 'double', 'floatComplex', or
'doubleComplex'
Many VOTABLE files in the wild use 'string' instead of 'char',
so that is also a valid option, though 'string' will always be
converted to 'char' when writing the file back out.
"""
return self._datatype
@datatype.setter
def datatype(self, datatype):
if datatype is None:
if self._config.get('version_1_1_or_later'):
warn_or_raise(E10, E10, self._element_name, self._config,
self._pos)
datatype = 'char'
if datatype not in converters.converter_mapping:
vo_raise(E06, (datatype, self.ID), self._config, self._pos)
self._datatype = datatype
@property
def precision(self):
"""
Along with :attr:`width`, defines the `numerical accuracy`_
associated with the data. These values are used to limit the
precision when writing floating point values back to the XML
file. Otherwise, it is purely informational -- the Numpy
recarray containing the data itself does not use this
information.
"""
return self._precision
@precision.setter
def precision(self, precision):
if precision is not None and not re.match(r"^[FE]?[0-9]+$", precision):
vo_raise(E11, precision, self._config, self._pos)
self._precision = precision
@precision.deleter
def precision(self):
self._precision = None
@property
def width(self):
"""
Along with :attr:`precision`, defines the `numerical
accuracy`_ associated with the data. These values are used to
limit the precision when writing floating point values back to
the XML file. Otherwise, it is purely informational -- the
Numpy recarray containing the data itself does not use this
information.
"""
return self._width
@width.setter
def width(self, width):
if width is not None:
width = int(width)
if width <= 0:
vo_raise(E12, width, self._config, self._pos)
self._width = width
@width.deleter
def width(self):
self._width = None
# ref on FIELD and PARAM behave differently than elsewhere -- here
# they're just informational, such as to refer to a coordinate
# system.
@property
def ref(self):
"""
On FIELD_ elements, ref is used only for informational
purposes, for example to refer to a COOSYS_ or TIMESYS_ element.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def unit(self):
"""A string specifying the units_ for the FIELD_."""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
return
from astropy import units as u
# First, parse the unit in the default way, so that we can
# still emit a warning if the unit is not to spec.
default_format = _get_default_unit_format(self._config)
unit_obj = u.Unit(
unit, format=default_format, parse_strict='silent')
if isinstance(unit_obj, u.UnrecognizedUnit):
warn_or_raise(W50, W50, (unit,),
self._config, self._pos)
format = _get_unit_format(self._config)
if format != default_format:
unit_obj = u.Unit(
unit, format=format, parse_strict='silent')
self._unit = unit_obj
@unit.deleter
def unit(self):
self._unit = None
@property
def arraysize(self):
"""
Specifies the size of the multidimensional array if this
FIELD_ contains more than a single value.
See `multidimensional arrays`_.
"""
return self._arraysize
@arraysize.setter
def arraysize(self, arraysize):
if (arraysize is not None and
not re.match(r"^([0-9]+x)*[0-9]*[*]?(s\W)?$", arraysize)):
vo_raise(E13, arraysize, self._config, self._pos)
self._arraysize = arraysize
@arraysize.deleter
def arraysize(self):
self._arraysize = None
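    # Example (a sketch): strings accepted by the grammar above include
    # '10' (fixed 1-D), '2x3' (2-D), '2x3x*' (last axis variable-length)
    # and '*' (fully variable); something like 'x3' fails the pattern
    # and raises E13.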
@property
def type(self):
"""
The type attribute on FIELD_ elements is reserved for future
extensions.
"""
return self._type
@type.setter
def type(self, type):
self._type = type
@type.deleter
def type(self):
self._type = None
@property
def values(self):
"""
A :class:`Values` instance (or `None`) defining the domain
of the column.
"""
return self._values
@values.setter
def values(self, values):
assert values is None or isinstance(values, Values)
self._values = values
@values.deleter
def values(self):
self._values = None
@property
def links(self):
"""
A list of :class:`Link` instances used to reference more
details about the meaning of the FIELD_. This is purely
informational and is not used by the `astropy.io.votable`
package.
"""
return self._links
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start:
if tag == 'VALUES':
self.values.__init__(
self._votable, self, config=config, pos=pos, **data)
self.values.parse(iterator, config)
elif tag == 'LINK':
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
elif tag == 'DESCRIPTION':
warn_unknown_attrs(
'DESCRIPTION', data.keys(), config, pos)
elif tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
else:
if tag == 'DESCRIPTION':
if self.description is not None:
warn_or_raise(
W17, W17, self._element_name, config, pos)
self.description = data or None
elif tag == self._element_name:
break
if self.description is not None:
self.title = " ".join(x.strip() for x in
self.description.splitlines())
else:
self.title = self.name
self._setup(config, pos)
return self
def to_xml(self, w, **kwargs):
attrib = w.object_attrs(self, self._attr_list)
if 'unit' in attrib:
attrib['unit'] = self.unit.to_string('cds')
with w.tag(self._element_name, attrib=attrib):
if self.description is not None:
w.element('DESCRIPTION', self.description, wrap=True)
if not self.values.is_defaults():
self.values.to_xml(w, **kwargs)
for link in self.links:
link.to_xml(w, **kwargs)
def to_table_column(self, column):
"""
Sets the attributes of a given `astropy.table.Column` instance
to match the information in this `Field`.
"""
for key in ['ucd', 'width', 'precision', 'utype', 'xtype']:
val = getattr(self, key, None)
if val is not None:
column.meta[key] = val
if not self.values.is_defaults():
self.values.to_table_column(column)
for link in self.links:
link.to_table_column(column)
if self.description is not None:
column.description = self.description
if self.unit is not None:
# TODO: Use units framework when it's available
column.unit = self.unit
if (isinstance(self.converter, converters.FloatingPoint) and
self.converter.output_format != '{!r:>}'):
column.format = self.converter.output_format
elif isinstance(self.converter, converters.Char):
column.info.meta['_votable_string_dtype'] = 'char'
elif isinstance(self.converter, converters.UnicodeChar):
column.info.meta['_votable_string_dtype'] = 'unicodeChar'
@classmethod
def from_table_column(cls, votable, column):
"""
Restores a `Field` instance from a given
`astropy.table.Column` instance.
"""
kwargs = {}
meta = column.info.meta
if meta:
for key in ['ucd', 'width', 'precision', 'utype', 'xtype']:
val = meta.get(key, None)
if val is not None:
kwargs[key] = val
# TODO: Use the unit framework when available
if column.info.unit is not None:
kwargs['unit'] = column.info.unit
kwargs['name'] = column.info.name
result = converters.table_column_to_votable_datatype(column)
kwargs.update(result)
field = cls(votable, **kwargs)
if column.info.description is not None:
field.description = column.info.description
field.values.from_table_column(column)
if meta and 'links' in meta:
for link in meta['links']:
field.links.append(Link.from_table_column(link))
# TODO: Parse format into precision and width
return field
class Param(Field):
"""
PARAM_ element: constant-valued columns in the data.
:class:`Param` objects are a subclass of :class:`Field`, and have
all of its methods and members. Additionally, it defines :attr:`value`.
"""
_attr_list_11 = Field._attr_list_11 + ['value']
_attr_list_12 = Field._attr_list_12 + ['value']
_element_name = 'PARAM'
def __init__(self, votable, ID=None, name=None, value=None, datatype=None,
arraysize=None, ucd=None, unit=None, width=None,
precision=None, utype=None, type=None, id=None, config=None,
pos=None, **extra):
self._value = value
Field.__init__(self, votable, ID=ID, name=name, datatype=datatype,
arraysize=arraysize, ucd=ucd, unit=unit,
precision=precision, utype=utype, type=type,
id=id, config=config, pos=pos, **extra)
@property
def value(self):
"""
[*required*] The constant value of the parameter. Its type is
determined by the :attr:`~Field.datatype` member.
"""
return self._value
@value.setter
def value(self, value):
if value is None:
value = ""
if isinstance(value, str):
self._value = self.converter.parse(
value, self._config, self._pos)[0]
else:
self._value = value
def _setup(self, config, pos):
Field._setup(self, config, pos)
self.value = self._value
def to_xml(self, w, **kwargs):
tmp_value = self._value
self._value = self.converter.output(tmp_value, False)
# We must always have a value
if self._value is None:
self._value = ""
Field.to_xml(self, w, **kwargs)
self._value = tmp_value
class CooSys(SimpleElement):
"""
COOSYS_ element: defines a coordinate system.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = ['ID', 'equinox', 'epoch', 'system']
_element_name = 'COOSYS'
def __init__(self, ID=None, equinox=None, epoch=None, system=None, id=None,
config=None, pos=None, **extra):
if config is None:
config = {}
self._config = config
self._pos = pos
# COOSYS was deprecated in 1.2 but then re-instated in 1.3
if (config.get('version_1_2_or_later') and
not config.get('version_1_3_or_later')):
warn_or_raise(W27, W27, (), config, pos)
SimpleElement.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.equinox = equinox
self.epoch = epoch
self.system = system
warn_unknown_attrs('COOSYS', extra.keys(), config, pos)
@property
def ID(self):
"""
[*required*] The XML ID of the COOSYS_ element, used for
cross-referencing. May be `None` or a string conforming to
XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
if self._config.get('version_1_1_or_later'):
if ID is None:
vo_raise(E15, (), self._config, self._pos)
xmlutil.check_id(ID, 'ID', self._config, self._pos)
self._ID = ID
@property
def system(self):
"""
Specifies the type of coordinate system. Valid choices are:
'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',
'supergalactic', 'xy', 'barycentric', or 'geo_app'
"""
return self._system
@system.setter
def system(self, system):
if system not in ('eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5',
'galactic', 'supergalactic', 'xy', 'barycentric',
'geo_app'):
warn_or_raise(E16, E16, system, self._config, self._pos)
self._system = system
@system.deleter
def system(self):
self._system = None
@property
def equinox(self):
"""
A parameter required to fix the equatorial or ecliptic systems
        (e.g. "J2000" as the default for "eq_FK5", or "B1950" as the
        default for "eq_FK4").
"""
return self._equinox
@equinox.setter
def equinox(self, equinox):
check_astroyear(equinox, 'equinox', self._config, self._pos)
self._equinox = equinox
@equinox.deleter
def equinox(self):
self._equinox = None
@property
def epoch(self):
"""
Specifies the epoch of the positions. It must be a string
specifying an astronomical year.
"""
return self._epoch
@epoch.setter
def epoch(self, epoch):
check_astroyear(epoch, 'epoch', self._config, self._pos)
self._epoch = epoch
@epoch.deleter
def epoch(self):
self._epoch = None
class TimeSys(SimpleElement):
"""
TIMESYS_ element: defines a time system.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = ['ID', 'timeorigin', 'timescale', 'refposition']
_element_name = 'TIMESYS'
def __init__(self, ID=None, timeorigin=None, timescale=None, refposition=None, id=None,
config=None, pos=None, **extra):
if config is None:
config = {}
self._config = config
self._pos = pos
# TIMESYS is supported starting in version 1.4
if not config['version_1_4_or_later']:
warn_or_raise(
W54, W54, config['version'], config, pos)
SimpleElement.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.timeorigin = timeorigin
self.timescale = timescale
self.refposition = refposition
warn_unknown_attrs('TIMESYS', extra.keys(), config, pos,
['ID', 'timeorigin', 'timescale', 'refposition'])
@property
def ID(self):
"""
[*required*] The XML ID of the TIMESYS_ element, used for
cross-referencing. Must be a string conforming to
XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
if ID is None:
vo_raise(E22, (), self._config, self._pos)
xmlutil.check_id(ID, 'ID', self._config, self._pos)
self._ID = ID
@property
def timeorigin(self):
"""
Specifies the time origin of the time coordinate,
        given as a Julian Date for the time scale and
reference point defined. It is usually given as a
floating point literal; for convenience, the magic
strings "MJD-origin" (standing for 2400000.5) and
"JD-origin" (standing for 0) are also allowed.
The timeorigin attribute MUST be given unless the
time’s representation contains a year of a calendar
era, in which case it MUST NOT be present. In VOTables,
these representations currently are Gregorian calendar
years with xtype="timestamp", or years in the Julian
or Besselian calendar when a column has yr, a, or Ba as
its unit and no time origin is given.
"""
return self._timeorigin
@timeorigin.setter
def timeorigin(self, timeorigin):
if (timeorigin is not None and
timeorigin != 'MJD-origin' and timeorigin != 'JD-origin'):
try:
timeorigin = float(timeorigin)
except ValueError:
warn_or_raise(E23, E23, timeorigin, self._config, self._pos)
self._timeorigin = timeorigin
@timeorigin.deleter
def timeorigin(self):
self._timeorigin = None
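    # Example (a sketch): the setter keeps the two magic strings as-is
    # and coerces everything else to float, so timeorigin='2400000.5' is
    # stored as the float 2400000.5 while 'MJD-origin' stays a string.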
@property
def timescale(self):
"""
[*required*] String specifying the time scale used. Values
should be taken from the IVOA timescale vocabulary (documented
at http://www.ivoa.net/rdf/timescale).
"""
return self._timescale
@timescale.setter
def timescale(self, timescale):
self._timescale = timescale
@timescale.deleter
def timescale(self):
self._timescale = None
@property
def refposition(self):
"""
[*required*] String specifying the reference position. Values
should be taken from the IVOA refposition vocabulary (documented
at http://www.ivoa.net/rdf/refposition).
"""
return self._refposition
@refposition.setter
def refposition(self, refposition):
self._refposition = refposition
@refposition.deleter
def refposition(self):
self._refposition = None
class FieldRef(SimpleElement, _UtypeProperty, _UcdProperty):
"""
FIELDref_ element: used inside of GROUP_ elements to refer to remote FIELD_ elements.
"""
_attr_list_11 = ['ref']
_attr_list_12 = _attr_list_11 + ['ucd', 'utype']
_element_name = "FIELDref"
_utype_in_v1_2 = True
_ucd_in_v1_2 = True
def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None,
**extra):
"""
*table* is the :class:`Table` object that this :class:`FieldRef`
is a member of.
*ref* is the ID to reference a :class:`Field` object defined
elsewhere.
"""
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
self._table = table
self.ref = ref
self.ucd = ucd
self.utype = utype
if config.get('version_1_2_or_later'):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if ucd is not None:
warn_unknown_attrs(self._element_name, ['ucd'], config, pos)
if utype is not None:
warn_unknown_attrs(self._element_name, ['utype'], config, pos)
@property
def ref(self):
"""The ID_ of the FIELD_ that this FIELDref_ references."""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
def get_ref(self):
"""
Lookup the :class:`Field` instance that this :class:`FieldRef`
references.
"""
for field in self._table._votable.iter_fields_and_params():
if isinstance(field, Field) and field.ID == self.ref:
return field
vo_raise(
f"No field named '{self.ref}'",
self._config, self._pos, KeyError)
class ParamRef(SimpleElement, _UtypeProperty, _UcdProperty):
"""
PARAMref_ element: used inside of GROUP_ elements to refer to remote PARAM_ elements.
The keyword arguments correspond to setting members of the same
name, documented below.
It contains the following publicly-accessible members:
*ref*: An XML ID referring to a <PARAM> element.
"""
_attr_list_11 = ['ref']
_attr_list_12 = _attr_list_11 + ['ucd', 'utype']
_element_name = "PARAMref"
_utype_in_v1_2 = True
_ucd_in_v1_2 = True
def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._table = table
self.ref = ref
self.ucd = ucd
self.utype = utype
if config.get('version_1_2_or_later'):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if ucd is not None:
warn_unknown_attrs(self._element_name, ['ucd'], config, pos)
if utype is not None:
warn_unknown_attrs(self._element_name, ['utype'], config, pos)
@property
def ref(self):
"""The ID_ of the PARAM_ that this PARAMref_ references."""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
def get_ref(self):
"""
        Lookup the :class:`Param` instance that this :class:`ParamRef`
references.
"""
for param in self._table._votable.iter_fields_and_params():
if isinstance(param, Param) and param.ID == self.ref:
return param
vo_raise(
f"No params named '{self.ref}'",
self._config, self._pos, KeyError)
class Group(Element, _IDProperty, _NameProperty, _UtypeProperty,
_UcdProperty, _DescriptionProperty):
"""
GROUP_ element: groups FIELD_ and PARAM_ elements.
    This information is currently ignored by the vo package---that is,
the columns in the recarray are always flat---but the grouping
information is stored so that it can be written out again to the
XML file.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(self, table, ID=None, name=None, ref=None, ucd=None,
utype=None, id=None, config=None, pos=None, **extra):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._table = table
self.ID = (resolve_id(ID, id, config, pos)
or xmlutil.fix_id(name, config, pos))
self.name = name
self.ref = ref
self.ucd = ucd
self.utype = utype
self.description = None
self._entries = HomogeneousList(
(FieldRef, ParamRef, Group, Param))
warn_unknown_attrs('GROUP', extra.keys(), config, pos)
def __repr__(self):
return f'<GROUP>... {len(self._entries)} entries ...</GROUP>'
@property
def ref(self):
"""
Currently ignored, as it's not clear from the spec how this is
meant to work.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def entries(self):
"""
[read-only] A list of members of the GROUP_. This list may
only contain objects of type :class:`Param`, :class:`Group`,
:class:`ParamRef` and :class:`FieldRef`.
"""
return self._entries
def _add_fieldref(self, iterator, tag, data, config, pos):
fieldref = FieldRef(self._table, config=config, pos=pos, **data)
self.entries.append(fieldref)
def _add_paramref(self, iterator, tag, data, config, pos):
paramref = ParamRef(self._table, config=config, pos=pos, **data)
self.entries.append(paramref)
def _add_param(self, iterator, tag, data, config, pos):
if isinstance(self._table, VOTableFile):
votable = self._table
else:
votable = self._table._votable
param = Param(votable, config=config, pos=pos, **data)
self.entries.append(param)
param.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self._table, config=config, pos=pos, **data)
self.entries.append(group)
group.parse(iterator, config)
def parse(self, iterator, config):
tag_mapping = {
'FIELDref': self._add_fieldref,
'PARAMref': self._add_paramref,
'PARAM': self._add_param,
'GROUP': self._add_group,
'DESCRIPTION': self._ignore_add}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos)
else:
if tag == 'DESCRIPTION':
if self.description is not None:
warn_or_raise(W17, W17, 'GROUP', config, pos)
self.description = data or None
elif tag == 'GROUP':
break
return self
def to_xml(self, w, **kwargs):
with w.tag(
'GROUP',
attrib=w.object_attrs(
self, ['ID', 'name', 'ref', 'ucd', 'utype'])):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for entry in self.entries:
entry.to_xml(w, **kwargs)
def iter_fields_and_params(self):
"""
Recursively iterate over all :class:`Param` elements in this
:class:`Group`.
"""
for entry in self.entries:
if isinstance(entry, Param):
yield entry
elif isinstance(entry, Group):
yield from entry.iter_fields_and_params()
def iter_groups(self):
"""
Recursively iterate over all sub-:class:`Group` instances in
this :class:`Group`.
"""
for entry in self.entries:
if isinstance(entry, Group):
yield entry
yield from entry.iter_groups()
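# Example (a sketch): walking every PARAM in a group tree, however
# deeply nested:
#
#     >>> for param in group.iter_fields_and_params():
#     ...     print(param.ID, param.value)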
class Table(Element, _IDProperty, _NameProperty, _UcdProperty,
_DescriptionProperty):
"""
TABLE_ element: optionally contains data.
It contains the following publicly-accessible and mutable
attribute:
*array*: A Numpy masked array of the data itself, where each
row is a row of votable data, and columns are named and typed
based on the <FIELD> elements of the table. The mask is
parallel to the data array, except for variable-length fields.
For those fields, the numpy array's column type is "object"
(``"O"``), and another masked array is stored there.
    If the Table contains no data (for example, its enclosing
    :class:`Resource` has :attr:`~Resource.type` == 'meta'), *array*
    will have zero length.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(self, votable, ID=None, name=None, ref=None, ucd=None,
utype=None, nrows=None, id=None, config=None, pos=None,
**extra):
if config is None:
config = {}
self._config = config
self._pos = pos
self._empty = False
Element.__init__(self)
self._votable = votable
self.ID = (resolve_id(ID, id, config, pos)
or xmlutil.fix_id(name, config, pos))
self.name = name
xmlutil.check_id(ref, 'ref', config, pos)
self._ref = ref
self.ucd = ucd
self.utype = utype
if nrows is not None:
nrows = int(nrows)
if nrows < 0:
raise ValueError("'nrows' cannot be negative.")
self._nrows = nrows
self.description = None
self.format = 'tabledata'
self._fields = HomogeneousList(Field)
self._params = HomogeneousList(Param)
self._groups = HomogeneousList(Group)
self._links = HomogeneousList(Link)
self._infos = HomogeneousList(Info)
self.array = ma.array([])
warn_unknown_attrs('TABLE', extra.keys(), config, pos)
def __repr__(self):
return repr(self.to_table())
def __bytes__(self):
return bytes(self.to_table())
def __str__(self):
return str(self.to_table())
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, ref):
"""
Refer to another TABLE, previously defined, by the *ref* ID_
for all metadata (FIELD_, PARAM_ etc.) information.
"""
# When the ref changes, we want to verify that it will work
# by actually going and looking for the referenced table.
# If found, set a bunch of properties in this table based
# on the other one.
xmlutil.check_id(ref, 'ref', self._config, self._pos)
if ref is not None:
try:
table = self._votable.get_table_by_id(ref, before=self)
except KeyError:
warn_or_raise(
W43, W43, ('TABLE', self.ref), self._config, self._pos)
ref = None
else:
self._fields = table.fields
self._params = table.params
self._groups = table.groups
self._links = table.links
else:
del self._fields[:]
del self._params[:]
del self._groups[:]
del self._links[:]
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def format(self):
"""
[*required*] The serialization format of the table. Must be
one of:
        'tabledata' (TABLEDATA_), 'binary' (BINARY_), 'binary2'
        (BINARY2_), or 'fits' (FITS_).
        Note that the 'fits' format, since it requires an external
        file, cannot be written out. Any file read in with 'fits'
        format will be written out, by default, in 'tabledata' format.
See :ref:`astropy:votable-serialization`.
"""
return self._format
@format.setter
def format(self, format):
format = format.lower()
if format == 'fits':
vo_raise("fits format can not be written out, only read.",
self._config, self._pos, NotImplementedError)
if format == 'binary2':
if not self._config['version_1_3_or_later']:
vo_raise(
"binary2 only supported in votable 1.3 or later",
self._config, self._pos)
elif format not in ('tabledata', 'binary'):
vo_raise(f"Invalid format '{format}'",
self._config, self._pos)
self._format = format
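    # Usage sketch (illustrative, not part of the original module): the
    # setter above accepts only the writable serialization names, and
    # 'binary2' additionally requires a VOTable 1.3+ file.  Assuming
    # ``tab`` is a Table parsed from a version 1.3 file:
    #
    #     >>> tab.format = 'binary2'   # doctest: +SKIP
    #     >>> tab.format               # doctest: +SKIP
    #     'binary2'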
@property
def nrows(self):
"""
[*immutable*] The number of rows in the table, as specified in
the XML file.
"""
return self._nrows
@property
def fields(self):
"""
A list of :class:`Field` objects describing the types of each
of the data columns.
"""
return self._fields
@property
def params(self):
"""
A list of parameters (constant-valued columns) for the
table. Must contain only :class:`Param` objects.
"""
return self._params
@property
def groups(self):
"""
A list of :class:`Group` objects describing how the columns
and parameters are grouped. Currently this information is
only kept around for round-tripping and informational
purposes.
"""
return self._groups
@property
def links(self):
"""
A list of :class:`Link` objects (pointers to other documents
or servers through a URI) for the table.
"""
return self._links
@property
def infos(self):
"""
A list of :class:`Info` objects for the table. Allows for
post-operational diagnostics.
"""
return self._infos
def is_empty(self):
"""
Returns True if this table doesn't contain any real data
because it was skipped over by the parser (through use of the
``table_number`` kwarg).
"""
return self._empty
def create_arrays(self, nrows=0, config=None):
"""
        Create a new array to hold the data based on the current set
        of fields, and store it in the *array* member variable.
Any data in the existing array will be lost.
*nrows*, if provided, is the number of rows to allocate.
"""
if nrows is None:
nrows = 0
fields = self.fields
if len(fields) == 0:
array = np.recarray((nrows,), dtype='O')
mask = np.zeros((nrows,), dtype='b')
else:
# for field in fields: field._setup(config)
Field.uniqify_names(fields)
dtype = []
for x in fields:
if x._unique_name == x.ID:
id = x.ID
else:
id = (x._unique_name, x.ID)
dtype.append((id, x.converter.format))
array = np.recarray((nrows,), dtype=np.dtype(dtype))
descr_mask = []
for d in array.dtype.descr:
                new_type = 'O' if d[1][1] == 'O' else 'bool'
if len(d) == 2:
descr_mask.append((d[0], new_type))
elif len(d) == 3:
descr_mask.append((d[0], new_type, d[2]))
mask = np.zeros((nrows,), dtype=descr_mask)
self.array = ma.array(array, mask=mask)
def _resize_strategy(self, size):
"""
Return a new (larger) size based on size, used for
reallocating an array when it fills up. This is in its own
function so the resizing strategy can be easily replaced.
"""
# Once we go beyond 0, make a big step -- after that use a
# factor of 1.5 to help keep memory usage compact
if size == 0:
return 512
return int(np.ceil(size * RESIZE_AMOUNT))
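    # Illustrative growth sequence (assuming the module-level RESIZE_AMOUNT
    # is 1.5, consistent with the comment above): the first allocation jumps
    # straight to 512 rows, then grows geometrically:
    #
    #     >>> tab._resize_strategy(0)     # doctest: +SKIP
    #     512
    #     >>> tab._resize_strategy(512)   # doctest: +SKIP
    #     768
    #     >>> tab._resize_strategy(768)   # doctest: +SKIP
    #     1152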
def _add_field(self, iterator, tag, data, config, pos):
field = Field(self._votable, config=config, pos=pos, **data)
self.fields.append(field)
field.parse(iterator, config)
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self._votable, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _add_link(self, iterator, tag, data, config, pos):
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
if not config.get('version_1_2_or_later'):
warn_or_raise(W26, W26, ('INFO', 'TABLE', '1.2'), config, pos)
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def parse(self, iterator, config):
columns = config.get('columns')
# If we've requested to read in only a specific table, skip
# all others
table_number = config.get('table_number')
current_table_number = config.get('_current_table_number')
skip_table = False
if current_table_number is not None:
config['_current_table_number'] += 1
if (table_number is not None and
table_number != current_table_number):
skip_table = True
self._empty = True
table_id = config.get('table_id')
if table_id is not None:
if table_id != self.ID:
skip_table = True
self._empty = True
if self.ref is not None:
# This table doesn't have its own datatype descriptors, it
# just references those from another table.
# This is to call the property setter to go and get the
# referenced information
self.ref = self.ref
for start, tag, data, pos in iterator:
if start:
if tag == 'DATA':
warn_unknown_attrs(
'DATA', data.keys(), config, pos)
break
else:
if tag == 'TABLE':
return self
elif tag == 'DESCRIPTION':
if self.description is not None:
                        warn_or_raise(W17, W17, 'TABLE', config, pos)
self.description = data or None
else:
tag_mapping = {
'FIELD': self._add_field,
'PARAM': self._add_param,
'GROUP': self._add_group,
'LINK': self._add_link,
'INFO': self._add_info,
'DESCRIPTION': self._ignore_add}
for start, tag, data, pos in iterator:
if start:
if tag == 'DATA':
if len(self.fields) == 0:
warn_or_raise(E25, E25, None, config, pos)
warn_unknown_attrs(
'DATA', data.keys(), config, pos)
break
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos)
else:
if tag == 'DESCRIPTION':
if self.description is not None:
                        warn_or_raise(W17, W17, 'TABLE', config, pos)
self.description = data or None
elif tag == 'TABLE':
# For error checking purposes
Field.uniqify_names(self.fields)
# We still need to create arrays, even if the file
# contains no DATA section
self.create_arrays(nrows=0, config=config)
return self
self.create_arrays(nrows=self._nrows, config=config)
fields = self.fields
names = [x.ID for x in fields]
# Deal with a subset of the columns, if requested.
if not columns:
colnumbers = list(range(len(fields)))
else:
if isinstance(columns, str):
columns = [columns]
columns = np.asarray(columns)
if issubclass(columns.dtype.type, np.integer):
                if np.any(columns < 0) or np.any(columns >= len(fields)):
raise ValueError(
"Some specified column numbers out of range")
colnumbers = columns
elif issubclass(columns.dtype.type, np.character):
try:
colnumbers = [names.index(x) for x in columns]
except ValueError:
raise ValueError(
f"Columns '{columns}' not found in fields list")
else:
raise TypeError("Invalid columns list")
if (not skip_table) and (len(fields) > 0):
for start, tag, data, pos in iterator:
if start:
if tag == 'TABLEDATA':
warn_unknown_attrs(
'TABLEDATA', data.keys(), config, pos)
self.array = self._parse_tabledata(
iterator, colnumbers, fields, config)
break
elif tag == 'BINARY':
warn_unknown_attrs(
'BINARY', data.keys(), config, pos)
self.array = self._parse_binary(
1, iterator, colnumbers, fields, config, pos)
break
elif tag == 'BINARY2':
if not config['version_1_3_or_later']:
warn_or_raise(
W52, W52, config['version'], config, pos)
self.array = self._parse_binary(
2, iterator, colnumbers, fields, config, pos)
break
elif tag == 'FITS':
warn_unknown_attrs(
'FITS', data.keys(), config, pos, ['extnum'])
try:
extnum = int(data.get('extnum', 0))
if extnum < 0:
raise ValueError("'extnum' cannot be negative.")
except ValueError:
vo_raise(E17, (), config, pos)
self.array = self._parse_fits(
iterator, extnum, config)
break
else:
warn_or_raise(W37, W37, tag, config, pos)
break
for start, tag, data, pos in iterator:
if not start and tag == 'DATA':
break
for start, tag, data, pos in iterator:
if start and tag == 'INFO':
if not config.get('version_1_2_or_later'):
warn_or_raise(
W26, W26, ('INFO', 'TABLE', '1.2'), config, pos)
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
elif not start and tag == 'TABLE':
break
return self
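    # Usage sketch (illustrative, not part of the original module): the
    # 'table_number', 'table_id' and 'columns' config entries consulted in
    # parse() above are normally populated via the keyword arguments of the
    # top-level astropy.io.votable.parse function, e.g. reading only the
    # second table, restricted to two named columns:
    #
    #     >>> from astropy.io.votable import parse   # doctest: +SKIP
    #     >>> vot = parse('data.xml', table_number=1,
    #     ...             columns=['RA', 'DEC'])     # doctest: +SKIP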
def _parse_tabledata(self, iterator, colnumbers, fields, config):
# Since we don't know the number of rows up front, we'll
# reallocate the record array to make room as we go. This
# prevents the need to scan through the XML twice. The
# allocation is by factors of 1.5.
invalid = config.get('invalid', 'exception')
# Need to have only one reference so that we can resize the
# array
array = self.array
del self.array
parsers = [field.converter.parse for field in fields]
binparsers = [field.converter.binparse for field in fields]
numrows = 0
alloc_rows = len(array)
colnumbers_bits = [i in colnumbers for i in range(len(fields))]
row_default = [x.converter.default for x in fields]
mask_default = [True] * len(fields)
array_chunk = []
mask_chunk = []
chunk_size = config.get('chunk_size', DEFAULT_CHUNK_SIZE)
for start, tag, data, pos in iterator:
if tag == 'TR':
# Now parse one row
row = row_default[:]
row_mask = mask_default[:]
i = 0
for start, tag, data, pos in iterator:
if start:
binary = (data.get('encoding', None) == 'base64')
warn_unknown_attrs(
tag, data.keys(), config, pos, ['encoding'])
else:
if tag == 'TD':
if i >= len(fields):
vo_raise(E20, len(fields), config, pos)
if colnumbers_bits[i]:
try:
if binary:
rawdata = base64.b64decode(
data.encode('ascii'))
buf = io.BytesIO(rawdata)
buf.seek(0)
try:
value, mask_value = binparsers[i](
buf.read)
except Exception as e:
vo_reraise(
e, config, pos,
"(in row {:d}, col '{}')".format(
len(array_chunk),
fields[i].ID))
else:
try:
value, mask_value = parsers[i](
data, config, pos)
except Exception as e:
vo_reraise(
e, config, pos,
"(in row {:d}, col '{}')".format(
len(array_chunk),
fields[i].ID))
except Exception as e:
if invalid == 'exception':
vo_reraise(e, config, pos)
else:
row[i] = value
row_mask[i] = mask_value
elif tag == 'TR':
break
else:
self._add_unknown_tag(
iterator, tag, data, config, pos)
i += 1
if i < len(fields):
vo_raise(E21, (i, len(fields)), config, pos)
array_chunk.append(tuple(row))
mask_chunk.append(tuple(row_mask))
if len(array_chunk) == chunk_size:
while numrows + chunk_size > alloc_rows:
alloc_rows = self._resize_strategy(alloc_rows)
if alloc_rows != len(array):
array = _resize(array, alloc_rows)
array[numrows:numrows + chunk_size] = array_chunk
array.mask[numrows:numrows + chunk_size] = mask_chunk
numrows += chunk_size
array_chunk = []
mask_chunk = []
elif not start and tag == 'TABLEDATA':
break
# Now, resize the array to the exact number of rows we need and
# put the last chunk values in there.
alloc_rows = numrows + len(array_chunk)
array = _resize(array, alloc_rows)
array[numrows:] = array_chunk
if alloc_rows != 0:
array.mask[numrows:] = mask_chunk
numrows += len(array_chunk)
if (self.nrows is not None and
self.nrows >= 0 and
self.nrows != numrows):
warn_or_raise(W18, W18, (self.nrows, numrows), config, pos)
self._nrows = numrows
return array
def _get_binary_data_stream(self, iterator, config):
have_local_stream = False
for start, tag, data, pos in iterator:
if tag == 'STREAM':
if start:
warn_unknown_attrs(
'STREAM', data.keys(), config, pos,
['type', 'href', 'actuate', 'encoding', 'expires',
'rights'])
if 'href' not in data:
have_local_stream = True
if data.get('encoding', None) != 'base64':
warn_or_raise(
W38, W38, data.get('encoding', None),
config, pos)
else:
href = data['href']
xmlutil.check_anyuri(href, config, pos)
encoding = data.get('encoding', None)
else:
buffer = data
break
if have_local_stream:
buffer = base64.b64decode(buffer.encode('ascii'))
string_io = io.BytesIO(buffer)
string_io.seek(0)
read = string_io.read
else:
if not href.startswith(('http', 'ftp', 'file')):
vo_raise(
"The vo package only supports remote data through http, " +
"ftp or file",
self._config, self._pos, NotImplementedError)
fd = urllib.request.urlopen(href)
if encoding is not None:
if encoding == 'gzip':
fd = gzip.GzipFile(href, 'rb', fileobj=fd)
elif encoding == 'base64':
fd = codecs.EncodedFile(fd, 'base64')
else:
vo_raise(
f"Unknown encoding type '{encoding}'",
self._config, self._pos, NotImplementedError)
read = fd.read
def careful_read(length):
result = read(length)
if len(result) != length:
raise EOFError
return result
return careful_read
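    # Minimal sketch (illustrative, not part of the original module) of the
    # contract the returned ``careful_read`` satisfies: it hands back
    # exactly ``length`` bytes or raises EOFError, which is what the row
    # loop in ``_parse_binary`` below relies on to detect end-of-stream:
    #
    #     >>> import io
    #     >>> read = io.BytesIO(b'abcd').read
    #     >>> def careful_read(length):          # mirrors the closure above
    #     ...     result = read(length)
    #     ...     if len(result) != length:
    #     ...         raise EOFError
    #     ...     return result
    #     >>> careful_read(3)
    #     b'abc'
    #     >>> careful_read(3)   # only one byte left, so this raises EOFError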
def _parse_binary(self, mode, iterator, colnumbers, fields, config, pos):
fields = self.fields
careful_read = self._get_binary_data_stream(iterator, config)
# Need to have only one reference so that we can resize the
# array
array = self.array
del self.array
binparsers = [field.converter.binparse for field in fields]
numrows = 0
alloc_rows = len(array)
while True:
# Resize result arrays if necessary
if numrows >= alloc_rows:
alloc_rows = self._resize_strategy(alloc_rows)
array = _resize(array, alloc_rows)
row_data = []
row_mask_data = []
try:
if mode == 2:
mask_bits = careful_read(int((len(fields) + 7) / 8))
row_mask_data = list(converters.bitarray_to_bool(
mask_bits, len(fields)))
# Ignore the mask for string columns (see issue 8995)
for i, f in enumerate(fields):
                        if (row_mask_data[i]
                                and f.datatype in ('char', 'unicodeChar')):
row_mask_data[i] = False
for i, binparse in enumerate(binparsers):
try:
value, value_mask = binparse(careful_read)
except EOFError:
raise
except Exception as e:
vo_reraise(
e, config, pos, "(in row {:d}, col '{}')".format(
numrows, fields[i].ID))
row_data.append(value)
if mode == 1:
row_mask_data.append(value_mask)
else:
row_mask_data[i] = row_mask_data[i] or value_mask
except EOFError:
break
row = [x.converter.default for x in fields]
row_mask = [False] * len(fields)
for i in colnumbers:
row[i] = row_data[i]
row_mask[i] = row_mask_data[i]
array[numrows] = tuple(row)
array.mask[numrows] = tuple(row_mask)
numrows += 1
array = _resize(array, numrows)
return array
def _parse_fits(self, iterator, extnum, config):
for start, tag, data, pos in iterator:
if tag == 'STREAM':
if start:
warn_unknown_attrs(
'STREAM', data.keys(), config, pos,
['type', 'href', 'actuate', 'encoding', 'expires',
'rights'])
href = data['href']
encoding = data.get('encoding', None)
else:
break
if not href.startswith(('http', 'ftp', 'file')):
vo_raise(
"The vo package only supports remote data through http, "
"ftp or file",
self._config, self._pos, NotImplementedError)
fd = urllib.request.urlopen(href)
if encoding is not None:
if encoding == 'gzip':
fd = gzip.GzipFile(href, 'r', fileobj=fd)
elif encoding == 'base64':
fd = codecs.EncodedFile(fd, 'base64')
else:
vo_raise(
f"Unknown encoding type '{encoding}'",
self._config, self._pos, NotImplementedError)
hdulist = fits.open(fd)
array = hdulist[int(extnum)].data
if array.dtype != self.array.dtype:
warn_or_raise(W19, W19, (), self._config, self._pos)
return array
def to_xml(self, w, **kwargs):
specified_format = kwargs.get('tabledata_format')
if specified_format is not None:
format = specified_format
else:
format = self.format
if format == 'fits':
format = 'tabledata'
with w.tag(
'TABLE',
attrib=w.object_attrs(
self,
('ID', 'name', 'ref', 'ucd', 'utype', 'nrows'))):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for element_set in (self.fields, self.params):
for element in element_set:
element._setup({}, None)
if self.ref is None:
for element_set in (self.fields, self.params, self.groups,
self.links):
for element in element_set:
element.to_xml(w, **kwargs)
elif kwargs['version_1_2_or_later']:
index = list(self._votable.iter_tables()).index(self)
group = Group(self, ID=f"_g{index}")
group.to_xml(w, **kwargs)
if len(self.array):
with w.tag('DATA'):
if format == 'tabledata':
self._write_tabledata(w, **kwargs)
elif format == 'binary':
self._write_binary(1, w, **kwargs)
elif format == 'binary2':
self._write_binary(2, w, **kwargs)
if kwargs['version_1_2_or_later']:
for element in self._infos:
element.to_xml(w, **kwargs)
def _write_tabledata(self, w, **kwargs):
fields = self.fields
array = self.array
with w.tag('TABLEDATA'):
w._flush()
if (_has_c_tabledata_writer and
not kwargs.get('_debug_python_based_parser')):
supports_empty_values = [
field.converter.supports_empty_values(kwargs)
for field in fields]
fields = [field.converter.output for field in fields]
indent = len(w._tags) - 1
tablewriter.write_tabledata(
w.write, array.data, array.mask, fields,
supports_empty_values, indent, 1 << 8)
else:
write = w.write
indent_spaces = w.get_indentation_spaces()
tr_start = indent_spaces + "<TR>\n"
tr_end = indent_spaces + "</TR>\n"
td = indent_spaces + " <TD>{}</TD>\n"
td_empty = indent_spaces + " <TD/>\n"
fields = [(i, field.converter.output,
field.converter.supports_empty_values(kwargs))
for i, field in enumerate(fields)]
for row in range(len(array)):
write(tr_start)
array_row = array.data[row]
mask_row = array.mask[row]
for i, output, supports_empty_values in fields:
data = array_row[i]
masked = mask_row[i]
if supports_empty_values and np.all(masked):
write(td_empty)
else:
try:
val = output(data, masked)
except Exception as e:
vo_reraise(
e,
additional="(in row {:d}, col '{}')".format(
row, self.fields[i].ID))
if len(val):
write(td.format(val))
else:
write(td_empty)
write(tr_end)
def _write_binary(self, mode, w, **kwargs):
fields = self.fields
array = self.array
if mode == 1:
tag_name = 'BINARY'
else:
tag_name = 'BINARY2'
with w.tag(tag_name):
with w.tag('STREAM', encoding='base64'):
fields_basic = [(i, field.converter.binoutput)
for (i, field) in enumerate(fields)]
data = io.BytesIO()
for row in range(len(array)):
array_row = array.data[row]
array_mask = array.mask[row]
if mode == 2:
flattened = np.array([np.all(x) for x in array_mask])
data.write(converters.bool_to_bitarray(flattened))
for i, converter in fields_basic:
try:
chunk = converter(array_row[i], array_mask[i])
                            assert isinstance(chunk, bytes)
except Exception as e:
vo_reraise(
e, additional=f"(in row {row:d}, col '{fields[i].ID}')")
data.write(chunk)
w._flush()
w.write(base64.b64encode(data.getvalue()).decode('ascii'))
def to_table(self, use_names_over_ids=False):
"""
Convert this VO Table to an `astropy.table.Table` instance.
Parameters
----------
use_names_over_ids : bool, optional
When `True` use the ``name`` attributes of columns as the
names of columns in the `astropy.table.Table` instance.
Since names are not guaranteed to be unique, this may cause
some columns to be renamed by appending numbers to the end.
Otherwise (default), use the ID attributes as the column
names.
.. warning::
Variable-length array fields may not be restored
identically when round-tripping through the
`astropy.table.Table` instance.
"""
from astropy.table import Table
meta = {}
for key in ['ID', 'name', 'ref', 'ucd', 'utype', 'description']:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
if use_names_over_ids:
names = [field.name for field in self.fields]
unique_names = []
            for name in names:
                new_name = name
                suffix = 2
                while new_name in unique_names:
                    new_name = f'{name}{suffix}'
                    suffix += 1
unique_names.append(new_name)
names = unique_names
else:
names = [field.ID for field in self.fields]
table = Table(self.array, names=names, meta=meta)
for name, field in zip(names, self.fields):
column = table[name]
field.to_table_column(column)
return table
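    # Usage sketch (illustrative, not part of the original module),
    # assuming a VOTable file 'data.xml' with at least one table:
    #
    #     >>> from astropy.io.votable import parse            # doctest: +SKIP
    #     >>> votable = parse('data.xml')                     # doctest: +SKIP
    #     >>> t = votable.get_first_table().to_table(
    #     ...     use_names_over_ids=True)                    # doctest: +SKIP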
@classmethod
def from_table(cls, votable, table):
"""
Create a `Table` instance from a given `astropy.table.Table`
instance.
"""
kwargs = {}
for key in ['ID', 'name', 'ref', 'ucd', 'utype']:
val = table.meta.get(key)
if val is not None:
kwargs[key] = val
new_table = cls(votable, **kwargs)
if 'description' in table.meta:
new_table.description = table.meta['description']
for colname in table.colnames:
column = table[colname]
new_table.fields.append(Field.from_table_column(votable, column))
if table.mask is None:
new_table.array = ma.array(np.asarray(table))
else:
new_table.array = ma.array(np.asarray(table),
mask=np.asarray(table.mask))
return new_table
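    # Round-trip sketch (illustrative, not part of the original module):
    # from_table and to_table are approximate inverses, modulo the
    # variable-length-array caveat documented on to_table.  Here
    # ``votable_file`` is a hypothetical enclosing VOTableFile:
    #
    #     >>> from astropy.table import Table as AstropyTable    # doctest: +SKIP
    #     >>> at = AstropyTable({'a': [1, 2], 'b': [3.0, 4.0]})  # doctest: +SKIP
    #     >>> vot_tab = Table.from_table(votable_file, at)       # doctest: +SKIP
    #     >>> vot_tab.to_table().colnames                        # doctest: +SKIP
    #     ['a', 'b']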
def iter_fields_and_params(self):
"""
Recursively iterate over all FIELD and PARAM elements in the
TABLE.
"""
yield from self.params
yield from self.fields
for group in self.groups:
yield from group.iter_fields_and_params()
get_field_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_fields_and_params', 'FIELD or PARAM',
"""
Looks up a FIELD or PARAM element by the given ID.
""")
get_field_by_id_or_name = _lookup_by_id_or_name_factory(
'iter_fields_and_params', 'FIELD or PARAM',
"""
Looks up a FIELD or PARAM element by the given ID or name.
""")
get_fields_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_fields_and_params', 'FIELD or PARAM',
"""
Looks up a FIELD or PARAM element by the given utype and
returns an iterator emitting all matches.
""")
def iter_groups(self):
"""
Recursively iterate over all GROUP elements in the TABLE.
"""
for group in self.groups:
yield group
yield from group.iter_groups()
get_group_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_groups', 'GROUP',
"""
Looks up a GROUP element by the given ID. Used by the group's
"ref" attribute
""")
get_groups_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_groups', 'GROUP',
"""
Looks up a GROUP element by the given utype and returns an
iterator emitting all matches.
""")
def iter_info(self):
yield from self.infos
class Resource(Element, _IDProperty, _NameProperty, _UtypeProperty,
_DescriptionProperty):
"""
RESOURCE_ element: Groups TABLE_ and RESOURCE_ elements.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(self, name=None, ID=None, utype=None, type='results',
id=None, config=None, pos=None, **kwargs):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self.name = name
self.ID = resolve_id(ID, id, config, pos)
self.utype = utype
self.type = type
self._extra_attributes = kwargs
self.description = None
self._coordinate_systems = HomogeneousList(CooSys)
self._time_systems = HomogeneousList(TimeSys)
self._groups = HomogeneousList(Group)
self._params = HomogeneousList(Param)
self._infos = HomogeneousList(Info)
self._links = HomogeneousList(Link)
self._tables = HomogeneousList(Table)
self._resources = HomogeneousList(Resource)
warn_unknown_attrs('RESOURCE', kwargs.keys(), config, pos)
def __repr__(self):
buff = io.StringIO()
w = XMLWriter(buff)
w.element(
self._element_name,
attrib=w.object_attrs(self, self._attr_list))
return buff.getvalue().strip()
@property
def type(self):
"""
        [*required*] The type of the resource. Must be either:
        - 'results': This resource contains actual result values
          (default)
        - 'meta': This resource contains only datatype descriptions
          (FIELD_ elements), but no actual data.
"""
return self._type
@type.setter
def type(self, type):
if type not in ('results', 'meta'):
vo_raise(E18, type, self._config, self._pos)
self._type = type
@property
def extra_attributes(self):
"""
A dictionary of string keys to string values containing any
extra attributes of the RESOURCE_ element that are not defined
in the specification. (The specification explicitly allows
for extra attributes here, but nowhere else.)
"""
return self._extra_attributes
@property
def coordinate_systems(self):
"""
A list of coordinate system definitions (COOSYS_ elements) for
the RESOURCE_. Must contain only `CooSys` objects.
"""
return self._coordinate_systems
@property
def time_systems(self):
"""
A list of time system definitions (TIMESYS_ elements) for
the RESOURCE_. Must contain only `TimeSys` objects.
"""
return self._time_systems
@property
def infos(self):
"""
A list of informational parameters (key-value pairs) for the
resource. Must only contain `Info` objects.
"""
return self._infos
@property
def groups(self):
"""
        A list of groups for the resource. Must contain only
        `Group` objects.
"""
return self._groups
@property
def params(self):
"""
A list of parameters (constant-valued columns) for the
resource. Must contain only `Param` objects.
"""
return self._params
@property
def links(self):
"""
A list of links (pointers to other documents or servers
through a URI) for the resource. Must contain only `Link`
objects.
"""
return self._links
@property
def tables(self):
"""
A list of tables in the resource. Must contain only
`Table` objects.
"""
return self._tables
@property
def resources(self):
"""
A list of nested resources inside this resource. Must contain
only `Resource` objects.
"""
return self._resources
def _add_table(self, iterator, tag, data, config, pos):
table = Table(self._votable, config=config, pos=pos, **data)
self.tables.append(table)
table.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self._votable, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_coosys(self, iterator, tag, data, config, pos):
coosys = CooSys(config=config, pos=pos, **data)
self.coordinate_systems.append(coosys)
coosys.parse(iterator, config)
def _add_timesys(self, iterator, tag, data, config, pos):
timesys = TimeSys(config=config, pos=pos, **data)
self.time_systems.append(timesys)
timesys.parse(iterator, config)
def _add_resource(self, iterator, tag, data, config, pos):
resource = Resource(config=config, pos=pos, **data)
self.resources.append(resource)
resource.parse(self._votable, iterator, config)
def _add_link(self, iterator, tag, data, config, pos):
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
def parse(self, votable, iterator, config):
self._votable = votable
tag_mapping = {
'TABLE': self._add_table,
'INFO': self._add_info,
'PARAM': self._add_param,
'GROUP': self._add_group,
'COOSYS': self._add_coosys,
'TIMESYS': self._add_timesys,
'RESOURCE': self._add_resource,
'LINK': self._add_link,
'DESCRIPTION': self._ignore_add
}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos)
elif tag == 'DESCRIPTION':
if self.description is not None:
warn_or_raise(W17, W17, 'RESOURCE', config, pos)
self.description = data or None
elif tag == 'RESOURCE':
break
del self._votable
return self
def to_xml(self, w, **kwargs):
attrs = w.object_attrs(self, ('ID', 'type', 'utype'))
attrs.update(self.extra_attributes)
with w.tag('RESOURCE', attrib=attrs):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for element_set in (self.coordinate_systems, self.time_systems,
self.params, self.infos, self.links,
self.tables, self.resources):
for element in element_set:
element.to_xml(w, **kwargs)
def iter_tables(self):
"""
Recursively iterates over all tables in the resource and
nested resources.
"""
yield from self.tables
for resource in self.resources:
yield from resource.iter_tables()
def iter_fields_and_params(self):
"""
Recursively iterates over all FIELD_ and PARAM_ elements in
the resource, its tables and nested resources.
"""
yield from self.params
for table in self.tables:
yield from table.iter_fields_and_params()
for resource in self.resources:
yield from resource.iter_fields_and_params()
def iter_coosys(self):
"""
Recursively iterates over all the COOSYS_ elements in the
resource and nested resources.
"""
yield from self.coordinate_systems
for resource in self.resources:
yield from resource.iter_coosys()
def iter_timesys(self):
"""
Recursively iterates over all the TIMESYS_ elements in the
resource and nested resources.
"""
yield from self.time_systems
for resource in self.resources:
yield from resource.iter_timesys()
def iter_info(self):
"""
Recursively iterates over all the INFO_ elements in the
resource and nested resources.
"""
yield from self.infos
for table in self.tables:
yield from table.iter_info()
for resource in self.resources:
yield from resource.iter_info()
class VOTableFile(Element, _IDProperty, _DescriptionProperty):
"""
VOTABLE_ element: represents an entire file.
The keyword arguments correspond to setting members of the same
name, documented below.
*version* is settable at construction time only, since conformance
tests for building the rest of the structure depend on it.
"""
def __init__(self, ID=None, id=None, config=None, pos=None, version="1.4"):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.description = None
self._coordinate_systems = HomogeneousList(CooSys)
self._time_systems = HomogeneousList(TimeSys)
self._params = HomogeneousList(Param)
self._infos = HomogeneousList(Info)
self._resources = HomogeneousList(Resource)
self._groups = HomogeneousList(Group)
version = str(version)
if version == '1.0':
warnings.warn('VOTable 1.0 support is deprecated in astropy 4.3 and will be '
'removed in a future release', AstropyDeprecationWarning)
        elif version not in self._version_namespace_map:
allowed_from_map = "', '".join(self._version_namespace_map)
raise ValueError(f"'version' should be in ('1.0', '{allowed_from_map}').")
self._version = version
def __repr__(self):
n_tables = len(list(self.iter_tables()))
return f'<VOTABLE>... {n_tables} tables ...</VOTABLE>'
@property
def version(self):
"""
The version of the VOTable specification that the file uses.
"""
return self._version
@version.setter
def version(self, version):
version = str(version)
if version not in self._version_namespace_map:
allowed_from_map = "', '".join(self._version_namespace_map)
raise ValueError(
f"astropy.io.votable only supports VOTable versions '{allowed_from_map}'")
self._version = version
@property
def coordinate_systems(self):
"""
A list of coordinate system descriptions for the file. Must
contain only `CooSys` objects.
"""
return self._coordinate_systems
@property
def time_systems(self):
"""
A list of time system descriptions for the file. Must
contain only `TimeSys` objects.
"""
return self._time_systems
@property
def params(self):
"""
A list of parameters (constant-valued columns) that apply to
the entire file. Must contain only `Param` objects.
"""
return self._params
@property
def infos(self):
"""
A list of informational parameters (key-value pairs) for the
entire file. Must only contain `Info` objects.
"""
return self._infos
@property
def resources(self):
"""
A list of resources, in the order they appear in the file.
Must only contain `Resource` objects.
"""
return self._resources
@property
def groups(self):
"""
A list of groups, in the order they appear in the file. Only
supported as a child of the VOTABLE element in VOTable 1.2 or
later.
"""
return self._groups
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_resource(self, iterator, tag, data, config, pos):
resource = Resource(config=config, pos=pos, **data)
self.resources.append(resource)
resource.parse(self, iterator, config)
def _add_coosys(self, iterator, tag, data, config, pos):
coosys = CooSys(config=config, pos=pos, **data)
self.coordinate_systems.append(coosys)
coosys.parse(iterator, config)
def _add_timesys(self, iterator, tag, data, config, pos):
timesys = TimeSys(config=config, pos=pos, **data)
self.time_systems.append(timesys)
timesys.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
if not config.get('version_1_2_or_later'):
warn_or_raise(W26, W26, ('GROUP', 'VOTABLE', '1.2'), config, pos)
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _get_version_checks(self):
config = {}
config['version_1_1_or_later'] = \
util.version_compare(self.version, '1.1') >= 0
config['version_1_2_or_later'] = \
util.version_compare(self.version, '1.2') >= 0
config['version_1_3_or_later'] = \
util.version_compare(self.version, '1.3') >= 0
config['version_1_4_or_later'] = \
util.version_compare(self.version, '1.4') >= 0
return config
# Map VOTable version numbers to namespace URIs and schema information.
_version_namespace_map = {
# Version 1.0 isn't well-supported, but is allowed on parse (with a warning).
# It used DTD rather than schema, so this information would not be useful.
# By omitting 1.0 from this dict we can use the keys as the list of versions
# that are allowed in various other checks.
"1.1": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.1",
"schema_location_attr": "xsi:noNamespaceSchemaLocation",
"schema_location_value": "http://www.ivoa.net/xml/VOTable/v1.1"
},
"1.2": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.2",
"schema_location_attr": "xsi:noNamespaceSchemaLocation",
"schema_location_value": "http://www.ivoa.net/xml/VOTable/v1.2"
},
# With 1.3 we'll be more explicit with the schema location.
# - xsi:schemaLocation uses the namespace name along with the URL
# to reference it.
# - For convenience, but somewhat confusingly, the namespace URIs
# are also usable URLs for accessing an applicable schema.
# However to avoid confusion, we'll use the explicit schema URL.
"1.3": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.3",
"schema_location_attr": "xsi:schemaLocation",
"schema_location_value":
"http://www.ivoa.net/xml/VOTable/v1.3 http://www.ivoa.net/xml/VOTable/VOTable-1.3.xsd"
},
# With 1.4 namespace URIs stopped incrementing with minor version changes
# so we use the same URI as with 1.3. See this IVOA note for more info:
# http://www.ivoa.net/documents/Notes/XMLVers/20180529/
"1.4": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.3",
"schema_location_attr": "xsi:schemaLocation",
"schema_location_value":
"http://www.ivoa.net/xml/VOTable/v1.3 http://www.ivoa.net/xml/VOTable/VOTable-1.4.xsd"
}
}
def parse(self, iterator, config):
config['_current_table_number'] = 0
for start, tag, data, pos in iterator:
if start:
if tag == 'xml':
pass
elif tag == 'VOTABLE':
if 'version' not in data:
warn_or_raise(W20, W20, self.version, config, pos)
config['version'] = self.version
else:
config['version'] = self._version = data['version']
if config['version'].lower().startswith('v'):
warn_or_raise(
W29, W29, config['version'], config, pos)
self._version = config['version'] = \
config['version'][1:]
if config['version'] not in self._version_namespace_map:
vo_warn(W21, config['version'], config, pos)
if 'xmlns' in data:
ns_info = self._version_namespace_map.get(config['version'], {})
correct_ns = ns_info.get('namespace_uri')
if data['xmlns'] != correct_ns:
vo_warn(W41, (correct_ns, data['xmlns']), config, pos)
else:
vo_warn(W42, (), config, pos)
break
else:
vo_raise(E19, (), config, pos)
config.update(self._get_version_checks())
tag_mapping = {
'PARAM': self._add_param,
'RESOURCE': self._add_resource,
'COOSYS': self._add_coosys,
'TIMESYS': self._add_timesys,
'INFO': self._add_info,
'DEFINITIONS': self._add_definitions,
'DESCRIPTION': self._ignore_add,
'GROUP': self._add_group}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos)
elif tag == 'DESCRIPTION':
if self.description is not None:
warn_or_raise(W17, W17, 'VOTABLE', config, pos)
self.description = data or None
if not len(self.resources) and config['version_1_2_or_later']:
warn_or_raise(W53, W53, (), config, pos)
return self
def to_xml(self, fd, compressed=False, tabledata_format=None,
_debug_python_based_parser=False, _astropy_version=None):
"""
Write to an XML file.
Parameters
----------
fd : str or file-like
Where to write the file. If a file-like object, must be writable.
compressed : bool, optional
When `True`, write to a gzip-compressed file. (Default:
`False`)
tabledata_format : str, optional
Override the format of the table(s) data to write. Must
be one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified
in each `Table` object as it was created or read in. See
:ref:`astropy:votable-serialization`.
"""
if tabledata_format is not None:
if tabledata_format.lower() not in (
'tabledata', 'binary', 'binary2'):
raise ValueError(f"Unknown format type '{format}'")
kwargs = {
'version': self.version,
'tabledata_format':
tabledata_format,
'_debug_python_based_parser': _debug_python_based_parser,
'_group_number': 1}
kwargs.update(self._get_version_checks())
with util.convert_to_writable_filelike(
fd, compressed=compressed) as fd:
w = XMLWriter(fd)
version = self.version
if _astropy_version is None:
lib_version = astropy_version
else:
lib_version = _astropy_version
xml_header = """
<?xml version="1.0" encoding="utf-8"?>
<!-- Produced with astropy.io.votable version {lib_version}
http://www.astropy.org/ -->\n"""
w.write(xml_header.lstrip().format(**locals()))
# Build the VOTABLE tag attributes.
votable_attr = {
'version': version,
'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"
}
ns_info = self._version_namespace_map.get(version, {})
namespace_uri = ns_info.get('namespace_uri')
if namespace_uri:
votable_attr['xmlns'] = namespace_uri
schema_location_attr = ns_info.get('schema_location_attr')
schema_location_value = ns_info.get('schema_location_value')
if schema_location_attr and schema_location_value:
votable_attr[schema_location_attr] = schema_location_value
with w.tag('VOTABLE', votable_attr):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
element_sets = [self.coordinate_systems, self.time_systems,
self.params, self.infos, self.resources]
if kwargs['version_1_2_or_later']:
element_sets[0] = self.groups
for element_set in element_sets:
for element in element_set:
element.to_xml(w, **kwargs)
def iter_tables(self):
"""
Iterates over all tables in the VOTable file in a "flat" way,
ignoring the nesting of resources etc.
"""
for resource in self.resources:
yield from resource.iter_tables()
def get_first_table(self):
"""
Often, you know there is only one table in the file, and
that's all you need. This method returns that first table.
"""
for table in self.iter_tables():
if not table.is_empty():
return table
raise IndexError("No table found in VOTABLE file.")
get_table_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_tables', 'TABLE',
"""
Looks up a TABLE_ element by the given ID. Used by the table
"ref" attribute.
""")
get_tables_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_tables', 'TABLE',
"""
Looks up a TABLE_ element by the given utype, and returns an
iterator emitting all matches.
""")
def get_table_by_index(self, idx):
"""
Get a table by its ordinal position in the file.
"""
for i, table in enumerate(self.iter_tables()):
if i == idx:
return table
raise IndexError(
f"No table at index {idx:d} found in VOTABLE file.")
def iter_fields_and_params(self):
"""
Recursively iterate over all FIELD_ and PARAM_ elements in the
VOTABLE_ file.
"""
for resource in self.resources:
yield from resource.iter_fields_and_params()
get_field_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_fields_and_params', 'FIELD',
"""
Looks up a FIELD_ element by the given ID_. Used by the field's
"ref" attribute.
""")
get_fields_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_fields_and_params', 'FIELD',
"""
Looks up a FIELD_ element by the given utype and returns an
iterator emitting all matches.
""")
get_field_by_id_or_name = _lookup_by_id_or_name_factory(
'iter_fields_and_params', 'FIELD',
"""
Looks up a FIELD_ element by the given ID_ or name.
""")
def iter_values(self):
"""
Recursively iterate over all VALUES_ elements in the VOTABLE_
file.
"""
for field in self.iter_fields_and_params():
yield field.values
get_values_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_values', 'VALUES',
"""
Looks up a VALUES_ element by the given ID. Used by the values
"ref" attribute.
""")
def iter_groups(self):
"""
Recursively iterate over all GROUP_ elements in the VOTABLE_
file.
"""
for table in self.iter_tables():
yield from table.iter_groups()
get_group_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_groups', 'GROUP',
"""
Looks up a GROUP_ element by the given ID. Used by the group's
"ref" attribute
""")
get_groups_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_groups', 'GROUP',
"""
Looks up a GROUP_ element by the given utype and returns an
iterator emitting all matches.
""")
def iter_coosys(self):
"""
Recursively iterate over all COOSYS_ elements in the VOTABLE_
file.
"""
yield from self.coordinate_systems
for resource in self.resources:
yield from resource.iter_coosys()
get_coosys_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_coosys', 'COOSYS',
"""Looks up a COOSYS_ element by the given ID.""")
def iter_timesys(self):
"""
Recursively iterate over all TIMESYS_ elements in the VOTABLE_
file.
"""
yield from self.time_systems
for resource in self.resources:
yield from resource.iter_timesys()
get_timesys_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_timesys', 'TIMESYS',
"""Looks up a TIMESYS_ element by the given ID.""")
def iter_info(self):
"""
Recursively iterate over all INFO_ elements in the VOTABLE_
file.
"""
yield from self.infos
for resource in self.resources:
yield from resource.iter_info()
get_info_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_info', 'INFO',
"""Looks up a INFO element by the given ID.""")
def set_all_tables_format(self, format):
"""
Set the output storage format of all tables in the file.
"""
for table in self.iter_tables():
table.format = format
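    # Usage sketch (illustrative, not part of the original module): switch
    # every table to the compact binary serialization before writing:
    #
    #     >>> votable.set_all_tables_format('binary')   # doctest: +SKIP
    #     >>> votable.to_xml('binary.xml')              # doctest: +SKIP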
@classmethod
def from_table(cls, table, table_id=None):
"""
Create a `VOTableFile` instance from a given
`astropy.table.Table` instance.
Parameters
----------
table_id : str, optional
Set the given ID attribute on the returned Table instance.
"""
votable_file = cls()
resource = Resource()
votable = Table.from_table(votable_file, table)
if table_id is not None:
votable.ID = table_id
resource.tables.append(votable)
votable_file.resources.append(resource)
return votable_file
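# End-to-end sketch (illustrative, not part of the original module): build
# a VOTableFile from an astropy Table and serialize it to XML:
#
#     >>> from astropy.table import Table as AstropyTable   # doctest: +SKIP
#     >>> at = AstropyTable({'flux': [1.0, 2.0]})           # doctest: +SKIP
#     >>> vot = VOTableFile.from_table(at, table_id='T1')   # doctest: +SKIP
#     >>> vot.to_xml('out.xml')                             # doctest: +SKIP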
|
cf392e0e7d9d47de3271f6372995af6361cde14b99345a8ee3d0aff5beaf4c17 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
.. _warnings:
Warnings
--------
.. note::
Most of the following warnings indicate violations of the VOTable
specification. They should be reported to the authors of the
tools that produced the VOTable file.
To control the warnings emitted, use the standard Python
:mod:`warnings` module and the ``astropy.io.votable.exceptions.conf.max_warnings``
configuration item. Most of these are of the type `VOTableSpecWarning`.
{warnings}
.. _exceptions:
Exceptions
----------
.. note::
This is a list of many of the fatal exceptions emitted by ``astropy.io.votable``
when the file does not conform to spec. Other exceptions may be
raised due to unforeseen cases or bugs in ``astropy.io.votable`` itself.
{exceptions}
"""
# STDLIB
import io
import re
from textwrap import dedent
from warnings import warn
from astropy import config as _config
from astropy.utils.exceptions import AstropyWarning
__all__ = [
'Conf', 'conf', 'warn_or_raise', 'vo_raise', 'vo_reraise', 'vo_warn',
'warn_unknown_attrs', 'parse_vowarning', 'VOWarning',
'VOTableChangeWarning', 'VOTableSpecWarning',
'UnimplementedWarning', 'IOWarning', 'VOTableSpecError']
# NOTE: Cannot put this in __init__.py due to circular import.
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.io.votable.exceptions`.
"""
max_warnings = _config.ConfigItem(
10,
'Number of times the same type of warning is displayed '
'before being suppressed',
cfgtype='integer')
conf = Conf()
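# Usage sketch (illustrative, not part of the original module): raise the
# per-type warning cap before parsing a file that emits many repeated
# warnings:
#
#     >>> from astropy.io.votable import exceptions   # doctest: +SKIP
#     >>> exceptions.conf.max_warnings = 100          # doctest: +SKIP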
def _format_message(message, name, config=None, pos=None):
if config is None:
config = {}
if pos is None:
pos = ('?', '?')
filename = config.get('filename', '?')
return f'{filename}:{pos[0]}:{pos[1]}: {name}: {message}'
def _suppressed_warning(warning, config, stacklevel=2):
warning_class = type(warning)
config.setdefault('_warning_counts', dict()).setdefault(warning_class, 0)
config['_warning_counts'][warning_class] += 1
message_count = config['_warning_counts'][warning_class]
if message_count <= conf.max_warnings:
if message_count == conf.max_warnings:
warning.formatted_message += \
' (suppressing further warnings of this type...)'
warn(warning, stacklevel=stacklevel+1)
def warn_or_raise(warning_class, exception_class=None, args=(), config=None,
pos=None, stacklevel=1):
"""
Warn or raise an exception, depending on the verify setting.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
# one would expect that calling warn_or_raise without config should not
# silence the warnings.
config_value = config.get('verify', 'warn')
if config_value == 'exception':
if exception_class is None:
exception_class = warning_class
vo_raise(exception_class, args, config, pos)
elif config_value == 'warn':
vo_warn(warning_class, args, config, pos, stacklevel=stacklevel+1)
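# Behaviour sketch (illustrative, not part of the original module) for the
# 'verify' setting consulted above:
#
#     >>> warn_or_raise(W01, config={'verify': 'warn'})        # emits W01
#     >>> warn_or_raise(W01, config={'verify': 'exception'})   # raises W01
#     >>> warn_or_raise(W01, config={'verify': 'ignore'})      # does nothing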
def vo_raise(exception_class, args=(), config=None, pos=None):
"""
Raise an exception, with proper position information if available.
"""
if config is None:
config = {}
raise exception_class(args, config, pos)
def vo_reraise(exc, config=None, pos=None, additional=''):
"""
Raise an exception, with proper position information if available.
Restores the original traceback of the exception, and should only
be called within an "except:" block of code.
"""
if config is None:
config = {}
message = _format_message(str(exc), exc.__class__.__name__, config, pos)
if message.split()[0] == str(exc).split()[0]:
message = str(exc)
if len(additional):
message += ' ' + additional
exc.args = (message,)
raise exc
def vo_warn(warning_class, args=(), config=None, pos=None, stacklevel=1):
"""
Warn, with proper position information if available.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
    # one would expect that calling vo_warn without config should not
# silence the warnings.
if config.get('verify', 'warn') != 'ignore':
warning = warning_class(args, config, pos)
_suppressed_warning(warning, config, stacklevel=stacklevel+1)
def warn_unknown_attrs(element, attrs, config, pos, good_attr=[], stacklevel=1):
for attr in attrs:
if attr not in good_attr:
vo_warn(W48, (attr, element), config, pos, stacklevel=stacklevel+1)
_warning_pat = re.compile(
r":?(?P<nline>[0-9?]+):(?P<nchar>[0-9?]+): " +
r"((?P<warning>[WE]\d+): )?(?P<rest>.*)$")
def parse_vowarning(line):
"""
Parses the vo warning string back into its parts.
"""
result = {}
match = _warning_pat.search(line)
if match:
result['warning'] = warning = match.group('warning')
if warning is not None:
result['is_warning'] = (warning[0].upper() == 'W')
result['is_exception'] = not result['is_warning']
result['number'] = int(match.group('warning')[1:])
result['doc_url'] = f"io/votable/api_exceptions.html#{warning.lower()}"
else:
result['is_warning'] = False
result['is_exception'] = False
result['is_other'] = True
result['number'] = None
result['doc_url'] = None
try:
result['nline'] = int(match.group('nline'))
except ValueError:
result['nline'] = 0
try:
result['nchar'] = int(match.group('nchar'))
except ValueError:
result['nchar'] = 0
result['message'] = match.group('rest')
result['is_something'] = True
else:
result['warning'] = None
result['is_warning'] = False
result['is_exception'] = False
result['is_other'] = False
result['is_something'] = False
if not isinstance(line, str):
line = line.decode('utf-8')
result['message'] = line
return result
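# Usage sketch (illustrative, not part of the original module): parsing a
# formatted warning string back into its parts:
#
#     >>> r = parse_vowarning(
#     ...     "data.xml:10:6: W01: Array uses commas rather than whitespace")
#     >>> r['warning'], r['number'], r['nline'], r['nchar']
#     ('W01', 1, 10, 6)
#     >>> r['is_warning']
#     True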
class VOWarning(AstropyWarning):
"""
The base class of all VO warnings and exceptions.
Handles the formatting of the message with a warning or exception
code, filename, line and column number.
"""
default_args = ()
message_template = ''
def __init__(self, args, config=None, pos=None):
if config is None:
config = {}
if not isinstance(args, tuple):
args = (args, )
msg = self.message_template.format(*args)
self.formatted_message = _format_message(
msg, self.__class__.__name__, config, pos)
Warning.__init__(self, self.formatted_message)
def __str__(self):
return self.formatted_message
@classmethod
def get_short_name(cls):
if len(cls.default_args):
return cls.message_template.format(*cls.default_args)
return cls.message_template
class VOTableChangeWarning(VOWarning, SyntaxWarning):
"""
A change has been made to the input XML file.
"""
class VOTableSpecWarning(VOWarning, SyntaxWarning):
"""
The input XML file violates the spec, but there is an obvious workaround.
"""
class UnimplementedWarning(VOWarning, SyntaxWarning):
"""
A feature of the VOTABLE_ spec is not implemented.
"""
class IOWarning(VOWarning, RuntimeWarning):
"""
A network or IO error occurred, but was recovered using the cache.
"""
class VOTableSpecError(VOWarning, ValueError):
"""
The input XML file violates the spec and there is no good workaround.
"""
class W01(VOTableSpecWarning):
"""
The VOTable spec states:
If a cell contains an array or complex number, it should be
encoded as multiple numbers separated by whitespace.
Many VOTable files in the wild use commas as a separator instead,
and ``astropy.io.votable`` can support this convention depending on the
:ref:`astropy:verifying-votables` setting.
``astropy.io.votable`` always outputs files using only spaces, regardless of
how they were input.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#toc-header-35>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:TABLEDATA>`__
"""
message_template = "Array uses commas rather than whitespace"
class W02(VOTableSpecWarning):
r"""
XML ids must match the following regular expression::
^[A-Za-z_][A-Za-z0-9_\.\-]*$
    The VOTable 1.1 specification says the following:
According to the XML standard, the attribute ``ID`` is a
string beginning with a letter or underscore (``_``), followed
by a sequence of letters, digits, or any of the punctuation
characters ``.`` (dot), ``-`` (dash), ``_`` (underscore), or
``:`` (colon).
However, this is in conflict with the XML standard, which says
colons may not be used. VOTable 1.1's own schema does not allow a
colon here. Therefore, ``astropy.io.votable`` disallows the colon.
VOTable 1.2 corrects this error in the specification.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`XML Names <http://www.w3.org/TR/REC-xml/#NT-Name>`__
"""
message_template = "{} attribute '{}' is invalid. Must be a standard XML id"
default_args = ('x', 'y')
class W03(VOTableChangeWarning):
"""
The VOTable 1.1 spec says the following about ``name`` vs. ``ID``
on ``FIELD`` and ``VALUE`` elements:
``ID`` and ``name`` attributes have a different role in
VOTable: the ``ID`` is meant as a *unique identifier* of an
element seen as a VOTable component, while the ``name`` is
meant for presentation purposes, and need not to be unique
throughout the VOTable document. The ``ID`` attribute is
therefore required in the elements which have to be
referenced, but in principle any element may have an ``ID``
attribute. ... In summary, the ``ID`` is different from the
``name`` attribute in that (a) the ``ID`` attribute is made
from a restricted character set, and must be unique throughout
a VOTable document whereas names are standard XML attributes
and need not be unique; and (b) there should be support in the
parsing software to look up references and extract the
relevant element with matching ``ID``.
It is further recommended in the VOTable 1.2 spec:
While the ``ID`` attribute has to be unique in a VOTable
document, the ``name`` attribute need not. It is however
recommended, as a good practice, to assign unique names within
a ``TABLE`` element. This recommendation means that, between a
``TABLE`` and its corresponding closing ``TABLE`` tag,
``name`` attributes of ``FIELD``, ``PARAM`` and optional
``GROUP`` elements should be all different.
Since ``astropy.io.votable`` requires a unique identifier for each of its
columns, ``ID`` is used for the column name when present.
    However, when ``ID`` is not present (since it is not required by
    the specification), ``name`` is used instead; in that case, ``name``
    is cleansed by replacing invalid characters (such as whitespace)
    with underscores.
.. note::
This warning does not indicate that the input file is invalid
with respect to the VOTable specification, only that the
column names in the record array may not match exactly the
``name`` attributes specified in the file.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Implicitly generating an ID from a name '{}' -> '{}'"
default_args = ('x', 'y')
class W04(VOTableSpecWarning):
"""
The ``content-type`` attribute must use MIME content-type syntax as
defined in `RFC 2046 <https://tools.ietf.org/html/rfc2046>`__.
The current check for validity is somewhat over-permissive.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__
"""
message_template = "content-type '{}' must be a valid MIME content type"
default_args = ('x',)
class W05(VOTableSpecWarning):
"""
The attribute must be a valid URI as defined in `RFC 2396
<https://www.ietf.org/rfc/rfc2396.txt>`_.
"""
message_template = "'{}' is not a valid URI"
default_args = ('x',)
class W06(VOTableSpecWarning):
"""
This warning is emitted when a ``ucd`` attribute does not match
the syntax of a `unified content descriptor
<http://vizier.u-strasbg.fr/doc/UCD.htx>`__.
If the VOTable version is 1.2 or later, the UCD will also be
checked to ensure it conforms to the controlled vocabulary defined
by UCD1+.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:ucd>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:ucd>`__
"""
message_template = "Invalid UCD '{}': {}"
default_args = ('x', 'explanation')
class W07(VOTableSpecWarning):
"""
    An astroYear field is a Besselian or Julian year matching the
regular expression::
^[JB]?[0-9]+([.][0-9]*)?$
Defined in this XML Schema snippet::
<xs:simpleType name="astroYear">
<xs:restriction base="xs:token">
<xs:pattern value="[JB]?[0-9]+([.][0-9]*)?"/>
</xs:restriction>
</xs:simpleType>
"""
message_template = "Invalid astroYear in {}: '{}'"
default_args = ('x', 'y')
class W08(VOTableSpecWarning):
"""
    To avoid locale-dependent number parsing differences, ``astropy.io.votable``
may require a string or unicode string where a numeric type may
make more sense.
"""
message_template = "'{}' must be a str or bytes object"
default_args = ('x',)
class W09(VOTableSpecWarning):
"""
The VOTable specification uses the attribute name ``ID`` (with
uppercase letters) to specify unique identifiers. Some
VOTable-producing tools use the more standard lowercase ``id``
instead. ``astropy.io.votable`` accepts ``id`` and emits this warning if
``verify`` is ``'warn'``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "ID attribute not capitalized"
class W10(VOTableSpecWarning):
"""
The parser has encountered an element that does not exist in the
specification, or appears in an invalid context. Check the file
    against the VOTable schema (with a tool such as `xmllint
    <http://xmlsoft.org/xmllint.html>`__). If the file validates
against the schema, and you still receive this warning, this may
indicate a bug in ``astropy.io.votable``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Unknown tag '{}'. Ignoring"
default_args = ('x',)
class W11(VOTableSpecWarning):
"""
Earlier versions of the VOTable specification used a ``gref``
attribute on the ``LINK`` element to specify a `GLU reference
<http://aladin.u-strasbg.fr/glu/>`__. New files should
specify a ``glu:`` protocol using the ``href`` attribute.
Since ``astropy.io.votable`` does not currently support GLU references, it
likewise does not automatically convert the ``gref`` attribute to
the new form.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__
"""
message_template = "The gref attribute on LINK is deprecated in VOTable 1.1"
class W12(VOTableChangeWarning):
"""
In order to name the columns of the Numpy record array, each
``FIELD`` element must have either an ``ID`` or ``name`` attribute
to derive a name from. Strictly speaking, according to the
VOTable schema, the ``name`` attribute is required. However, if
    ``name`` is not present but ``ID`` is, and ``verify`` is not ``'exception'``,
``astropy.io.votable`` will continue without a ``name`` defined.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = (
"'{}' element must have at least one of 'ID' or 'name' attributes")
default_args = ('x',)
class W13(VOTableSpecWarning):
"""
Some VOTable files in the wild use non-standard datatype names. These
are mapped to standard ones using the following mapping::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' is not a valid VOTable datatype, should be '{}'"
default_args = ('x', 'y')
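# Illustrative sketch (not part of the original module): additional
# non-standard aliases encountered in the wild can be mapped onto standard
# datatypes at parse time; the file name and the 'ushort' alias below are
# hypothetical:
#
#     from astropy.io.votable import parse
#     votable = parse('example.xml', datatype_mapping={'ushort': 'int'})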
# W14: Deprecated
class W15(VOTableSpecWarning):
"""
The ``name`` attribute is required on every ``FIELD`` element.
However, many VOTable files in the wild omit it and provide only
    an ``ID`` instead. In this case, when ``verify`` is not ``'exception'``,
    ``astropy.io.votable`` will copy the ``ID`` attribute to a new ``name``
    attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "{} element missing required 'name' attribute"
default_args = ('x',)
# W16: Deprecated
class W17(VOTableSpecWarning):
"""
A ``DESCRIPTION`` element can only appear once within its parent
element.
According to the schema, it may only occur once (`1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
    <http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__).
    However, allowing more than one is a `proposed extension
    <http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:addesc>`__
    to VOTable 1.2.
"""
message_template = "{} element contains more than one DESCRIPTION element"
default_args = ('x',)
class W18(VOTableSpecWarning):
"""
The number of rows explicitly specified in the ``nrows`` attribute
does not match the actual number of rows (``TR`` elements) present
in the ``TABLE``. This may indicate truncation of the file, or an
internal error in the tool that produced it. If ``verify`` is not
``'exception'``, parsing will proceed, with the loss of some performance.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC10>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC10>`__
"""
message_template = 'TABLE specified nrows={}, but table contains {} rows'
default_args = ('x', 'y')
class W19(VOTableSpecWarning):
"""
The column fields as defined using ``FIELD`` elements do not match
those in the headers of the embedded FITS file. If ``verify`` is not
``'exception'``, the embedded FITS file will take precedence.
"""
message_template = (
'The fields defined in the VOTable do not match those in the ' +
'embedded FITS file')
class W20(VOTableSpecWarning):
"""
If no version number is explicitly given in the VOTable file, the
parser assumes it is written to the VOTable 1.1 specification.
"""
message_template = 'No version number specified in file. Assuming {}'
default_args = ('1.1',)
class W21(UnimplementedWarning):
"""
Unknown issues may arise using ``astropy.io.votable`` with VOTable files
from a version other than 1.1, 1.2, 1.3, or 1.4.
"""
message_template = (
'astropy.io.votable is designed for VOTable version 1.1, 1.2, 1.3,'
' and 1.4, but this file is {}')
default_args = ('x',)
class W22(VOTableSpecWarning):
"""
Version 1.0 of the VOTable specification used the ``DEFINITIONS``
element to define coordinate systems. Version 1.1 now uses
``COOSYS`` elements throughout the document.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:definitions>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:definitions>`__
"""
message_template = 'The DEFINITIONS element is deprecated in VOTable 1.1. Ignoring'
class W23(IOWarning):
"""
Raised when the VO service database can not be updated (possibly
due to a network outage). This is only a warning, since an older
    and possibly out-of-date VO service database was available
locally.
"""
message_template = "Unable to update service information for '{}'"
default_args = ('x',)
class W24(VOWarning, FutureWarning):
"""
    The VO catalog database retrieved from the www is designed for a
    newer version of ``astropy.io.votable``. This may cause problems or limit
    available features when performing service queries. Consider upgrading
    ``astropy.io.votable`` to the latest version.
"""
message_template = "The VO catalog database is for a later version of astropy.io.votable"
class W25(IOWarning):
"""
A VO service query failed due to a network error or malformed
arguments. Another alternative service may be attempted. If all
services fail, an exception will be raised.
"""
message_template = "'{}' failed with: {}"
default_args = ('service', '...')
class W26(VOTableSpecWarning):
"""
    The given element was not supported inside of its parent element
    until the specified VOTable version; however, the version declared
    in the file is an earlier one. The element may not be written out
    to the file.
"""
message_template = "'{}' inside '{}' added in VOTable {}"
default_args = ('child', 'parent', 'X.X')
class W27(VOTableSpecWarning):
"""
    The ``COOSYS`` element was deprecated in VOTable version 1.2 in
    favor of a reference to the Space-Time Coordinate (STC) data
    model (see `utype
    <http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:utype>`__
    and the IVOA note `referencing STC in VOTable
    <http://ivoa.net/Documents/latest/VOTableSTC.html>`__).
"""
message_template = "COOSYS deprecated in VOTable 1.2"
class W28(VOTableSpecWarning):
"""
    The given attribute was not supported on the given element until the
    specified VOTable version; however, the version declared in the file
    is an earlier one. The attribute may not be written out to
    the file.
"""
message_template = "'{}' on '{}' added in VOTable {}"
default_args = ('attribute', 'element', 'X.X')
class W29(VOTableSpecWarning):
"""
    Some VOTable files specify their version number in the form "v1.0",
    whereas the only form supported by the spec is "1.0".
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Version specified in non-standard form '{}'"
default_args = ('v1.0',)
class W30(VOTableSpecWarning):
"""
Some VOTable files write missing floating-point values in non-standard ways,
such as "null" and "-". If ``verify`` is not ``'exception'``, any
non-standard floating-point literals are treated as missing values.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid literal for float '{}'. Treating as empty."
default_args = ('x',)
class W31(VOTableSpecWarning):
"""
    Since NaNs cannot be represented in integer fields directly, a null
    value must be specified in the FIELD descriptor to support reading
    NaNs from the tabledata.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "NaN given in an integral field without a specified null value"
class W32(VOTableSpecWarning):
"""
Each field in a table must have a unique ID. If two or more fields
have the same ID, some will be renamed to ensure that all IDs are
unique.
From the VOTable 1.2 spec:
The ``ID`` and ``ref`` attributes are defined as XML types
``ID`` and ``IDREF`` respectively. This means that the
contents of ``ID`` is an identifier which must be unique
throughout a VOTable document, and that the contents of the
``ref`` attribute represents a reference to an identifier
which must exist in the VOTable document.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Duplicate ID '{}' renamed to '{}' to ensure uniqueness"
default_args = ('x', 'x_2')
class W33(VOTableChangeWarning):
"""
Each field in a table must have a unique name. If two or more
fields have the same name, some will be renamed to ensure that all
names are unique.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Column name '{}' renamed to '{}' to ensure uniqueness"
default_args = ('x', 'x_2')
class W34(VOTableSpecWarning):
"""
The attribute requires the value to be a valid XML token, as
defined by `XML 1.0
<http://www.w3.org/TR/2000/WD-xml-2e-20000814#NT-Nmtoken>`__.
"""
message_template = "'{}' is an invalid token for attribute '{}'"
default_args = ('x', 'y')
class W35(VOTableSpecWarning):
"""
The ``name`` and ``value`` attributes are required on all ``INFO``
elements.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC32>`__
"""
message_template = "'{}' attribute required for INFO elements"
default_args = ('x',)
class W36(VOTableSpecWarning):
"""
If the field specifies a ``null`` value, that value must conform
to the given ``datatype``.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "null value '{}' does not match field datatype, setting to 0"
default_args = ('x',)
class W37(UnimplementedWarning):
"""
    The 3 data formats defined in the VOTable specification and supported by
    ``astropy.io.votable`` are ``TABLEDATA``, ``BINARY`` and ``FITS``.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:data>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:data>`__
"""
message_template = "Unsupported data format '{}'"
default_args = ('x',)
class W38(VOTableSpecWarning):
"""
The only encoding for local binary data supported by the VOTable
specification is base64.
"""
message_template = "Inline binary data must be base64 encoded, got '{}'"
default_args = ('x',)
class W39(VOTableSpecWarning):
"""
Bit values do not support masking. This warning is raised upon
setting masked data in a bit column.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Bit values can not be masked"
class W40(VOTableSpecWarning):
"""
This is a terrible hack to support Simple Image Access Protocol
results from `NOIRLab Astro Data Archive <https://astroarchive.noirlab.edu/>`__. It
creates a field for the coordinate projection type of type "double",
which actually contains character data. We have to hack the field
to store character data, or we can't read it in. A warning will be
raised when this happens.
"""
message_template = "'cprojection' datatype repaired"
class W41(VOTableSpecWarning):
"""
An XML namespace was specified on the ``VOTABLE`` element, but the
namespace does not match what is expected for a ``VOTABLE`` file.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
Some files in the wild set the namespace to the location of the
VOTable schema, which is not correct and will not pass some
validating parsers.
"""
message_template = (
"An XML namespace is specified, but is incorrect. Expected " +
"'{}', got '{}'")
default_args = ('x', 'y')
class W42(VOTableSpecWarning):
"""
The root element should specify a namespace.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
"""
message_template = "No XML namespace specified"
class W43(VOTableSpecWarning):
"""
    Referenced elements should be defined before the elements that
    reference them. From the
VOTable 1.2 spec:
In VOTable1.2, it is further recommended to place the ID
attribute prior to referencing it whenever possible.
"""
message_template = "{} ref='{}' which has not already been defined"
default_args = ('element', 'x',)
class W44(VOTableSpecWarning):
"""
``VALUES`` elements that reference another element should not have
their own content.
From the VOTable 1.2 spec:
The ``ref`` attribute of a ``VALUES`` element can be used to
avoid a repetition of the domain definition, by referring to a
previously defined ``VALUES`` element having the referenced
``ID`` attribute. When specified, the ``ref`` attribute
defines completely the domain without any other element or
attribute, as e.g. ``<VALUES ref="RAdomain"/>``
"""
message_template = "VALUES element with ref attribute has content ('{}')"
default_args = ('element',)
class W45(VOWarning, ValueError):
"""
The ``content-role`` attribute on the ``LINK`` element must be one of
the following::
query, hints, doc, location
And in VOTable 1.3, additionally::
type
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
    <http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__,
`1.3
<http://www.ivoa.net/documents/VOTable/20130315/PR-VOTable-1.3-20130315.html#sec:link>`__
"""
message_template = "content-role attribute '{}' invalid"
default_args = ('x',)
class W46(VOTableSpecWarning):
"""
The given char or unicode string is too long for the specified
field length.
"""
message_template = "{} value is too long for specified length of {}"
default_args = ('char or unicode', 'x')
class W47(VOTableSpecWarning):
"""
If no arraysize is specified on a char field, the default of '1'
is implied, but this is rarely what is intended.
"""
message_template = "Missing arraysize indicates length 1"
class W48(VOTableSpecWarning):
"""
The attribute is not defined in the specification.
"""
message_template = "Unknown attribute '{}' on {}"
default_args = ('attribute', 'element')
class W49(VOTableSpecWarning):
"""
Prior to VOTable 1.3, the empty cell was illegal for integer
fields.
If a \"null\" value was specified for the cell, it will be used
for the value, otherwise, 0 will be used.
"""
message_template = "Empty cell illegal for integer fields."
class W50(VOTableSpecWarning):
"""
Invalid unit string as defined in the `Units in the VO, Version 1.0
<https://www.ivoa.net/documents/VOUnits>`_ (VOTable version >= 1.4)
or `Standards for Astronomical Catalogues, Version 2.0
<http://cdsarc.u-strasbg.fr/doc/catstd-3.2.htx>`_ (version < 1.4).
Consider passing an explicit ``unit_format`` parameter if the units
in this file conform to another specification.
"""
message_template = "Invalid unit string '{}'"
default_args = ('x',)
class W51(VOTableSpecWarning):
"""
The integer value is out of range for the size of the field.
"""
message_template = "Value '{}' is out of range for a {} integer field"
default_args = ('x', 'n-bit')
class W52(VOTableSpecWarning):
"""
The BINARY2 format was introduced in VOTable 1.3. It should
not be present in files marked as an earlier version.
"""
message_template = ("The BINARY2 format was introduced in VOTable 1.3, but "
"this file is declared as version '{}'")
default_args = ('1.2',)
class W53(VOTableSpecWarning):
"""
The VOTABLE element must contain at least one RESOURCE element.
"""
message_template = ("VOTABLE element must contain at least one RESOURCE element.")
default_args = ()
class W54(VOTableSpecWarning):
"""
The TIMESYS element was introduced in VOTable 1.4. It should
not be present in files marked as an earlier version.
"""
message_template = (
"The TIMESYS element was introduced in VOTable 1.4, but "
"this file is declared as version '{}'")
default_args = ('1.3',)
class W55(VOTableSpecWarning):
"""
    When non-ASCII characters are detected when reading
    a TABLEDATA value for a FIELD with ``datatype="char"``, this
    warning is issued.
"""
message_template = (
'FIELD ({}) has datatype="char" but contains non-ASCII '
'value ({})')
default_args = ('', '')
class E01(VOWarning, ValueError):
"""
The size specifier for a ``char`` or ``unicode`` field must be
only a number followed, optionally, by an asterisk.
Multi-dimensional size specifiers are not supported for these
datatypes.
Strings, which are defined as a set of characters, can be
represented in VOTable as a fixed- or variable-length array of
characters::
<FIELD name="unboundedString" datatype="char" arraysize="*"/>
A 1D array of strings can be represented as a 2D array of
characters, but given the logic above, it is possible to define a
variable-length array of fixed-length strings, but not a
fixed-length array of variable-length strings.
"""
message_template = "Invalid size specifier '{}' for a {} field (in field '{}')"
default_args = ('x', 'char/unicode', 'y')
class E02(VOWarning, ValueError):
"""
The number of array elements in the data does not match that specified
in the FIELD specifier.
"""
message_template = (
"Incorrect number of elements in array. " +
"Expected multiple of {}, got {}")
default_args = ('x', 'y')
class E03(VOWarning, ValueError):
"""
Complex numbers should be two values separated by whitespace.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' does not parse as a complex number"
default_args = ('x',)
class E04(VOWarning, ValueError):
"""
A ``bit`` array should be a string of '0's and '1's.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid bit value '{}'"
default_args = ('x',)
class E05(VOWarning, ValueError):
r"""
A ``boolean`` value should be one of the following strings (case
insensitive) in the ``TABLEDATA`` format::
'TRUE', 'FALSE', '1', '0', 'T', 'F', '\0', ' ', '?'
and in ``BINARY`` format::
'T', 'F', '1', '0', '\0', ' ', '?'
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid boolean value '{}'"
default_args = ('x',)
class E06(VOWarning, ValueError):
"""
The supported datatypes are::
double, float, bit, boolean, unsignedByte, short, int, long,
floatComplex, doubleComplex, char, unicodeChar
The following non-standard aliases are also supported, but in
these case :ref:`W13 <W13>` will be raised::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Unknown datatype '{}' on field '{}'"
default_args = ('x', 'y')
# E07: Deprecated
class E08(VOWarning, ValueError):
"""
The ``type`` attribute on the ``VALUES`` element must be either
``legal`` or ``actual``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "type must be 'legal' or 'actual', but is '{}'"
default_args = ('x',)
class E09(VOWarning, ValueError):
"""
The ``MIN``, ``MAX`` and ``OPTION`` elements must always have a
``value`` attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "'{}' must have a value attribute"
default_args = ('x',)
class E10(VOWarning, ValueError):
"""
From VOTable 1.1 and later, ``FIELD`` and ``PARAM`` elements must have
a ``datatype`` field.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "'datatype' attribute required on all '{}' elements"
default_args = ('FIELD',)
class E11(VOWarning, ValueError):
"""
The precision attribute is meant to express the number of significant
digits, either as a number of decimal places (e.g. ``precision="F2"`` or
equivalently ``precision="2"`` to express 2 significant figures
after the decimal point), or as a number of significant figures
(e.g. ``precision="E5"`` indicates a relative precision of 10-5).
It is validated using the following regular expression::
[EF]?[1-9][0-9]*
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__
"""
message_template = "precision '{}' is invalid"
default_args = ('x',)
class E12(VOWarning, ValueError):
"""
The width attribute is meant to indicate to the application the
number of characters to be used for input or output of the
quantity.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__
"""
message_template = "width must be a positive integer, got '{}'"
default_args = ('x',)
class E13(VOWarning, ValueError):
r"""
From the VOTable 1.2 spec:
A table cell can contain an array of a given primitive type,
with a fixed or variable number of elements; the array may
even be multidimensional. For instance, the position of a
point in a 3D space can be defined by the following::
<FIELD ID="point_3D" datatype="double" arraysize="3"/>
and each cell corresponding to that definition must contain
exactly 3 numbers. An asterisk (\*) may be appended to
indicate a variable number of elements in the array, as in::
<FIELD ID="values" datatype="int" arraysize="100*"/>
where it is specified that each cell corresponding to that
definition contains 0 to 100 integer numbers. The number may
be omitted to specify an unbounded array (in practice up to
        ~2×10⁹ elements).
A table cell can also contain a multidimensional array of a
given primitive type. This is specified by a sequence of
dimensions separated by the ``x`` character, with the first
dimension changing fastest; as in the case of a simple array,
the last dimension may be variable in length. As an example,
the following definition declares a table cell which may
contain a set of up to 10 images, each of 64×64 bytes::
<FIELD ID="thumbs" datatype="unsignedByte" arraysize="64×64×10*"/>
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:dim>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:dim>`__
"""
message_template = "Invalid arraysize attribute '{}'"
default_args = ('x',)
class E14(VOWarning, ValueError):
"""
All ``PARAM`` elements must have a ``value`` attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "value attribute is required for all PARAM elements"
class E15(VOWarning, ValueError):
"""
All ``COOSYS`` elements must have an ``ID`` attribute.
Note that the VOTable 1.1 specification says this attribute is
optional, but its corresponding schema indicates it is required.
In VOTable 1.2, the ``COOSYS`` element is deprecated.
"""
message_template = "ID attribute is required for all COOSYS elements"
class E16(VOTableSpecWarning):
"""
The ``system`` attribute on the ``COOSYS`` element must be one of the
following::
'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',
'supergalactic', 'xy', 'barycentric', 'geo_app'
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:COOSYS>`__
"""
message_template = "Invalid system attribute '{}'"
default_args = ('x',)
class E17(VOWarning, ValueError):
"""
``extnum`` attribute must be a positive integer.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "extnum must be a positive integer"
class E18(VOWarning, ValueError):
"""
The ``type`` attribute of the ``RESOURCE`` element must be one of
"results" or "meta".
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "type must be 'results' or 'meta', not '{}'"
default_args = ('x',)
class E19(VOWarning, ValueError):
"""
Raised either when the file doesn't appear to be XML, or the root
element is not VOTABLE.
"""
message_template = "File does not appear to be a VOTABLE"
class E20(VOTableSpecError):
"""
The table had only *x* fields defined, but the data itself has more
columns than that.
"""
message_template = "Data has more columns than are defined in the header ({})"
default_args = ('x',)
class E21(VOWarning, ValueError):
"""
The table had *x* fields defined, but the data itself has only *y*
columns.
"""
message_template = "Data has fewer columns ({}) than are defined in the header ({})"
default_args = ('x', 'y')
class E22(VOWarning, ValueError):
"""
All ``TIMESYS`` elements must have an ``ID`` attribute.
"""
message_template = "ID attribute is required for all TIMESYS elements"
class E23(VOTableSpecWarning):
"""
The ``timeorigin`` attribute on the ``TIMESYS`` element must be
either a floating point literal specifying a valid Julian Date,
or, for convenience, the string "MJD-origin" (standing for 2400000.5)
or the string "JD-origin" (standing for 0).
**References**: `1.4
<http://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html#ToC21>`__
"""
message_template = "Invalid timeorigin attribute '{}'"
default_args = ('x',)
class E24(VOWarning, ValueError):
"""
Non-ASCII unicode values should not be written when the FIELD ``datatype="char"``,
and cannot be written in BINARY or BINARY2 serialization.
"""
message_template = (
'Attempt to write non-ASCII value ({}) to FIELD ({}) which '
'has datatype="char"')
default_args = ('', '')
class E25(VOTableSpecWarning):
"""
A VOTable cannot have a DATA section without any defined FIELD; DATA will be ignored.
"""
message_template = "No FIELDs are defined; DATA section will be ignored."
def _get_warning_and_exception_classes(prefix):
classes = []
for key, val in globals().items():
if re.match(prefix + "[0-9]{2}", key):
classes.append((key, val))
classes.sort()
return classes
def _build_doc_string():
def generate_set(prefix):
classes = _get_warning_and_exception_classes(prefix)
out = io.StringIO()
for name, cls in classes:
out.write(f".. _{name}:\n\n")
msg = f"{cls.__name__}: {cls.get_short_name()}"
if not isinstance(msg, str):
msg = msg.decode('utf-8')
out.write(msg)
out.write('\n')
out.write('~' * len(msg))
out.write('\n\n')
doc = cls.__doc__
if not isinstance(doc, str):
doc = doc.decode('utf-8')
out.write(dedent(doc))
out.write('\n\n')
return out.getvalue()
warnings = generate_set('W')
exceptions = generate_set('E')
return {'warnings': warnings,
'exceptions': exceptions}
if __doc__ is not None:
__doc__ = __doc__.format(**_build_doc_string())
__all__.extend([x[0] for x in _get_warning_and_exception_classes('W')])
__all__.extend([x[0] for x in _get_warning_and_exception_classes('E')])
|
f7af7a64db0de2fb2cc0779ec3755f0fb963ec2343b5b7aa95b28fbae26a9119 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains routines to verify the correctness of UCD strings.
"""
# STDLIB
import re
# LOCAL
from astropy.utils import data
__all__ = ['parse_ucd', 'check_ucd']
class UCDWords:
"""
Manages a list of acceptable UCD words.
Works by reading in a data file exactly as provided by IVOA. This
file resides in data/ucd1p-words.txt.
"""
def __init__(self):
self._primary = set()
self._secondary = set()
self._descriptions = {}
self._capitalization = {}
with data.get_pkg_data_fileobj(
"data/ucd1p-words.txt", encoding='ascii') as fd:
for line in fd.readlines():
type, name, descr = (x.strip() for x in line.split('|'))
name_lower = name.lower()
if type in 'QPEVC':
self._primary.add(name_lower)
if type in 'QSEVC':
self._secondary.add(name_lower)
self._descriptions[name_lower] = descr
self._capitalization[name_lower] = name
def is_primary(self, name):
"""
Returns True if *name* is a valid primary name.
"""
return name.lower() in self._primary
def is_secondary(self, name):
"""
Returns True if *name* is a valid secondary name.
"""
return name.lower() in self._secondary
def get_description(self, name):
"""
Returns the official English description of the given UCD
*name*.
"""
return self._descriptions[name.lower()]
def normalize_capitalization(self, name):
"""
Returns the standard capitalization form of the given name.
"""
return self._capitalization[name.lower()]
_ucd_singleton = None
def parse_ucd(ucd, check_controlled_vocabulary=False, has_colon=False):
"""
Parse the UCD into its component parts.
Parameters
----------
ucd : str
The UCD string
check_controlled_vocabulary : bool, optional
        If `True`, then each word in the UCD will be verified against
        the UCD1+ controlled vocabulary (as required by the VOTable
        specification version 1.2); otherwise not.
has_colon : bool, optional
If `True`, the UCD may contain a colon (as defined in earlier
versions of the standard).
Returns
-------
parts : list
The result is a list of tuples of the form:
(*namespace*, *word*)
If no namespace was explicitly specified, *namespace* will be
returned as ``'ivoa'`` (i.e., the default namespace).
Raises
------
ValueError
if *ucd* is invalid
"""
global _ucd_singleton
if _ucd_singleton is None:
_ucd_singleton = UCDWords()
if has_colon:
m = re.search(r'[^A-Za-z0-9_.:;\-]', ucd)
else:
m = re.search(r'[^A-Za-z0-9_.;\-]', ucd)
if m is not None:
raise ValueError(f"UCD has invalid character '{m.group(0)}' in '{ucd}'")
word_component_re = r'[A-Za-z0-9][A-Za-z0-9\-_]*'
word_re = fr'{word_component_re}(\.{word_component_re})*'
parts = ucd.split(';')
words = []
for i, word in enumerate(parts):
colon_count = word.count(':')
if colon_count == 1:
ns, word = word.split(':', 1)
if not re.match(word_component_re, ns):
raise ValueError(f"Invalid namespace '{ns}'")
ns = ns.lower()
elif colon_count > 1:
raise ValueError(f"Too many colons in '{word}'")
else:
ns = 'ivoa'
if not re.match(word_re, word):
raise ValueError(f"Invalid word '{word}'")
if ns == 'ivoa' and check_controlled_vocabulary:
if i == 0:
if not _ucd_singleton.is_primary(word):
if _ucd_singleton.is_secondary(word):
raise ValueError(
f"Secondary word '{word}' is not valid as a primary word")
else:
raise ValueError(f"Unknown word '{word}'")
else:
if not _ucd_singleton.is_secondary(word):
if _ucd_singleton.is_primary(word):
raise ValueError(
f"Primary word '{word}' is not valid as a secondary word")
else:
raise ValueError(f"Unknown word '{word}'")
try:
normalized_word = _ucd_singleton.normalize_capitalization(word)
except KeyError:
normalized_word = word
words.append((ns, normalized_word))
return words
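# Illustrative usage sketch (not part of the original module), assuming the
# bundled data/ucd1p-words.txt file is available:
#
#     >>> parse_ucd('pos.eq.ra;meta.main')
#     [('ivoa', 'pos.eq.ra'), ('ivoa', 'meta.main')]
#
# An explicit namespace is only accepted when has_colon=True:
#
#     >>> parse_ucd('custom:foo.bar', has_colon=True)
#     [('custom', 'foo.bar')]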
def check_ucd(ucd, check_controlled_vocabulary=False, has_colon=False):
"""
Returns False if *ucd* is not a valid `unified content descriptor`_.
Parameters
----------
ucd : str
The UCD string
check_controlled_vocabulary : bool, optional
        If `True`, then each word in the UCD will be verified against
        the UCD1+ controlled vocabulary (as required by the VOTable
        specification version 1.2); otherwise not.
has_colon : bool, optional
If `True`, the UCD may contain a colon (as defined in earlier
versions of the standard).
Returns
-------
valid : bool
"""
if ucd is None:
return True
try:
parse_ucd(ucd,
check_controlled_vocabulary=check_controlled_vocabulary,
has_colon=has_colon)
except ValueError:
return False
return True
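# Illustrative sketch (not part of the original module):
#
#     >>> check_ucd('pos.eq.ra;meta.main')
#     True
#     >>> check_ucd('pos eq ra')    # whitespace is not a legal UCD character
#     False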
|
ae45d387f4b18d914eb7776dd55eb9b9cdab88bebea6d0092366f31791e95174 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Various utilities and cookbook-like things.
"""
# STDLIB
import codecs
import contextlib
import io
import re
import gzip
from packaging.version import Version
__all__ = [
'convert_to_writable_filelike',
'stc_reference_frames',
'coerce_range_list_param',
]
@contextlib.contextmanager
def convert_to_writable_filelike(fd, compressed=False):
"""
Returns a writable file-like object suitable for streaming output.
Parameters
----------
fd : str or file-like
May be:
- a file path string, in which case it is opened, and the file
object is returned.
            - an object with a :meth:`write` method, in which case that
object is returned.
compressed : bool, optional
If `True`, create a gzip-compressed file. (Default is `False`).
Returns
-------
fd : writable file-like
"""
if isinstance(fd, str):
if fd.endswith('.gz') or compressed:
with gzip.GzipFile(fd, 'wb') as real_fd:
encoded_fd = io.TextIOWrapper(real_fd, encoding='utf8')
yield encoded_fd
encoded_fd.flush()
real_fd.flush()
return
else:
with open(fd, 'wt', encoding='utf8') as real_fd:
yield real_fd
return
elif hasattr(fd, 'write'):
assert callable(fd.write)
if compressed:
            # Request write mode explicitly; with no mode and a mode-less
            # file object, GzipFile would default to read mode.
            fd = gzip.GzipFile(fileobj=fd, mode='wb')
# If we can't write Unicode strings, use a codecs.StreamWriter
# object
needs_wrapper = False
try:
fd.write('')
except TypeError:
needs_wrapper = True
if not hasattr(fd, 'encoding') or fd.encoding is None:
needs_wrapper = True
if needs_wrapper:
yield codecs.getwriter('utf-8')(fd)
fd.flush()
else:
yield fd
fd.flush()
return
else:
raise TypeError("Can not be coerced to writable file-like object")
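# Illustrative usage sketch (not part of the original module); the output
# file name is hypothetical. A '.gz' suffix (or compressed=True) selects
# gzip output:
#
#     with convert_to_writable_filelike('out.xml.gz') as fd:
#         fd.write('<VOTABLE/>\n')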
# <http://www.ivoa.net/documents/REC/DM/STC-20071030.html>
stc_reference_frames = {
'FK4', 'FK5', 'ECLIPTIC', 'ICRS', 'GALACTIC', 'GALACTIC_I', 'GALACTIC_II',
'SUPER_GALACTIC', 'AZ_EL', 'BODY', 'GEO_C', 'GEO_D', 'MAG', 'GSE', 'GSM',
'SM', 'HGC', 'HGS', 'HEEQ', 'HRTN', 'HPC', 'HPR', 'HCC', 'HGI',
'MERCURY_C', 'VENUS_C', 'LUNA_C', 'MARS_C', 'JUPITER_C_III',
'SATURN_C_III', 'URANUS_C_III', 'NEPTUNE_C_III', 'PLUTO_C', 'MERCURY_G',
'VENUS_G', 'LUNA_G', 'MARS_G', 'JUPITER_G_III', 'SATURN_G_III',
'URANUS_G_III', 'NEPTUNE_G_III', 'PLUTO_G', 'UNKNOWNFrame'}
def coerce_range_list_param(p, frames=None, numeric=True):
"""
    Coerces and/or verifies the object *p* into a valid range-list-format
    parameter, as defined in `Section 8.7.2 of the Simple Spectral Access
    Protocol <http://www.ivoa.net/documents/REC/DAL/SSA-20080201.html>`_.
Parameters
----------
p : str or sequence
May be a string as passed verbatim to the service expecting a
range-list, or a sequence. If a sequence, each item must be
either:
- a numeric value
- a named value, such as, for example, 'J' for named
spectrum (if the *numeric* kwarg is False)
- a 2-tuple indicating a range
            - the last item may be a string indicating the frame of
reference
frames : sequence of str, optional
        A sequence of acceptable frame of reference keywords (see, e.g.,
        the module-level ``stc_reference_frames`` set). If not provided,
        the frame of reference is not validated.
numeric : bool, optional
        If `True`, the values are expected to be numeric and are coerced
        to floats; if `False`, named values (such as 'J' for a named
        spectrum) are also permitted.
Returns
-------
parts : tuple
The result is a tuple:
- a string suitable for passing to a service as a range-list
argument
- an integer counting the number of elements
"""
def str_or_none(x):
if x is None:
return ''
if numeric:
x = float(x)
return str(x)
def numeric_or_range(x):
if isinstance(x, tuple) and len(x) == 2:
return f'{str_or_none(x[0])}/{str_or_none(x[1])}'
else:
return str_or_none(x)
def is_frame_of_reference(x):
return isinstance(x, str)
if p is None:
return None, 0
elif isinstance(p, (tuple, list)):
has_frame_of_reference = len(p) > 1 and is_frame_of_reference(p[-1])
if has_frame_of_reference:
points = p[:-1]
else:
points = p[:]
out = ','.join([numeric_or_range(x) for x in points])
length = len(points)
if has_frame_of_reference:
if frames is not None and p[-1] not in frames:
raise ValueError(
f"'{p[-1]}' is not a valid frame of reference")
out += ';' + p[-1]
length += 1
return out, length
elif isinstance(p, str):
number = r'([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)?'
if not numeric:
number = r'(' + number + ')|([A-Z_]+)'
match = re.match(
'^' + number + r'([,/]' + number +
r')+(;(?P<frame>[<A-Za-z_0-9]+))?$',
p)
if match is None:
raise ValueError(f"'{p}' is not a valid range list")
frame = match.groupdict()['frame']
if frames is not None and frame is not None and frame not in frames:
raise ValueError(
f"'{frame}' is not a valid frame of reference")
return p, p.count(',') + p.count(';') + 1
try:
float(p)
return str(p), 1
except TypeError:
raise ValueError(f"'{p}' is not a valid range list")
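# Illustrative sketch (not part of the original module): a scalar, a range
# tuple and a trailing frame-of-reference string are flattened into the
# range-list syntax:
#
#     >>> coerce_range_list_param((1.0, (2.0, 3.0), 'FK5'))
#     ('1.0,2.0/3.0;FK5', 3)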
def version_compare(a, b):
"""
Compare two VOTable version identifiers.
"""
def version_to_tuple(v):
if v[0].lower() == 'v':
v = v[1:]
return Version(v)
av = version_to_tuple(a)
bv = version_to_tuple(b)
# Can't use cmp because it was removed from Python 3.x
return (av > bv) - (av < bv)
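# Illustrative sketch (not part of the original module): a leading 'v' or
# 'V' is stripped before comparison, and the result follows cmp() semantics:
#
#     >>> version_compare('v1.1', '1.2')
#     -1
#     >>> version_compare('1.3', 'V1.3')
#     0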
|
fc0666f6f0fefe59292ce20f7357730eebcf856c023da17f4208242a73bc0c96 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import argparse
import glob
import logging
import os
import sys
from astropy.io import fits
from astropy.io.fits.util import fill
from astropy import __version__
log = logging.getLogger('fitsdiff')
DESCRIPTION = """
Compare two FITS image files and report the differences in header keywords and
data.
fitsdiff [options] filename1 filename2
where filename1 filename2 are the two files to be compared. They may also be
wild cards, in which case they must be enclosed in double or single quotes, or
they may be directory names. If both are directory names, all files in each of
the directories will be included; if only one is a directory name, then the
directory name will be prefixed to the file name(s) specified by the other
argument. For example::
fitsdiff "*.fits" "/machine/data1"
will compare all FITS files in the current directory to the corresponding files
in the directory /machine/data1.
This script is part of the Astropy package. See
https://docs.astropy.org/en/latest/io/fits/usage/scripts.html#fitsdiff
for further documentation.
""".strip()
EPILOG = fill("""
If the two files are identical within the specified conditions, it will report
"No difference is found." If the value(s) of -c and -k takes the form
'@filename', list is in the text file 'filename', and each line in that text
file contains one keyword.
Example
-------
fitsdiff -k filename,filtnam1 -n 5 -r 1.e-6 test1.fits test2
This command will compare files test1.fits and test2.fits, report a maximum of
5 differing pixel values per extension, only report data values that differ by
more than 1.e-6 relative to each other, and will ignore differing values of the
keywords FILENAME and FILTNAM1 (or their very existence).
fitsdiff command-line arguments can also be set using the environment variable
FITSDIFF_SETTINGS. If the FITSDIFF_SETTINGS environment variable is present,
its arguments are prepended to the command-line arguments, so arguments given
explicitly on the command-line take precedence, unless the --exact option is
specified, which overrides both. The FITSDIFF_SETTINGS
environment variable exists to make it easier to change the
behavior of fitsdiff on a global level, such as in a set of regression tests.
""".strip(), width=80)
class StoreListAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super().__init__(option_strings, dest, nargs, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, [])
# Accept either a comma-separated list or a filename (starting with @)
# containing a value on each line
if values and values[0] == '@':
value = values[1:]
if not os.path.exists(value):
log.warning(f'{self.dest} argument {value} does not exist')
return
try:
values = [v.strip() for v in open(value).readlines()]
setattr(namespace, self.dest, values)
except OSError as exc:
log.warning('reading {} for {} failed: {}; ignoring this '
'argument'.format(value, self.dest, exc))
del exc
else:
setattr(namespace, self.dest,
[v.strip() for v in values.split(',')])
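# Illustrative sketch (not part of the original module): both invocations
# below populate ``ignore_keywords`` identically, assuming the hypothetical
# file keywords.txt contains the lines FILENAME and FILTNAM1:
#
#     fitsdiff -k filename,filtnam1 test1.fits test2.fits
#     fitsdiff -k @keywords.txt test1.fits test2.fits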
def handle_options(argv=None):
parser = argparse.ArgumentParser(
description=DESCRIPTION, epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--version', action='version',
version=f'%(prog)s {__version__}')
parser.add_argument(
'fits_files', metavar='file', nargs='+',
help='.fits files to process.')
parser.add_argument(
'-q', '--quiet', action='store_true',
help='Produce no output and just return a status code.')
parser.add_argument(
'-n', '--num-diffs', type=int, default=10, dest='numdiffs',
metavar='INTEGER',
help='Max number of data differences (image pixel or table element) '
'to report per extension (default %(default)s).')
parser.add_argument(
'-r', '--rtol', '--relative-tolerance', type=float, default=None,
dest='rtol', metavar='NUMBER',
help='The relative tolerance for comparison of two numbers, '
'specifically two floating point numbers. This applies to data '
'in both images and tables, and to floating point keyword values '
'in headers (default %(default)s).')
parser.add_argument(
'-a', '--atol', '--absolute-tolerance', type=float, default=None,
dest='atol', metavar='NUMBER',
help='The absolute tolerance for comparison of two numbers, '
'specifically two floating point numbers. This applies to data '
'in both images and tables, and to floating point keyword values '
'in headers (default %(default)s).')
parser.add_argument(
'-b', '--no-ignore-blanks', action='store_false',
dest='ignore_blanks', default=True,
help="Don't ignore trailing blanks (whitespace) in string values. "
"Otherwise trailing blanks both in header keywords/values and in "
"table column values) are not treated as significant i.e., "
"without this option 'ABCDEF ' and 'ABCDEF' are considered "
"equivalent. ")
parser.add_argument(
'--no-ignore-blank-cards', action='store_false',
dest='ignore_blank_cards', default=True,
help="Don't ignore entirely blank cards in headers. Normally fitsdiff "
"does not consider blank cards when comparing headers, but this "
"will ensure that even blank cards match up. ")
parser.add_argument(
'--exact', action='store_true',
dest='exact_comparisons', default=False,
help="Report ALL differences, "
"overriding command-line options and FITSDIFF_SETTINGS. ")
parser.add_argument(
'-o', '--output-file', metavar='FILE',
help='Output results to this file; otherwise results are printed to '
'stdout.')
parser.add_argument(
'-u', '--ignore-hdus', action=StoreListAction,
default=[], dest='ignore_hdus',
metavar='HDU_NAMES',
help='Comma-separated list of HDU names not to be compared. HDU '
'names may contain wildcard patterns.')
group = parser.add_argument_group('Header Comparison Options')
group.add_argument(
'-k', '--ignore-keywords', action=StoreListAction,
default=[], dest='ignore_keywords',
metavar='KEYWORDS',
help='Comma-separated list of keywords not to be compared. Keywords '
'may contain wildcard patterns. To exclude all keywords, use '
'"*"; make sure to have double or single quotes around the '
'asterisk on the command-line.')
group.add_argument(
'-c', '--ignore-comments', action=StoreListAction,
default=[], dest='ignore_comments',
metavar='COMMENTS',
help='Comma-separated list of keywords whose comments will not be '
'compared. Wildcards may be used as with --ignore-keywords.')
group = parser.add_argument_group('Table Comparison Options')
group.add_argument(
'-f', '--ignore-fields', action=StoreListAction,
default=[], dest='ignore_fields',
metavar='COLUMNS',
help='Comma-separated list of fields (i.e. columns) not to be '
'compared. All columns may be excluded using "*" as with '
'--ignore-keywords.')
options = parser.parse_args(argv)
# Determine which filenames to compare
if len(options.fits_files) != 2:
parser.error('\nfitsdiff requires two arguments; '
'see `fitsdiff --help` for more details.')
return options
def setup_logging(outfile=None):
log.setLevel(logging.INFO)
error_handler = logging.StreamHandler(sys.stderr)
error_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
error_handler.setLevel(logging.WARNING)
log.addHandler(error_handler)
if outfile is not None:
output_handler = logging.FileHandler(outfile)
else:
output_handler = logging.StreamHandler()
class LevelFilter(logging.Filter):
"""Log only messages matching the specified level."""
def __init__(self, name='', level=logging.NOTSET):
logging.Filter.__init__(self, name)
self.level = level
def filter(self, rec):
return rec.levelno == self.level
# File output logs all messages, but stdout logs only INFO messages
# (since errors are already logged to stderr)
output_handler.addFilter(LevelFilter(level=logging.INFO))
output_handler.setFormatter(logging.Formatter('%(message)s'))
log.addHandler(output_handler)
def match_files(paths):
if os.path.isfile(paths[0]) and os.path.isfile(paths[1]):
# shortcut if both paths are files
return [paths]
dirnames = [None, None]
filelists = [None, None]
for i, path in enumerate(paths):
if glob.has_magic(path):
files = [os.path.split(f) for f in glob.glob(path)]
if not files:
log.error('Wildcard pattern %r did not match any files.', path)
sys.exit(2)
dirs, files = list(zip(*files))
if len(set(dirs)) > 1:
log.error('Wildcard pattern %r should match only one '
'directory.', path)
sys.exit(2)
dirnames[i] = set(dirs).pop()
filelists[i] = sorted(files)
elif os.path.isdir(path):
dirnames[i] = path
filelists[i] = [f for f in sorted(os.listdir(path))
if os.path.isfile(os.path.join(path, f))]
elif os.path.isfile(path):
dirnames[i] = os.path.dirname(path)
filelists[i] = [os.path.basename(path)]
else:
log.error(
'%r is not an existing file, directory, or wildcard '
'pattern; see `fitsdiff --help` for more usage help.', path)
sys.exit(2)
dirnames[i] = os.path.abspath(dirnames[i])
filematch = set(filelists[0]) & set(filelists[1])
for a, b in [(0, 1), (1, 0)]:
if len(filelists[a]) > len(filematch) and not os.path.isdir(paths[a]):
for extra in sorted(set(filelists[a]) - filematch):
log.warning('%r has no match in %r', extra, dirnames[b])
return [(os.path.join(dirnames[0], f),
os.path.join(dirnames[1], f)) for f in filematch]
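# Illustrative sketch (not part of the original module): two plain file
# paths are returned as a single pair, while directory or wildcard
# arguments are paired up by matching base file names across the two sides:
#
#     match_files(['t1.fits', 't2.fits'])
#     # -> [('t1.fits', 't2.fits')]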
def main(args=None):
args = args or sys.argv[1:]
if 'FITSDIFF_SETTINGS' in os.environ:
args = os.environ['FITSDIFF_SETTINGS'].split() + args
opts = handle_options(args)
if opts.rtol is None:
opts.rtol = 0.0
if opts.atol is None:
opts.atol = 0.0
if opts.exact_comparisons:
# override the options so that each is the most restrictive
opts.ignore_keywords = []
opts.ignore_comments = []
opts.ignore_fields = []
opts.rtol = 0.0
opts.atol = 0.0
opts.ignore_blanks = False
opts.ignore_blank_cards = False
if not opts.quiet:
setup_logging(opts.output_file)
files = match_files(opts.fits_files)
close_file = False
if opts.quiet:
out_file = None
elif opts.output_file:
out_file = open(opts.output_file, 'w')
close_file = True
else:
out_file = sys.stdout
identical = []
try:
for a, b in files:
# TODO: pass in any additional arguments here too
diff = fits.diff.FITSDiff(
a, b,
ignore_hdus=opts.ignore_hdus,
ignore_keywords=opts.ignore_keywords,
ignore_comments=opts.ignore_comments,
ignore_fields=opts.ignore_fields,
numdiffs=opts.numdiffs,
rtol=opts.rtol,
atol=opts.atol,
ignore_blanks=opts.ignore_blanks,
ignore_blank_cards=opts.ignore_blank_cards)
diff.report(fileobj=out_file)
identical.append(diff.identical)
return int(not all(identical))
finally:
if close_file:
out_file.close()
# Close the file if used for the logging output, and remove handlers to
# avoid having them multiple times for unit tests.
for handler in log.handlers:
if isinstance(handler, logging.FileHandler):
handler.close()
log.removeHandler(handler)
|
3af9458b4dfbf4529ddfb0a19fd6bd4f27b96a43cbd9ae5b9b7be4c671269671 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import csv
import operator
import os
import re
import sys
import textwrap
import warnings
from contextlib import suppress
import numpy as np
from numpy import char as chararray
from .base import DELAYED, _ValidHDU, ExtensionHDU
# This module may have many dependencies on astropy.io.fits.column, but
# astropy.io.fits.column has fewer dependencies overall, so it's easier to
# keep table/column-related utilities in astropy.io.fits.column
from astropy.io.fits.column import (FITS2NUMPY, KEYWORD_NAMES, KEYWORD_TO_ATTRIBUTE,
ATTRIBUTE_TO_KEYWORD, TDEF_RE, Column, ColDefs,
_AsciiColDefs, _FormatP, _FormatQ, _makep,
_parse_tformat, _scalar_to_format, _convert_format,
_cmp_recformats)
from astropy.io.fits.fitsrec import FITS_rec, _get_recarray_field, _has_unicode_fields
from astropy.io.fits.header import Header, _pad_length
from astropy.io.fits.util import _is_int, _str_to_num
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning
class FITSTableDumpDialect(csv.excel):
"""
A CSV dialect for the Astropy format of ASCII dumps of FITS tables.
"""
delimiter = ' '
lineterminator = '\n'
quotechar = '"'
quoting = csv.QUOTE_ALL
skipinitialspace = True
class _TableLikeHDU(_ValidHDU):
"""
A class for HDUs that have table-like data. This is used for both
Binary/ASCII tables as well as Random Access Group HDUs (which are
otherwise too dissimilar for tables to use _TableBaseHDU directly).
"""
_data_type = FITS_rec
_columns_type = ColDefs
# TODO: Temporary flag representing whether uints are enabled; remove this
# after restructuring to support uints by default on a per-column basis
_uint = False
@classmethod
def match_header(cls, header):
"""
This is an abstract HDU type for HDUs that contain table-like data.
This is even more abstract than _TableBaseHDU which is specifically for
the standard ASCII and Binary Table types.
"""
raise NotImplementedError
@classmethod
def from_columns(cls, columns, header=None, nrows=0, fill=False,
character_as_bytes=False, **kwargs):
"""
Given either a `ColDefs` object, a sequence of `Column` objects,
or another table HDU or table data (a `FITS_rec` or multi-field
`numpy.ndarray` or `numpy.recarray` object, return a new table HDU of
the class this method was called on using the column definition from
the input.
See also `FITS_rec.from_columns`.
Parameters
----------
columns : sequence of `Column`, `ColDefs` -like
The columns from which to create the table data, or an object with
a column-like structure from which a `ColDefs` can be instantiated.
This includes an existing `BinTableHDU` or `TableHDU`, or a
`numpy.recarray` to give some examples.
            If these columns have data arrays attached, that data may be used in
initializing the new table. Otherwise the input columns will be
used as a template for a new table with the requested number of
rows.
header : `Header`
            An optional `Header` object with which to instantiate the new HDU. Header
keywords specifically related to defining the table structure (such
as the "TXXXn" keywords like TTYPEn) will be overridden by the
supplied column definitions, but all other informational and data
model-specific keywords are kept.
nrows : int
Number of rows in the new table. If the input columns have data
associated with them, the size of the largest input column is used.
Otherwise the default is 0.
fill : bool
            If `True`, will fill all cells with zeros or blanks. If `False`,
            copy the data from input; undefined cells will still be filled
            with zeros/blanks.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the
HDU. By default this is `False` and (unicode) strings are returned,
but for large tables this may use up a lot of memory.
Notes
-----
Any additional keyword arguments accepted by the HDU class's
``__init__`` may also be passed in as keyword arguments.
"""
coldefs = cls._columns_type(columns)
data = FITS_rec.from_columns(coldefs, nrows=nrows, fill=fill,
character_as_bytes=character_as_bytes)
hdu = cls(data=data, header=header, character_as_bytes=character_as_bytes, **kwargs)
coldefs._add_listener(hdu)
return hdu
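    # Illustrative sketch (not part of the original module): building a new
    # binary table HDU from a single column definition.
    #
    #     from astropy.io import fits
    #     col = fits.Column(name='flux', format='E', array=[1.0, 2.0])
    #     hdu = fits.BinTableHDU.from_columns([col])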
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
# The base class doesn't make any assumptions about where the column
# definitions come from, so just return an empty ColDefs
return ColDefs([])
@property
def _nrows(self):
"""
table-like HDUs must provide an attribute that specifies the number of
rows in the HDU's table.
For now this is an internal-only attribute.
"""
raise NotImplementedError
def _get_tbdata(self):
"""Get the table data from an input HDU object."""
columns = self.columns
# TODO: Details related to variable length arrays need to be dealt with
# specifically in the BinTableHDU class, since they're a detail
# specific to FITS binary tables
if (any(type(r) in (_FormatP, _FormatQ)
for r in columns._recformats) and
self._data_size is not None and
self._data_size > self._theap):
# We have a heap; include it in the raw_data
raw_data = self._get_raw_data(self._data_size, np.uint8,
self._data_offset)
tbsize = self._header['NAXIS1'] * self._header['NAXIS2']
data = raw_data[:tbsize].view(dtype=columns.dtype,
type=np.rec.recarray)
else:
raw_data = self._get_raw_data(self._nrows, columns.dtype,
self._data_offset)
if raw_data is None:
                # This can happen when a brand new table HDU is being created
                # and no data has been assigned to the columns, in which case
                # just return an empty array
raw_data = np.array([], dtype=columns.dtype)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
data = data.view(self._data_type)
columns._add_listener(data)
return data
def _init_tbdata(self, data):
columns = self.columns
data.dtype = data.dtype.newbyteorder('>')
# hack to enable pseudo-uint support
data._uint = self._uint
        # pass the heap offset (data location), needed for P-format columns
data._heapoffset = self._theap
data._heapsize = self._header['PCOUNT']
tbsize = self._header['NAXIS1'] * self._header['NAXIS2']
data._gap = self._theap - tbsize
# pass the attributes
for idx, col in enumerate(columns):
# get the data for each column object from the rec.recarray
col.array = data.field(idx)
# delete the _arrays attribute so that it is recreated to point to the
# new data placed in the column object above
del columns._arrays
def _update_load_data(self):
"""Load the data if asked to."""
if not self._data_loaded:
self.data
def _update_column_added(self, columns, column):
"""
Update the data upon addition of a new column through the `ColDefs`
interface.
"""
# recreate data from the columns
self.data = FITS_rec.from_columns(
self.columns, nrows=self._nrows, fill=False,
character_as_bytes=self._character_as_bytes
)
def _update_column_removed(self, columns, col_idx):
"""
Update the data upon removal of a column through the `ColDefs`
interface.
"""
# recreate data from the columns
self.data = FITS_rec.from_columns(
self.columns, nrows=self._nrows, fill=False,
character_as_bytes=self._character_as_bytes
)
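# A minimal usage sketch of the listener mechanism above (``tbhdu`` is a
# hypothetical BinTableHDU): changing the columns through the ColDefs
# interface notifies the HDU, which rebuilds its data array:
#     tbhdu.columns.add_col(fits.Column(name='flag', format='L'))
#     tbhdu.columns.del_col('flag')
# Both calls end up invoking _update_column_added/_update_column_removed.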
class _TableBaseHDU(ExtensionHDU, _TableLikeHDU):
"""
FITS table extension base HDU class.
Parameters
----------
data : array
Data to be used.
header : `Header` instance
Header to be used. If the ``data`` is also specified, header keywords
specifically related to defining the table structure (such as the
"TXXXn" keywords like TTYPEn) will be overridden by the supplied column
definitions, but all other informational and data model-specific
keywords are kept.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
The version of the HDU; this will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_manages_own_heap = False
"""
This flag implies that when writing VLA tables (P/Q format) the heap
pointers that go into P/Q table columns should not be reordered or
rearranged in any way by the default heap management code.
This is included primarily as an optimization for compressed image HDUs
which perform their own heap maintenance.
"""
def __init__(self, data=None, header=None, name=None, uint=False, ver=None,
character_as_bytes=False):
super().__init__(data=data, header=header, name=name, ver=ver)
self._uint = uint
self._character_as_bytes = character_as_bytes
if data is DELAYED:
# this should never happen
if header is None:
raise ValueError('No header to set up HDU.')
# if the file is read the first time, no need to copy, and keep it
# unchanged
else:
self._header = header
else:
# construct a list of cards of minimal header
cards = [
('XTENSION', self._extension, self._ext_comment),
('BITPIX', 8, 'array data type'),
('NAXIS', 2, 'number of array dimensions'),
('NAXIS1', 0, 'length of dimension 1'),
('NAXIS2', 0, 'length of dimension 2'),
('PCOUNT', 0, 'number of group parameters'),
('GCOUNT', 1, 'number of groups'),
('TFIELDS', 0, 'number of table fields')]
if header is not None:
# Make a "copy" (not just a view) of the input header, since it
# may get modified. The data is still a "view" (for now)
hcopy = header.copy(strip=True)
cards.extend(hcopy.cards)
self._header = Header(cards)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# self._data_type is FITS_rec.
if isinstance(data, self._data_type):
self.data = data
else:
self.data = self._data_type.from_columns(data)
# TEMP: Special column keywords are normally overwritten by attributes
# from Column objects. In Astropy 3.0, several new keywords are now
# recognized as being special column keywords, but we don't
# automatically clear them yet, as we need to raise a deprecation
# warning for at least one major version.
if header is not None:
future_ignore = set()
for keyword in header.keys():
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group('label')
except Exception:
continue # skip if there is no match
if base_keyword in {'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS'}:
future_ignore.add(base_keyword)
if future_ignore:
keys = ', '.join(x + 'n' for x in sorted(future_ignore))
warnings.warn("The following keywords are now recognized as special "
"column-related attributes and should be set via the "
"Column objects: {}. In future, these values will be "
"dropped from manually specified headers automatically "
"and replaced with values generated based on the "
"Column objects.".format(keys), AstropyDeprecationWarning)
# TODO: Too much of the code in this class uses header keywords
# in making calculations related to the data size. This is
# unreliable, however, in cases when users mess with the header
# unintentionally--code that does this should be cleaned up.
self._header['NAXIS1'] = self.data._raw_itemsize
self._header['NAXIS2'] = self.data.shape[0]
self._header['TFIELDS'] = len(self.data._coldefs)
self.columns = self.data._coldefs
self.columns._add_listener(self.data)
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError('Table data has incorrect type.')
# Ensure that the correct EXTNAME is set on the new header if one was
# created, or that it overrides the existing EXTNAME if different
if name:
self.name = name
if ver is not None:
self.ver = ver
@classmethod
def match_header(cls, header):
"""
This is an abstract type that implements the shared functionality of
the ASCII and Binary Table HDU types, which should be used instead of
this.
"""
raise NotImplementedError
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
if self._has_data and hasattr(self.data, '_coldefs'):
return self.data._coldefs
return self._columns_type(self)
@lazyproperty
def data(self):
data = self._get_tbdata()
data._coldefs = self.columns
data._character_as_bytes = self._character_as_bytes
# Columns should now just return a reference to the data._coldefs
del self.columns
return data
@data.setter
def data(self, data):
if 'data' in self.__dict__:
if self.__dict__['data'] is data:
return
else:
self._data_replaced = True
else:
self._data_replaced = True
self._modified = True
if data is None and self.columns:
# Create a new table with the same columns, but empty rows
formats = ','.join(self.columns._recformats)
data = np.rec.array(None, formats=formats,
names=self.columns.names,
shape=0)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# Go ahead and always make a view, even if the data is already the
# correct class (self._data_type) so we can update things like the
# column defs, if necessary
data = data.view(self._data_type)
if not isinstance(data.columns, self._columns_type):
# This would be the place, if the input data was for an ASCII
# table and this is binary table, or vice versa, to convert the
# data to the appropriate format for the table type
new_columns = self._columns_type(data.columns)
data = FITS_rec.from_columns(new_columns)
if 'data' in self.__dict__:
self.columns._remove_listener(self.__dict__['data'])
self.__dict__['data'] = data
self.columns = self.data.columns
self.columns._add_listener(self.data)
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError('Table data has incorrect type.')
# returning the data signals to lazyproperty that we've already handled
# setting self.__dict__['data']
return data
@property
def _nrows(self):
if not self._data_loaded:
return self._header.get('NAXIS2', 0)
else:
return len(self.data)
@lazyproperty
def _theap(self):
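# THEAP, when present, gives the offset in bytes of the heap from the
# start of the data segment; by default the heap begins immediately
# after the main table, i.e. at NAXIS1 * NAXIS2 bytes.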
size = self._header['NAXIS1'] * self._header['NAXIS2']
return self._header.get('THEAP', size)
# TODO: Need to either rename this to update_header, for symmetry with the
# Image HDUs, or just at some point deprecate it and remove it altogether,
# since header updates should occur automatically when necessary...
def update(self):
"""
Update header keywords to reflect recent changes of columns.
"""
self._header.set('NAXIS1', self.data._raw_itemsize, after='NAXIS')
self._header.set('NAXIS2', self.data.shape[0], after='NAXIS1')
self._header.set('TFIELDS', len(self.columns), after='GCOUNT')
self._clear_table_keywords()
self._populate_table_keywords()
def copy(self):
"""
Make a copy of the table HDU, both header and data are copied.
"""
# touch the data, so it's defined (in the case of reading from a
# FITS file)
return self.__class__(data=self.data.copy(),
header=self._header.copy())
def _prewriteto(self, checksum=False, inplace=False):
if self._has_data:
self.data._scale_back(
update_heap_pointers=not self._manages_own_heap)
# check TFIELDS and NAXIS2
self._header['TFIELDS'] = len(self.data._coldefs)
self._header['NAXIS2'] = self.data.shape[0]
# calculate PCOUNT, for variable length tables
tbsize = self._header['NAXIS1'] * self._header['NAXIS2']
heapstart = self._header.get('THEAP', tbsize)
self.data._gap = heapstart - tbsize
pcount = self.data._heapsize + self.data._gap
if pcount > 0:
self._header['PCOUNT'] = pcount
# update the other T****n keywords
self._populate_table_keywords()
# update TFORM for variable length columns
for idx in range(self.data._nfields):
format = self.data._coldefs._recformats[idx]
if isinstance(format, _FormatP):
_max = self.data.field(idx).max
# May be either _FormatP or _FormatQ
format_cls = format.__class__
format = format_cls(format.dtype, repeat=format.repeat,
max=_max)
self._header['TFORM' + str(idx + 1)] = format.tform
return super()._prewriteto(checksum, inplace)
def _verify(self, option='warn'):
"""
_TableBaseHDU verify method.
"""
errs = super()._verify(option=option)
if (len(self._header) > 1):
if not (isinstance(self._header[0], str) and
self._header[0].rstrip() == self._extension):
err_text = 'The XTENSION keyword must match the HDU type.'
fix_text = f'Converted the XTENSION keyword to {self._extension}.'
def fix(header=self._header):
header[0] = (self._extension, self._ext_comment)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
self.req_cards('NAXIS', None, lambda v: (v == 2), 2, option, errs)
self.req_cards('BITPIX', None, lambda v: (v == 8), 8, option, errs)
self.req_cards('TFIELDS', 7,
lambda v: (_is_int(v) and v >= 0 and v <= 999), 0,
option, errs)
tfields = self._header['TFIELDS']
for idx in range(tfields):
self.req_cards('TFORM' + str(idx + 1), None, None, None, option,
errs)
return errs
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
nrows = 0
else:
nrows = len(self.data)
ncols = len(self.columns)
format = self.columns.formats
# if data is not touched yet, use header info.
else:
nrows = self._header['NAXIS2']
ncols = self._header['TFIELDS']
format = ', '.join([self._header['TFORM' + str(j + 1)]
for j in range(ncols)])
format = f'[{format}]'
dims = f"{nrows}R x {ncols}C"
ncards = len(self._header)
return (self.name, self.ver, class_name, ncards, dims, format)
def _update_column_removed(self, columns, idx):
super()._update_column_removed(columns, idx)
# Fix the header to reflect the column removal
self._clear_table_keywords(index=idx)
def _update_column_attribute_changed(self, column, col_idx, attr,
old_value, new_value):
"""
Update the header when one of the column objects is updated.
"""
# base_keyword is the keyword without the index such as TDIM
# while keyword is like TDIM1
base_keyword = ATTRIBUTE_TO_KEYWORD[attr]
keyword = base_keyword + str(col_idx + 1)
if keyword in self._header:
if new_value is None:
# If the new value is None, i.e. None was assigned to the
# column attribute, then treat this as equivalent to deleting
# that attribute
del self._header[keyword]
else:
self._header[keyword] = new_value
else:
keyword_idx = KEYWORD_NAMES.index(base_keyword)
# Determine the appropriate keyword to insert this one before/after
# if it did not already exist in the header
for before_keyword in reversed(KEYWORD_NAMES[:keyword_idx]):
before_keyword += str(col_idx + 1)
if before_keyword in self._header:
self._header.insert(before_keyword, (keyword, new_value),
after=True)
break
else:
for after_keyword in KEYWORD_NAMES[keyword_idx + 1:]:
after_keyword += str(col_idx + 1)
if after_keyword in self._header:
self._header.insert(after_keyword,
(keyword, new_value))
break
else:
# Just append
self._header[keyword] = new_value
def _clear_table_keywords(self, index=None):
"""
Wipe out any existing table definition keywords from the header.
If specified, only clear keywords for the given table index (shifting
up keywords for any other columns). The index is zero-based.
Otherwise, clear the keywords for all columns.
"""
# First collect all the table structure-related keywords in the header
# into a single list so we can then sort them by index, which will be
# useful later for updating the header in a sensible order (since the
# header *might* not already be written in a reasonable order)
table_keywords = []
for idx, keyword in enumerate(self._header.keys()):
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group('label')
except Exception:
continue # skip if there is no match
if base_keyword in KEYWORD_TO_ATTRIBUTE:
# TEMP: For Astropy 3.0 we don't clear away the following keywords
# as we are first raising a deprecation warning that these will be
# dropped automatically if they were specified in the header. We
# can remove this once we are happy to break backward-compatibility
if base_keyword in {'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS'}:
continue
num = int(match.group('num')) - 1 # convert to zero-base
table_keywords.append((idx, match.group(0), base_keyword,
num))
# First delete
rev_sorted_idx_0 = sorted(table_keywords, key=operator.itemgetter(0),
reverse=True)
for idx, keyword, _, num in rev_sorted_idx_0:
if index is None or index == num:
del self._header[idx]
# Now shift up remaining column keywords if only one column was cleared
if index is not None:
sorted_idx_3 = sorted(table_keywords, key=operator.itemgetter(3))
for _, keyword, base_keyword, num in sorted_idx_3:
if num <= index:
continue
old_card = self._header.cards[keyword]
new_card = (base_keyword + str(num), old_card.value,
old_card.comment)
self._header.insert(keyword, new_card)
del self._header[keyword]
# Also decrement TFIELDS
if 'TFIELDS' in self._header:
self._header['TFIELDS'] -= 1
def _populate_table_keywords(self):
"""Populate the new table definition keywords from the header."""
for idx, column in enumerate(self.columns):
for keyword, attr in KEYWORD_TO_ATTRIBUTE.items():
val = getattr(column, attr)
if val is not None:
keyword = keyword + str(idx + 1)
self._header[keyword] = val
class TableHDU(_TableBaseHDU):
"""
FITS ASCII table extension HDU class.
Parameters
----------
data : array or `FITS_rec`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
ver : int > 0 or None, optional
The version of the HDU; this will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
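Examples
--------
A minimal sketch (hypothetical column names and data, mirroring the
ASCII formats used in this module's tests)::
    >>> from astropy.io import fits
    >>> c1 = fits.Column(name='abc', format='A3', array=['abc', 'def'])
    >>> c2 = fits.Column(name='t1', format='I5', array=[91, 92])
    >>> hdu = fits.TableHDU.from_columns([c1, c2])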
"""
_extension = 'TABLE'
_ext_comment = 'ASCII table extension'
_padding_byte = ' '
_columns_type = _AsciiColDefs
__format_RE = re.compile(
r'(?P<code>[ADEFIJ])(?P<width>\d+)(?:\.(?P<prec>\d+))?')
def __init__(self, data=None, header=None, name=None, ver=None, character_as_bytes=False):
super().__init__(data, header, name=name, ver=ver, character_as_bytes=character_as_bytes)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == 'XTENSION' and xtension == cls._extension
def _get_tbdata(self):
columns = self.columns
names = list(columns.names)
# determine if there are duplicate field names and, if there
# are, raise an exception
dup = np.rec.find_duplicate(names)
if dup:
raise ValueError(f"Duplicate field names: {dup}")
# TODO: Determine if this extra logic is necessary--I feel like the
# _AsciiColDefs class should be responsible for telling the table what
# its dtype should be...
itemsize = columns.spans[-1] + columns.starts[-1] - 1
dtype = {}
for idx in range(len(columns)):
data_type = 'S' + str(columns.spans[idx])
if idx == len(columns) - 1:
# The last column is padded out to the value of NAXIS1
if self._header['NAXIS1'] > itemsize:
data_type = 'S' + str(columns.spans[idx] +
self._header['NAXIS1'] - itemsize)
dtype[columns.names[idx]] = (data_type, columns.starts[idx] - 1)
raw_data = self._get_raw_data(self._nrows, dtype, self._data_offset)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
return data.view(self._data_type)
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
# We need to pad the data to a block length before calculating
# the datasum.
bytes_array = self.data.view(type=np.ndarray, dtype=np.ubyte)
padding = np.frombuffer(_pad_length(self.size) * b' ',
dtype=np.ubyte)
d = np.append(bytes_array, padding)
cs = self._compute_checksum(d)
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _verify(self, option='warn'):
"""
`TableHDU` verify method.
"""
errs = super()._verify(option=option)
self.req_cards('PCOUNT', None, lambda v: (v == 0), 0, option, errs)
tfields = self._header['TFIELDS']
for idx in range(tfields):
self.req_cards('TBCOL' + str(idx + 1), None, _is_int, None, option,
errs)
return errs
class BinTableHDU(_TableBaseHDU):
"""
Binary table HDU class.
Parameters
----------
data : array, `FITS_rec`, or `~astropy.table.Table`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
The version of the HDU; this will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
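Examples
--------
A minimal sketch (hypothetical column data) of constructing a binary
table HDU directly from an `~astropy.table.Table`, as handled in
``__init__`` below::
    >>> from astropy.io import fits
    >>> from astropy.table import Table
    >>> t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
    >>> hdu = fits.BinTableHDU(t, name='MYTABLE')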
"""
_extension = 'BINTABLE'
_ext_comment = 'binary table extension'
def __init__(self, data=None, header=None, name=None, uint=False, ver=None,
character_as_bytes=False):
from astropy.table import Table
if isinstance(data, Table):
from astropy.io.fits.convenience import table_to_hdu
hdu = table_to_hdu(data)
if header is not None:
hdu.header.update(header)
data = hdu.data
header = hdu.header
super().__init__(data, header, name=name, uint=uint, ver=ver,
character_as_bytes=character_as_bytes)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return (card.keyword == 'XTENSION' and
xtension in (cls._extension, 'A3DTABLE'))
def _calculate_datasum_with_heap(self):
"""
Calculate the value for the ``DATASUM`` card given the input data
"""
with _binary_table_byte_swap(self.data) as data:
dout = data.view(type=np.ndarray, dtype=np.ubyte)
csum = self._compute_checksum(dout)
# Now add in the heap data to the checksum (we can skip any gap
# between the table and the heap since it's all zeros and doesn't
# contribute to the checksum)
if data._get_raw_data() is None:
# This block is still needed because
# test_variable_length_table_data leads to ._get_raw_data
# returning None, which means _get_heap_data doesn't work.
# This happens when the data is loaded in memory rather than
# being left unloaded on disk.
for idx in range(data._nfields):
if isinstance(data.columns._recformats[idx], _FormatP):
for coldata in data.field(idx):
# coldata should already be byteswapped from the call
# to _binary_table_byte_swap
if not len(coldata):
continue
csum = self._compute_checksum(coldata, csum)
else:
csum = self._compute_checksum(data._get_heap_data(), csum)
return csum
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# This method calculates the datasum while incorporating any
# heap data, which is obviously not handled from the base
# _calculate_datasum
return self._calculate_datasum_with_heap()
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _writedata_internal(self, fileobj):
size = 0
if self.data is None:
return size
with _binary_table_byte_swap(self.data) as data:
if _has_unicode_fields(data):
# If the raw data was a user-supplied recarray, we can't write
# unicode columns directly to the file, so we have to switch
# to a slower row-by-row write
self._writedata_by_row(fileobj)
else:
fileobj.writearray(data)
# Write out the heap of variable length array columns; this has
# to be done after the "regular" data is written (above).
# To avoid a bug in the Lustre filesystem client, don't
# write 0-byte objects.
if data._gap > 0:
fileobj.write((data._gap * '\0').encode('ascii'))
nbytes = data._gap
if not self._manages_own_heap:
# Write the heap data one column at a time, in the order
# that the data pointers appear in the column (regardless
# if that data pointer has a different, previous heap
# offset listed)
for idx in range(data._nfields):
if not isinstance(data.columns._recformats[idx],
_FormatP):
continue
field = self.data.field(idx)
for row in field:
if len(row) > 0:
nbytes += row.nbytes
fileobj.writearray(row)
else:
heap_data = data._get_heap_data()
if len(heap_data) > 0:
nbytes += len(heap_data)
fileobj.writearray(heap_data)
data._heapsize = nbytes - data._gap
size += nbytes
size += self.data.size * self.data._raw_itemsize
return size
def _writedata_by_row(self, fileobj):
fields = [self.data.field(idx)
for idx in range(len(self.data.columns))]
# Creating Record objects is expensive (as in
# `for row in self.data:`), so instead we just iterate over the row
# indices and get one field at a time:
for idx in range(len(self.data)):
for field in fields:
item = field[idx]
field_width = None
if field.dtype.kind == 'U':
# Read the field *width* by reading past the field kind.
i = field.dtype.str.index(field.dtype.kind)
field_width = int(field.dtype.str[i+1:])
item = np.char.encode(item, 'ascii')
fileobj.writearray(item)
if field_width is not None:
j = item.dtype.str.index(item.dtype.kind)
item_length = int(item.dtype.str[j+1:])
# Fix padding problem (see #5296).
padding = '\x00'*(field_width - item_length)
fileobj.write(padding.encode('ascii'))
_tdump_file_format = textwrap.dedent("""
- **datafile:** Each line of the data file represents one row of table
data. The data is output one column at a time in column order. If
a column contains an array, each element of the column array in the
current row is output before moving on to the next column. Each row
ends with a new line.
Integer data is output right-justified in a 21-character field
followed by a blank. Floating point data is output right justified
using 'g' format in a 21-character field with 15 digits of
precision, followed by a blank. String data that does not contain
whitespace is output left-justified in a field whose width matches
the width specified in the ``TFORM`` header parameter for the
column, followed by a blank. When the string data contains
whitespace characters, the string is enclosed in quotation marks
(``""``). For the last data element in a row, the trailing blank in
the field is replaced by a new line character.
For column data containing variable length arrays ('P' format), the
array data is preceded by the string ``'VLA_Length= '`` and the
integer length of the array for that row, left-justified in a
21-character field, followed by a blank.
.. note::
This format does *not* support variable length arrays using the
'Q' format, due to difficult-to-overcome ambiguities. What this
means is that this file format cannot support VLA columns in
tables stored in files that are over 2 GB in size.
For column data representing a bit field ('X' format), each bit
value in the field is output right-justified in a 21-character field
as 1 (for true) or 0 (for false).
- **cdfile:** Each line of the column definitions file provides the
definitions for one column in the table. The line is broken up into
8 sixteen-character fields. The first field provides the column
name (``TTYPEn``). The second field provides the column format
(``TFORMn``). The third field provides the display format
(``TDISPn``). The fourth field provides the physical units
(``TUNITn``). The fifth field provides the dimensions for a
multidimensional array (``TDIMn``). The sixth field provides the
value that signifies an undefined value (``TNULLn``). The seventh
field provides the scale factor (``TSCALn``). The eighth field
provides the offset value (``TZEROn``). A field value of ``""`` is
used to represent the case where no value is provided.
- **hfile:** Each line of the header parameters file provides the
definition of a single HDU header card as represented by the card
image.
""")
def dump(self, datafile=None, cdfile=None, hfile=None, overwrite=False):
"""
Dump the table HDU to a file in ASCII format. The table may be dumped
in three separate files, one containing column definitions, one
containing header parameters, and one for table data.
Parameters
----------
datafile : path-like or file-like, optional
Output data file. The default is the root name of the
fits file associated with this HDU appended with the
extension ``.txt``.
cdfile : path-like or file-like, optional
Output column definitions file. The default is `None`, no
column definitions output is produced.
hfile : path-like or file-like, optional
Output header parameters file. The default is `None`,
no header parameters output is produced.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Notes
-----
The primary use for the `dump` method is to allow viewing and editing
the table data and parameters in a standard text editor.
The `load` method can be used to create a new table from the three
plain text (ASCII) files.
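Examples
--------
A minimal sketch, assuming ``tbhdu`` is an existing `BinTableHDU` and
none of the output files exist yet::
    >>> tbhdu.dump('data.txt', 'cols.txt', 'header.txt')  # doctest: +SKIP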
"""
# check if the output files already exist
exist = []
files = [datafile, cdfile, hfile]
for f in files:
if isinstance(f, str):
if os.path.exists(f) and os.path.getsize(f) != 0:
if overwrite:
os.remove(f)
else:
exist.append(f)
if exist:
raise OSError(' '.join([f"File '{f}' already exists."
for f in exist])+" If you mean to "
"replace the file(s) "
"then use the argument "
"'overwrite=True'.")
# Process the data
self._dump_data(datafile)
# Process the column definitions
if cdfile:
self._dump_coldefs(cdfile)
# Process the header parameters
if hfile:
self._header.tofile(hfile, sep='\n', endcard=False, padding=False)
if isinstance(dump.__doc__, str):
dump.__doc__ += _tdump_file_format.replace('\n', '\n ')
def load(cls, datafile, cdfile=None, hfile=None, replace=False,
header=None):
"""
Create a table from the input ASCII files. The input is from up to
three separate files, one containing column definitions, one containing
header parameters, and one containing column data.
The column definition and header parameters files are not required.
When absent, the column definitions and/or header parameters are taken
from the header object given in the header argument; otherwise sensible
defaults are inferred (though this mode is not recommended).
Parameters
----------
datafile : path-like or file-like
Input data file containing the table data in ASCII format.
cdfile : path-like or file-like, optional
Input column definition file containing the names,
formats, display formats, physical units, multidimensional
array dimensions, undefined values, scale factors, and
offsets associated with the columns in the table. If
`None`, the column definitions are taken from the current
values in this object.
hfile : path-like or file-like, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table. If
`None`, the header parameter definitions are taken from
the current values in this objects header.
replace : bool, optional
When `True`, indicates that the entire header should be
replaced with the contents of the ASCII file instead of
just updating the current header.
header : `~astropy.io.fits.Header`, optional
When the cdfile and hfile are missing, use this Header object in
the creation of the new table and HDU. Otherwise this Header
supersedes the keywords from hfile, which is only used to update
values not present in this Header, unless ``replace=True``, in which
case this Header's values are completely replaced with the values from
hfile.
Notes
-----
The primary use for the `load` method is to allow the input of table
data and parameters in ASCII format that were edited in a standard
text editor. The `dump` method can be used to create the initial ASCII
files.
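Examples
--------
A minimal sketch, assuming the three files were produced by an earlier
:meth:`dump` call::
    >>> hdu = fits.BinTableHDU.load('data.txt', cdfile='cols.txt',
    ...                             hfile='header.txt')  # doctest: +SKIP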
"""
# Process the parameter file
if header is None:
header = Header()
if hfile:
if replace:
header = Header.fromtextfile(hfile)
else:
header.extend(Header.fromtextfile(hfile), update=True,
update_first=True)
coldefs = None
# Process the column definitions file
if cdfile:
coldefs = cls._load_coldefs(cdfile)
# Process the data file
data = cls._load_data(datafile, coldefs)
if coldefs is None:
coldefs = ColDefs(data)
# Create a new HDU using the supplied header and data
hdu = cls(data=data, header=header)
hdu.columns = coldefs
return hdu
if isinstance(load.__doc__, str):
load.__doc__ += _tdump_file_format.replace('\n', '\n ')
load = classmethod(load)
# Have to create a classmethod from this here instead of as a decorator;
# otherwise we can't update __doc__
def _dump_data(self, fileobj):
"""
Write the table data in the ASCII format read by BinTableHDU.load()
to fileobj.
"""
if not fileobj and self._file:
root = os.path.splitext(self._file.name)[0]
fileobj = root + '.txt'
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'w')
close_file = True
linewriter = csv.writer(fileobj, dialect=FITSTableDumpDialect)
# Process each row of the table and output one row at a time
def format_value(val, format):
if format[0] == 'S':
itemsize = int(format[1:])
return '{:{size}}'.format(val, size=itemsize)
elif format in np.typecodes['AllInteger']:
# output integer
return f'{val:21d}'
elif format in np.typecodes['Complex']:
return f'{val.real:21.15g}+{val.imag:.15g}j'
elif format in np.typecodes['Float']:
# output floating point
return f'{val:#21.15g}'
for row in self.data:
line = [] # the line for this row of the table
# Process each column of the row.
for column in self.columns:
# format of data in a variable length array
# where None means it is not a VLA:
vla_format = None
format = _convert_format(column.format)
if isinstance(format, _FormatP):
# P format means this is a variable length array so output
# the length of the array for this row and set the format
# for the VLA data
line.append('VLA_Length=')
line.append(f'{len(row[column.name]):21d}')
_, dtype, option = _parse_tformat(column.format)
vla_format = FITS2NUMPY[option[0]][0]
if vla_format:
# Output the data for each element in the array
for val in row[column.name].flat:
line.append(format_value(val, vla_format))
else:
# The column data is a single element
dtype = self.data.dtype.fields[column.name][0]
array_format = dtype.char
if array_format == 'V':
array_format = dtype.base.char
if array_format == 'S':
array_format += str(dtype.itemsize)
if dtype.char == 'V':
for value in row[column.name].flat:
line.append(format_value(value, array_format))
else:
line.append(format_value(row[column.name],
array_format))
linewriter.writerow(line)
if close_file:
fileobj.close()
def _dump_coldefs(self, fileobj):
"""
Write the column definition parameters in the ASCII format read by
BinTableHDU.load() to fileobj.
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'w')
close_file = True
# Process each column of the table and output the result to the
# file one at a time
for column in self.columns:
line = [column.name, column.format]
attrs = ['disp', 'unit', 'dim', 'null', 'bscale', 'bzero']
line += ['{!s:16s}'.format(value if value else '""')
for value in (getattr(column, attr) for attr in attrs)]
fileobj.write(' '.join(line))
fileobj.write('\n')
if close_file:
fileobj.close()
@classmethod
def _load_data(cls, fileobj, coldefs=None):
"""
Read the table data from the ASCII file output by BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj)
close_file = True
initialpos = fileobj.tell() # We'll be returning here later
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
# First we need to do some preprocessing on the file to find out how
# much memory we'll need to reserve for the table. This is necessary
# even if we already have the coldefs in order to determine how many
# rows to reserve memory for
vla_lengths = []
recformats = []
names = []
nrows = 0
if coldefs is not None:
recformats = coldefs._recformats
names = coldefs.names
def update_recformats(value, idx):
fitsformat = _scalar_to_format(value)
recformat = _convert_format(fitsformat)
if idx >= len(recformats):
recformats.append(recformat)
else:
if _cmp_recformats(recformats[idx], recformat) < 0:
recformats[idx] = recformat
# TODO: The handling of VLAs could probably be simplified a bit
for row in linereader:
nrows += 1
if coldefs is not None:
continue
col = 0
idx = 0
while idx < len(row):
if row[idx] == 'VLA_Length=':
if col < len(vla_lengths):
vla_length = vla_lengths[col]
else:
vla_length = int(row[idx + 1])
vla_lengths.append(vla_length)
idx += 2
while vla_length:
update_recformats(row[idx], col)
vla_length -= 1
idx += 1
col += 1
else:
if col >= len(vla_lengths):
vla_lengths.append(None)
update_recformats(row[idx], col)
col += 1
idx += 1
# Update the recformats for any VLAs
for idx, length in enumerate(vla_lengths):
if length is not None:
recformats[idx] = str(length) + recformats[idx]
dtype = np.rec.format_parser(recformats, names, None).dtype
# TODO: In the future maybe enable loading a bit at a time so that we
# can convert from this format to an actual FITS file on disk without
# needing enough physical memory to hold the entire thing at once
hdu = BinTableHDU.from_columns(np.recarray(shape=1, dtype=dtype),
nrows=nrows, fill=True)
# TODO: It seems to me a lot of this could/should be handled from
# within the FITS_rec class rather than here.
data = hdu.data
for idx, length in enumerate(vla_lengths):
if length is not None:
arr = data.columns._arrays[idx]
dt = recformats[idx][len(str(length)):]
# NOTE: FormatQ not supported here; it's hard to determine
# whether or not it will be necessary to use a wider descriptor
# type. The function documentation will have to serve as a
# warning that this is not supported.
recformats[idx] = _FormatP(dt, max=length)
data.columns._recformats[idx] = recformats[idx]
name = data.columns.names[idx]
data._cache_field(name, _makep(arr, arr, recformats[idx]))
def format_value(col, val):
# Special formatting for a couple particular data types
if recformats[col] == FITS2NUMPY['L']:
return bool(int(val))
elif recformats[col] == FITS2NUMPY['M']:
# For some reason, in arrays/fields where numpy expects a
# complex it's not happy to take a string representation
# (though it's happy to do that in other contexts), so we have
# to convert the string representation for it:
return complex(val)
else:
return val
# Jump back to the start of the data and create a new line reader
fileobj.seek(initialpos)
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
for row, line in enumerate(linereader):
col = 0
idx = 0
while idx < len(line):
if line[idx] == 'VLA_Length=':
vla_len = vla_lengths[col]
idx += 2
slice_ = slice(idx, idx + vla_len)
data[row][col][:] = line[idx:idx + vla_len]
idx += vla_len
elif dtype[col].shape:
# This is an array column
array_size = int(np.multiply.reduce(dtype[col].shape))
slice_ = slice(idx, idx + array_size)
idx += array_size
else:
slice_ = None
if slice_ is None:
# This is a scalar row element
data[row][col] = format_value(col, line[idx])
idx += 1
else:
data[row][col].flat[:] = [format_value(col, val)
for val in line[slice_]]
col += 1
if close_file:
fileobj.close()
return data
@classmethod
def _load_coldefs(cls, fileobj):
"""
Read the table column definitions from the ASCII file output by
BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj)
close_file = True
columns = []
for line in fileobj:
words = line[:-1].split()
kwargs = {}
for key in ['name', 'format', 'disp', 'unit', 'dim']:
kwargs[key] = words.pop(0).replace('""', '')
for key in ['null', 'bscale', 'bzero']:
word = words.pop(0).replace('""', '')
if word:
word = _str_to_num(word)
kwargs[key] = word
columns.append(Column(**kwargs))
if close_file:
fileobj.close()
return ColDefs(columns)
@contextlib.contextmanager
def _binary_table_byte_swap(data):
"""
Ensures that all the data of a binary FITS table (represented as a FITS_rec
object) is in a big-endian byte order. Columns are swapped in-place one
at a time, and then returned to their previous byte order when this context
manager exits.
Because a new dtype is needed to represent the byte-swapped columns, the
new dtype is temporarily applied as well.
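A minimal usage sketch (``data`` is a hypothetical FITS_rec)::
    with _binary_table_byte_swap(data) as swapped:
        raw = swapped.view(type=np.ndarray, dtype=np.ubyte)
On exit the swapped columns and the original dtype are restored.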
"""
orig_dtype = data.dtype
names = []
formats = []
offsets = []
to_swap = []
if sys.byteorder == 'little':
swap_types = ('<', '=')
else:
swap_types = ('<',)
for idx, name in enumerate(orig_dtype.names):
field = _get_recarray_field(data, idx)
field_dtype, field_offset = orig_dtype.fields[name]
names.append(name)
formats.append(field_dtype)
offsets.append(field_offset)
if isinstance(field, chararray.chararray):
continue
# only swap unswapped
# must use field_dtype.base here since for multi-element dtypes,
# the .str will be '|V<N>' where <N> is the total bytes per element
if field.itemsize > 1 and field_dtype.base.str[0] in swap_types:
to_swap.append(field)
# Override the dtype for this field in the new record dtype with
# the byteswapped version
formats[-1] = field_dtype.newbyteorder()
# deal with var length table
recformat = data.columns._recformats[idx]
if isinstance(recformat, _FormatP):
coldata = data.field(idx)
for c in coldata:
if (not isinstance(c, chararray.chararray) and
c.itemsize > 1 and c.dtype.str[0] in swap_types):
to_swap.append(c)
for arr in reversed(to_swap):
arr.byteswap(True)
data.dtype = np.dtype({'names': names,
'formats': formats,
'offsets': offsets})
yield data
for arr in to_swap:
arr.byteswap(True)
data.dtype = orig_dtype
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import copy
import gc
import pickle
import re
import sys
import warnings
import pytest
import numpy as np
from numpy import char as chararray
try:
import objgraph
HAVE_OBJGRAPH = True
except ImportError:
HAVE_OBJGRAPH = False
from astropy.io import fits
from astropy.table import Table
from astropy.units import UnitsWarning, Unit, UnrecognizedUnit
from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from astropy.io.fits.column import ColumnAttribute, Delayed, NUMPY2FITS
from astropy.io.fits.util import decode_ascii
from astropy.io.fits.verify import VerifyError
from . import FitsTestCase
def comparefloats(a, b):
"""
Compare two float scalars or arrays and see if they are consistent
Consistency is determined by ensuring the difference is less than the
expected amount. Return True if consistent, False if any differences.
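A quick sketch of the intended behaviour::
    >>> comparefloats(np.array([1.0]), np.array([1.0 + 1e-8]))
    True
    >>> comparefloats(np.array([1.0]), np.array([1.1]))
    False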
"""
aa = a
bb = b
# compute expected precision
if aa.dtype.name == 'float32' or bb.dtype.name == 'float32':
precision = 0.000001
else:
precision = 0.0000000000000001
precision = 0.00001 # until precision problem is fixed in astropy.io.fits
diff = np.absolute(aa - bb)
mask0 = aa == 0
masknz = aa != 0.
if np.any(mask0):
if diff[mask0].max() != 0.:
return False
if np.any(masknz):
if (diff[masknz] / np.absolute(aa[masknz])).max() > precision:
return False
return True
def comparerecords(a, b):
"""
Compare two record arrays
Does this field by field, using approximation testing for float columns
(Complex not yet handled.)
Column names not compared, but column types and sizes are.
"""
nfieldsa = len(a.dtype.names)
nfieldsb = len(b.dtype.names)
if nfieldsa != nfieldsb:
print("number of fields don't match")
return False
for i in range(nfieldsa):
fielda = a.field(i)
fieldb = b.field(i)
if fielda.dtype.char == 'S':
fielda = decode_ascii(fielda)
if fieldb.dtype.char == 'S':
fieldb = decode_ascii(fieldb)
if (not isinstance(fielda, type(fieldb)) and not
isinstance(fieldb, type(fielda))):
print("type(fielda): ", type(fielda), " fielda: ", fielda)
print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb)
print(f'field {i} type differs')
return False
if len(fielda) and isinstance(fielda[0], np.floating):
if not comparefloats(fielda, fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f'field {i} differs')
return False
elif (isinstance(fielda, fits.column._VLF) or
isinstance(fieldb, fits.column._VLF)):
for row in range(len(fielda)):
if np.any(fielda[row] != fieldb[row]):
print(f'fielda[{row}]: {fielda[row]}')
print(f'fieldb[{row}]: {fieldb[row]}')
print(f'field {i} differs in row {row}')
# A difference was found, so report inconsistency (previously this
# fell through and could incorrectly return True)
return False
else:
if np.any(fielda != fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f'field {i} differs')
return False
return True
def _assert_attr_col(new_tbhdu, tbhdu):
"""
Helper function to compare column attributes
"""
# Double check that the headers are equivalent
assert tbhdu.columns.names == new_tbhdu.columns.names
attrs = [k for k, v in fits.Column.__dict__.items()
if isinstance(v, ColumnAttribute)]
for name in tbhdu.columns.names:
col = tbhdu.columns[name]
new_col = new_tbhdu.columns[name]
for attr in attrs:
if getattr(col, attr) and getattr(new_col, attr):
assert getattr(col, attr) == getattr(new_col, attr)
class TestTableFunctions(FitsTestCase):
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize a new
HDU.
This is like the test of the same name in test_image, but tests this
for tables as well.
"""
ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()])
thdr = ifd[1].header
thdr['FILENAME'] = 'labq01i3q_rawtag.fits'
thdu = fits.BinTableHDU(header=thdr)
ofd = fits.HDUList(thdu)
ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits'
# Original header should be unchanged
assert thdr['FILENAME'] == 'labq01i3q_rawtag.fits'
def test_open(self):
# open some existing FITS files:
tt = fits.open(self.data('tb.fits'))
fd = fits.open(self.data('test0.fits'))
# create some local arrays
a1 = chararray.array(['abc', 'def', 'xx'])
r1 = np.array([11., 12., 13.], dtype=np.float32)
# create a table from scratch, using a mixture of columns from existing
# tables and locally created arrays:
# first, create individual column definitions
c1 = fits.Column(name='abc', format='3A', array=a1)
c2 = fits.Column(name='def', format='E', array=r1)
a3 = np.array([3, 4, 5], dtype='i2')
c3 = fits.Column(name='xyz', format='I', array=a3)
a4 = np.array([1, 2, 3], dtype='i2')
c4 = fits.Column(name='t1', format='I', array=a4)
a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype='c8')
c5 = fits.Column(name='t2', format='C', array=a5)
# Note that the X format must be a 2-D array
a6 = np.array([[0], [1], [0]], dtype=np.uint8)
c6 = fits.Column(name='t3', format='X', array=a6)
a7 = np.array([101, 102, 103], dtype='i4')
c7 = fits.Column(name='t4', format='J', array=a7)
a8 = np.array([[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1]], dtype=np.uint8)
c8 = fits.Column(name='t5', format='11X', array=a8)
# second, create a column-definitions object for all columns in a table
x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8])
tbhdu = fits.BinTableHDU.from_columns(x)
# another way to create a table is by using existing table's
# information:
x2 = fits.ColDefs(tt[1])
t2 = fits.BinTableHDU.from_columns(x2, nrows=2)
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4')
assert comparerecords(t2.data, ra)
# the table HDU's data is a subclass of a record array, so we can
# access one row like this:
assert tbhdu.data[1][0] == a1[1]
assert tbhdu.data[1][1] == r1[1]
assert tbhdu.data[1][2] == a3[1]
assert tbhdu.data[1][3] == a4[1]
assert tbhdu.data[1][4] == a5[1]
assert (tbhdu.data[1][5] == a6[1].view('bool')).all()
assert tbhdu.data[1][6] == a7[1]
assert (tbhdu.data[1][7] == a8[1]).all()
# and a column like this:
assert str(tbhdu.data.field('abc')) == "['abc' 'def' 'xx']"
# An alternative way to create a column-definitions object is from an
# existing table.
_ = fits.ColDefs(tt[1])
# now we write out the newly created table HDU to a FITS file:
fout = fits.HDUList(fits.PrimaryHDU())
fout.append(tbhdu)
fout.writeto(self.temp('tableout1.fits'), overwrite=True)
with fits.open(self.temp('tableout1.fits')) as f2:
temp = f2[1].data.field(7)
assert (temp[0] == [True, True, False, True, False, True,
True, True, False, False, True]).all()
# An alternative way to create an output table FITS file:
fout2 = fits.open(self.temp('tableout2.fits'), 'append')
fout2.append(fd[0])
fout2.append(tbhdu)
fout2.close()
tt.close()
fd.close()
def test_binary_table(self):
# binary table:
t = fits.open(self.data('tb.fits'))
assert t[1].header['tform1'] == '1J'
info = {'name': ['c1', 'c2', 'c3', 'c4'],
'format': ['1J', '3A', '1E', '1L'],
'unit': ['', '', '', ''],
'null': [-2147483647, '', '', ''],
'bscale': ['', '', 3, ''],
'bzero': ['', '', 0.4, ''],
'disp': ['I11', 'A3', 'G15.7', 'L6'],
'start': ['', '', '', ''],
'dim': ['', '', '', ''],
'coord_inc': ['', '', '', ''],
'coord_type': ['', '', '', ''],
'coord_unit': ['', '', '', ''],
'coord_ref_point': ['', '', '', ''],
'coord_ref_value': ['', '', '', ''],
'time_ref_pos': ['', '', '', '']}
assert t[1].columns.info(output=False) == info
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4')
assert comparerecords(t[1].data, ra[:2])
# Change scaled field and scale back to the original array
t[1].data.field('c4')[0] = 1
t[1].data._scale_back()
assert str(np.rec.recarray.field(t[1].data, 'c4')) == '[84 84]'
# look at data column-wise
assert (t[1].data.field(0) == np.array([1, 2])).all()
# When there are scaled columns, the raw data are in data._parent
t.close()
def test_ascii_table(self):
# ASCII table
a = fits.open(self.data('ascii.fits'))
ra1 = np.rec.array([
(10.123000144958496, 37),
(5.1999998092651367, 23),
(15.609999656677246, 17),
(0.0, 0),
(345.0, 345)], names='c1, c2')
assert comparerecords(a[1].data, ra1)
# Test slicing
a2 = a[1].data[2:][2:]
ra2 = np.rec.array([(345.0, 345)], names='c1, c2')
assert comparerecords(a2, ra2)
assert (a2.field(1) == np.array([345])).all()
ra3 = np.rec.array([
(10.123000144958496, 37),
(15.609999656677246, 17),
(345.0, 345)
], names='c1, c2')
assert comparerecords(a[1].data[::2], ra3)
# Test Start Column
a1 = chararray.array(['abcd', 'def'])
r1 = np.array([11., 12.])
c1 = fits.Column(name='abc', format='A3', start=19, array=a1)
c2 = fits.Column(name='def', format='E', start=3, array=r1)
c3 = fits.Column(name='t1', format='I', array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c2, c1, c3])
assert (dict(hdu.data.dtype.fields) ==
{'abc': (np.dtype('|S3'), 18),
'def': (np.dtype('|S15'), 2),
't1': (np.dtype('|S10'), 21)})
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
# Test Scaling
r1 = np.array([11., 12.])
c2 = fits.Column(name='def', format='D', array=r1, bscale=2.3,
bzero=0.6)
hdu = fits.TableHDU.from_columns([c2])
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with open(self.temp('toto.fits')) as f:
assert '4.95652173913043548D+00' in f.read()
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
# Test Integer precision according to width
c1 = fits.Column(name='t2', format='I2', array=[91, 92, 93])
c2 = fits.Column(name='t4', format='I5', array=[91, 92, 93])
c3 = fits.Column(name='t8', format='I10', array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c1, c2, c3])
assert c1.array.dtype == np.int16
assert c2.array.dtype == np.int32
assert c3.array.dtype == np.int64
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
a.close()
def test_endianness(self):
x = np.ndarray((1,), dtype=object)
channelsIn = np.array([3], dtype='uint8')
x[0] = channelsIn
col = fits.Column(name="Channels", format="PB()", array=x)
cols = fits.ColDefs([col])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.name = "RFI"
tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)
hduL = fits.open(self.temp('testendian.fits'))
rfiHDU = hduL['RFI']
data = rfiHDU.data
channelsOut = data.field('Channels')[0]
assert (channelsIn == channelsOut).all()
hduL.close()
def test_column_endianness(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77
(Astropy doesn't preserve byte order of non-native order column arrays)
"""
a = [1., 2., 3., 4.]
a1 = np.array(a, dtype='<f8')
a2 = np.array(a, dtype='>f8')
col1 = fits.Column(name='a', format='D', array=a1)
col2 = fits.Column(name='b', format='D', array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)
assert (tbhdu.data['a'] == a1).all()
assert (tbhdu.data['b'] == a2).all()
# Double check that the array is converted to the correct byte-order
# for FITS (big-endian).
tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)
with fits.open(self.temp('testendian.fits')) as hdul:
assert (hdul[1].data['a'] == a2).all()
assert (hdul[1].data['b'] == a2).all()
def test_recarray_to_bintablehdu(self):
bright = np.rec.array(
[(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')],
formats='int16,a20,float32,a10',
names='order,name,mag,Sp')
hdu = fits.BinTableHDU(bright)
assert comparerecords(hdu.data, bright)
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
assert comparerecords(bright, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu(self):
desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'],
'formats': ['int', 'S20', 'float32', 'S10']})
a = np.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')], dtype=desc)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu_with_unicode(self):
desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'],
'formats': ['int', 'U20', 'float32', 'U10']})
a = np.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')], dtype=desc)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_new_table_from_recarray(self):
bright = np.rec.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')],
formats='int16,a20,float64,a10',
names='order,name,mag,Sp')
hdu = fits.TableHDU.from_columns(bright, nrows=2)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns.columns[0].array))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns._arrays[0]))
# Ensure I can change the value of one data element and it affects
# all of the others.
hdu.data[0][0] = 213
assert hdu.data[0][0] == 213
assert hdu.data._coldefs._arrays[0][0] == 213
assert hdu.data._coldefs.columns[0].array[0] == 213
assert hdu.columns._arrays[0][0] == 213
assert hdu.columns.columns[0].array[0] == 213
hdu.data._coldefs._arrays[0][0] = 100
assert hdu.data[0][0] == 100
assert hdu.data._coldefs._arrays[0][0] == 100
assert hdu.data._coldefs.columns[0].array[0] == 100
assert hdu.columns._arrays[0][0] == 100
assert hdu.columns.columns[0].array[0] == 100
hdu.data._coldefs.columns[0].array[0] = 500
assert hdu.data[0][0] == 500
assert hdu.data._coldefs._arrays[0][0] == 500
assert hdu.data._coldefs.columns[0].array[0] == 500
assert hdu.columns._arrays[0][0] == 500
assert hdu.columns.columns[0].array[0] == 500
hdu.columns._arrays[0][0] = 600
assert hdu.data[0][0] == 600
assert hdu.data._coldefs._arrays[0][0] == 600
assert hdu.data._coldefs.columns[0].array[0] == 600
assert hdu.columns._arrays[0][0] == 600
assert hdu.columns.columns[0].array[0] == 600
hdu.columns.columns[0].array[0] = 800
assert hdu.data[0][0] == 800
assert hdu.data._coldefs._arrays[0][0] == 800
assert hdu.data._coldefs.columns[0].array[0] == 800
assert hdu.columns._arrays[0][0] == 800
assert hdu.columns.columns[0].array[0] == 800
assert (hdu.data.field(0) ==
np.array([800, 2], dtype=np.int16)).all()
assert hdu.data[0][1] == 'Serius'
assert hdu.data[1][1] == 'Canopys'
assert (hdu.data.field(2) ==
np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdu.data[0][3] == 'A1V'
assert hdu.data[1][3] == 'F0Ib'
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert (hdul[1].data.field(0) ==
np.array([800, 2], dtype=np.int16)).all()
assert hdul[1].data[0][1] == 'Serius'
assert hdul[1].data[1][1] == 'Canopys'
assert (hdul[1].data.field(2) ==
np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdul[1].data[0][3] == 'A1V'
assert hdul[1].data[1][3] == 'F0Ib'
del hdul
hdu = fits.BinTableHDU.from_columns(bright, nrows=2)
tmp = np.rec.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib')],
formats='int16,a20,float64,a10',
names='order,name,mag,Sp')
assert comparerecords(hdu.data, tmp)
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
def test_new_fitsrec(self):
"""
Tests creating a new FITS_rec object from a multi-field ndarray.
"""
with fits.open(self.data('tb.fits')) as h:
data = h[1].data
new_data = np.array([(3, 'qwe', 4.5, False)], dtype=data.dtype)
appended = np.append(data, new_data).view(fits.FITS_rec)
assert repr(appended).startswith('FITS_rec(')
# This test used to check the entire string representation of FITS_rec,
# but that has problems between different numpy versions. Instead just
# check that the FITS_rec was created, and we'll let subsequent tests
# worry about checking values and such
def test_appending_a_column(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
counts = np.array([412, 434, 408, 417])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table2.fits'))
# Append the rows of table 2 after the rows of table 1
# The column definitions are assumed to be the same
# Open the two files we want to append
t1 = fits.open(self.temp('table1.fits'))
t2 = fits.open(self.temp('table2.fits'))
# Get the number of rows in the table from the first file
nrows1 = t1[1].data.shape[0]
# Get the total number of rows in the resulting appended table
nrows = t1[1].data.shape[0] + t2[1].data.shape[0]
assert (t1[1].columns._arrays[1] is t1[1].columns.columns[1].array)
# Create a new table that consists of the data from the first table
# but has enough space in the ndarray to hold the data from both tables
hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows)
# For each column in the tables append the data from table 2 after the
# data from table 1.
for i in range(len(t1[1].columns)):
hdu.data.field(i)[nrows1:] = t2[1].data.field(i)
hdu.writeto(self.temp('newtable.fits'))
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 19, '8R x 5C', '[10A, J, 10A, 5E, L]',
'')]
assert fits.info(self.temp('newtable.fits'), output=False) == info
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True),
('NGC5', 412, '', z, False),
('NGC6', 434, '', z, True),
('NGC7', 408, '', z, False),
('NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
# Verify that all of the references to the data point to the same
        # ndarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
# Same verification from the file
hdul = fits.open(self.temp('newtable.fits'))
hdu = hdul[1]
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
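    # Added for illustration: the row-append recipe from
    # test_appending_a_column in minimal form -- allocate a new HDU sized
    # for both tables, then copy the second table's rows into the tail.
    # Not collected by pytest; all names are local to this sketch.
    def _sketch_append_rows(self):
        hdu1 = fits.BinTableHDU.from_columns(
            [fits.Column(name='x', format='J', array=np.array([1, 2]))])
        hdu2 = fits.BinTableHDU.from_columns(
            [fits.Column(name='x', format='J', array=np.array([3, 4]))])
        nrows1 = len(hdu1.data)
        out = fits.BinTableHDU.from_columns(
            hdu1.columns, nrows=nrows1 + len(hdu2.data))
        for name in hdu1.columns.names:
            out.data[name][nrows1:] = hdu2.data[name]
        assert list(out.data['x']) == [1, 2, 3, 4]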
def test_adding_a_column(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum']
coldefs1 = coldefs + c5
tbhdu1 = fits.BinTableHDU.from_columns(coldefs1)
assert tbhdu1.columns.names == ['target', 'counts', 'notes',
'spectrum', 'flag']
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(tbhdu1.data, array)
def test_adding_a_column_inplace(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum']
tbhdu.columns.add_col(c5)
assert tbhdu.columns.names == ['target', 'counts', 'notes',
'spectrum', 'flag']
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(tbhdu.data, array)
def test_adding_a_column_to_file(self):
hdul = fits.open(self.data('table.fits'))
tbhdu = hdul[1]
col = fits.Column(name='a', array=np.array([1, 2]), format='K')
tbhdu.columns.add_col(col)
assert tbhdu.columns.names == ['target', 'V_mag', 'a']
array = np.rec.array(
[('NGC1001', 11.1, 1),
('NGC1002', 12.3, 2),
('NGC1003', 15.2, 0)],
formats='a20,f4,i8')
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_removing_a_column_inplace(self):
        # Tests removing columns from a table in place.
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ['target', 'counts', 'notes',
'spectrum', 'flag']
tbhdu.columns.del_col('flag')
assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum']
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z),
('NGC2', 334, '', z),
('NGC3', 308, '', z),
('NCG4', 317, '', z)],
formats='a10,u4,a10,5f4')
assert comparerecords(tbhdu.data, array)
tbhdu.columns.del_col('counts')
tbhdu.columns.del_col('notes')
assert tbhdu.columns.names == ['target', 'spectrum']
array = np.rec.array(
[('NGC1', z),
('NGC2', z),
('NGC3', z),
('NCG4', z)],
formats='a10,5f4')
assert comparerecords(tbhdu.data, array)
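    # Added for illustration: the in-place column-editing API exercised by
    # the add/del column tests above, reduced to its essentials. Not
    # collected by pytest; all names are local to this sketch.
    def _sketch_add_and_del_col(self):
        c1 = fits.Column(name='a', format='J', array=np.array([1, 2]))
        c2 = fits.Column(name='b', format='J', array=np.array([3, 4]))
        tbhdu = fits.BinTableHDU.from_columns([c1])
        # add_col/del_col mutate the HDU's ColDefs (and thus its data).
        tbhdu.columns.add_col(c2)
        assert tbhdu.columns.names == ['a', 'b']
        tbhdu.columns.del_col('a')
        assert tbhdu.columns.names == ['b']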
def test_removing_a_column_from_file(self):
hdul = fits.open(self.data('table.fits'))
tbhdu = hdul[1]
tbhdu.columns.del_col('V_mag')
assert tbhdu.columns.names == ['target']
array = np.rec.array(
[('NGC1001', ),
('NGC1002', ),
('NGC1003', )],
formats='a20')
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_merge_tables(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
counts = np.array([412, 434, 408, 417])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target1', format='10A', array=names)
c2 = fits.Column(name='counts1', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes1', format='A10')
c4 = fits.Column(name='spectrum1', format='5E')
c5 = fits.Column(name='flag1', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table2.fits'))
# Merge the columns of table 2 after the columns of table 1
# The column names are assumed to be different
# Open the two files we want to append
t1 = fits.open(self.temp('table1.fits'))
t2 = fits.open(self.temp('table2.fits'))
hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns)
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False),
('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True),
('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False),
('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
hdu.writeto(self.temp('newtable.fits'))
# Verify that all of the references to the data point to the same
        # ndarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 30, '4R x 10C',
'[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]', '')]
assert fits.info(self.temp('newtable.fits'), output=False) == info
hdul = fits.open(self.temp('newtable.fits'))
hdu = hdul[1]
assert (hdu.columns.names ==
['target', 'counts', 'notes', 'spectrum', 'flag', 'target1',
'counts1', 'notes1', 'spectrum1', 'flag1'])
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False),
('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True),
('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False),
('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
# Same verification from the file
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
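    # Added for illustration: the column-merge recipe from test_merge_tables
    # in minimal form -- two ColDefs objects with non-colliding column names
    # can be concatenated with ``+`` to build a wider table. Not collected
    # by pytest; all names are local to this sketch.
    def _sketch_merge_columns(self):
        h1 = fits.BinTableHDU.from_columns(
            [fits.Column(name='a', format='J', array=np.array([1, 2]))])
        h2 = fits.BinTableHDU.from_columns(
            [fits.Column(name='b', format='J', array=np.array([3, 4]))])
        merged = fits.BinTableHDU.from_columns(h1.columns + h2.columns)
        assert merged.columns.names == ['a', 'b']
        assert list(merged.data['b']) == [3, 4]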
def test_modify_column_attributes(self):
"""Regression test for https://github.com/astropy/astropy/issues/996
This just tests one particular use case, but it should apply pretty
well to other similar cases.
"""
NULLS = {'a': 2, 'b': 'b', 'c': 2.3}
data = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('a', int), ('b', 'S1'), ('c', float)])
b = fits.BinTableHDU(data=data)
for col in b.columns:
col.null = NULLS[col.name]
b.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
header = hdul[1].header
assert header['TNULL1'] == 2
assert header['TNULL2'] == 'b'
assert header['TNULL3'] == 2.3
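    # Added for illustration: a minimal sketch of the attribute-to-header
    # mechanism tested above -- assigning to a Column attribute such as
    # ``null`` should be reflected in the corresponding TNULLn card once the
    # table is written (here via from_columns rather than the data= path
    # above). Not collected by pytest; all names are local to this sketch.
    def _sketch_column_attribute_to_header(self):
        col = fits.Column(name='a', format='J', array=np.array([1, 2]))
        b = fits.BinTableHDU.from_columns([col])
        b.columns['a'].null = -999
        b.writeto(self.temp('sketch_null.fits'), overwrite=True)
        with fits.open(self.temp('sketch_null.fits')) as hdul:
            assert hdul[1].header['TNULL1'] == -999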
def test_multidimension_table_from_numpy_rec_columns(self):
"""Regression test for https://github.com/astropy/astropy/issues/5280
and https://github.com/astropy/astropy/issues/5287
        Multidimensional tables can now be written with the correct TDIM.
Author: Stephen Bailey.
"""
dtype = [
('x', (str, 5)), # 1D column of 5-character strings
('y', (str, 3), (4,)), # 2D column; each row is four 3-char strings
]
data = np.zeros(2, dtype=dtype)
data['x'] = ['abcde', 'xyz']
data['y'][0] = ['A', 'BC', 'DEF', '123']
data['y'][1] = ['X', 'YZ', 'PQR', '999']
table = Table(data)
# Test convenience functions io.fits.writeto / getdata
fits.writeto(self.temp('test.fits'), data)
dx = fits.getdata(self.temp('test.fits'))
assert data['x'].dtype == dx['x'].dtype
assert data['y'].dtype == dx['y'].dtype
assert np.all(data['x'] == dx['x']), 'x: {} != {}'.format(data['x'], dx['x'])
assert np.all(data['y'] == dx['y']), 'y: {} != {}'.format(data['y'], dx['y'])
# Test fits.BinTableHDU(data) and avoid convenience functions
hdu0 = fits.PrimaryHDU()
hdu1 = fits.BinTableHDU(data)
hx = fits.HDUList([hdu0, hdu1])
hx.writeto(self.temp('test2.fits'))
fx = fits.open(self.temp('test2.fits'))
dx = fx[1].data
fx.close()
assert data['x'].dtype == dx['x'].dtype
assert data['y'].dtype == dx['y'].dtype
assert np.all(data['x'] == dx['x']), 'x: {} != {}'.format(data['x'], dx['x'])
assert np.all(data['y'] == dx['y']), 'y: {} != {}'.format(data['y'], dx['y'])
# Test Table write and read
table.write(self.temp('test3.fits'))
tx = Table.read(self.temp('test3.fits'), character_as_bytes=False)
assert table['x'].dtype == tx['x'].dtype
assert table['y'].dtype == tx['y'].dtype
assert np.all(table['x'] == tx['x']), 'x: {} != {}'.format(table['x'], tx['x'])
assert np.all(table['y'] == tx['y']), 'y: {} != {}'.format(table['y'], tx['y'])
def test_mask_array(self):
t = fits.open(self.data('table.fits'))
tbdata = t[1].data
mask = tbdata.field('V_mag') > 12
newtbdata = tbdata[mask]
hdu = fits.BinTableHDU(newtbdata)
hdu.writeto(self.temp('newtable.fits'))
hdul = fits.open(self.temp('newtable.fits'))
        # match against a regex rather than a specific string.
expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]"
assert re.match(expect, str(hdu.data))
assert re.match(expect, str(hdul[1].data))
t.close()
hdul.close()
def test_slice_a_row(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
row = t1[1].data[2]
assert row['counts'] == 308
a, b, c = row[1:4]
assert a == counts[2]
assert b == ''
assert (c == np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
row['counts'] = 310
assert row['counts'] == 310
row[1] = 315
assert row['counts'] == 315
assert row[1:4]['counts'] == 315
pytest.raises(KeyError, lambda r: r[1:4]['flag'], row)
row[1:4]['counts'] = 300
assert row[1:4]['counts'] == 300
assert row['counts'] == 300
row[1:4][0] = 400
assert row[1:4]['counts'] == 400
row[1:4]['counts'] = 300
assert row[1:4]['counts'] == 300
# Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59
row[1:4][::-1][-1] = 500
assert row[1:4]['counts'] == 500
row[1:4:2][0] = 300
assert row[1:4]['counts'] == 300
pytest.raises(KeyError, lambda r: r[1:4]['flag'], row)
assert row[1:4].field(0) == 300
assert row[1:4].field('counts') == 300
pytest.raises(KeyError, row[1:4].field, 'flag')
row[1:4].setfield('counts', 500)
assert row[1:4].field(0) == 500
pytest.raises(KeyError, row[1:4].setfield, 'flag', False)
assert t1[1].data._coldefs._arrays[1][2] == 500
assert t1[1].data._coldefs.columns[1].array[2] == 500
assert t1[1].columns._arrays[1][2] == 500
assert t1[1].columns.columns[1].array[2] == 500
assert t1[1].data[2][1] == 500
t1.close()
def test_fits_record_len(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
assert len(t1[1].data[0]) == 5
assert len(t1[1].data[0][0:4]) == 4
assert len(t1[1].data[0][0:5]) == 5
assert len(t1[1].data[0][0:6]) == 5
assert len(t1[1].data[0][0:7]) == 5
assert len(t1[1].data[0][1:4]) == 3
assert len(t1[1].data[0][1:5]) == 4
assert len(t1[1].data[0][1:6]) == 4
assert len(t1[1].data[0][1:7]) == 4
t1.close()
def test_add_data_by_rows(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
c1 = fits.Column(name='target', format='10A')
c2 = fits.Column(name='counts', format='J', unit='DN')
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L')
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5)
        # Test assigning data to a table's row using a FITS_record
tbhdu.data[0] = tbhdu1.data[0]
tbhdu.data[4] = tbhdu1.data[3]
        # Test assigning data to a table's row using a tuple
tbhdu.data[2] = ('NGC1', 312, 'A Note',
np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32),
True)
        # Test assigning data to a table's row using a list
tbhdu.data[3] = ['JIM1', '33', 'A Note',
np.array([1., 2., 3., 4., 5.], dtype=np.float32),
True]
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.data._coldefs._arrays[0]))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns.columns[0].array))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns._arrays[0]))
assert tbhdu.data[0][1] == 312
assert tbhdu.data._coldefs._arrays[1][0] == 312
assert tbhdu.data._coldefs.columns[1].array[0] == 312
assert tbhdu.columns._arrays[1][0] == 312
assert tbhdu.columns.columns[1].array[0] == 312
assert tbhdu.columns.columns[0].array[0] == 'NGC1'
assert tbhdu.columns.columns[2].array[0] == ''
assert (tbhdu.columns.columns[3].array[0] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu.columns.columns[4].array[0] == True # noqa
assert tbhdu.data[3][1] == 33
assert tbhdu.data._coldefs._arrays[1][3] == 33
assert tbhdu.data._coldefs.columns[1].array[3] == 33
assert tbhdu.columns._arrays[1][3] == 33
assert tbhdu.columns.columns[1].array[3] == 33
assert tbhdu.columns.columns[0].array[3] == 'JIM1'
assert tbhdu.columns.columns[2].array[3] == 'A Note'
assert (tbhdu.columns.columns[3].array[3] ==
np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all()
assert tbhdu.columns.columns[4].array[3] == True # noqa
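    # Added for illustration: the three row-assignment forms exercised in
    # test_add_data_by_rows (FITS_record, tuple, list), reduced to a minimal
    # sketch. Not collected by pytest; all names are local to this sketch.
    def _sketch_row_assignment_forms(self):
        c1 = fits.Column(name='n', format='10A', array=np.array(['x', 'y']))
        c2 = fits.Column(name='v', format='J', array=np.array([1, 2]))
        src = fits.BinTableHDU.from_columns([c1, c2])
        dst = fits.BinTableHDU.from_columns(src.columns, nrows=3)
        dst.data[2] = src.data[0]   # assign a FITS_record
        dst.data[1] = ('z', 9)      # assign a tuple
        dst.data[0] = ['w', 8]      # assign a list
        assert list(dst.data['v']) == [8, 9, 1]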
def test_assign_multiple_rows_to_table(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
counts = np.array([112, 134, 108, 117])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][3] = np.array([1., 2., 3., 4., 5.], dtype=np.float32)
tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9)
        # Assign the 4 rows from the second table to rows 5 through 8 of the
# new table. Note that the last row of the new table will still be
# initialized to the default values.
tbhdu2.data[4:] = tbhdu.data
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.data._coldefs._arrays[0]))
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.columns.columns[0].array))
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.columns._arrays[0]))
assert tbhdu2.data[0][1] == 312
assert tbhdu2.data._coldefs._arrays[1][0] == 312
assert tbhdu2.data._coldefs.columns[1].array[0] == 312
assert tbhdu2.columns._arrays[1][0] == 312
assert tbhdu2.columns.columns[1].array[0] == 312
assert tbhdu2.columns.columns[0].array[0] == 'NGC1'
assert tbhdu2.columns.columns[2].array[0] == ''
assert (tbhdu2.columns.columns[3].array[0] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[0] == True # noqa
assert tbhdu2.data[4][1] == 112
assert tbhdu2.data._coldefs._arrays[1][4] == 112
assert tbhdu2.data._coldefs.columns[1].array[4] == 112
assert tbhdu2.columns._arrays[1][4] == 112
assert tbhdu2.columns.columns[1].array[4] == 112
assert tbhdu2.columns.columns[0].array[4] == 'NGC5'
assert tbhdu2.columns.columns[2].array[4] == ''
assert (tbhdu2.columns.columns[3].array[4] ==
np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[4] == False # noqa
assert tbhdu2.columns.columns[1].array[8] == 0
assert tbhdu2.columns.columns[0].array[8] == ''
assert tbhdu2.columns.columns[2].array[8] == ''
assert (tbhdu2.columns.columns[3].array[8] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[8] == False # noqa
def test_verify_data_references(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
# Verify that original ColDefs object has independent Column
# objects.
assert id(coldefs.columns[0]) != id(c1)
# Verify that original ColDefs object has independent ndarray
# objects.
assert id(coldefs.columns[0].array) != id(names)
# Verify that original ColDefs object references the same data
# object as the original Column object.
assert id(coldefs.columns[0].array) == id(c1.array)
assert id(coldefs.columns[0].array) == id(coldefs._arrays[0])
# Verify new HDU has an independent ColDefs object.
assert id(coldefs) != id(tbhdu.columns)
# Verify new HDU has independent Column objects.
assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0])
# Verify new HDU has independent ndarray objects.
assert (id(coldefs.columns[0].array) !=
id(tbhdu.columns.columns[0].array))
# Verify that both ColDefs objects in the HDU reference the same
# Coldefs object.
assert id(tbhdu.columns) == id(tbhdu.data._coldefs)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.data._coldefs._arrays[0]))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns.columns[0].array))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns._arrays[0]))
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_ndarray(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray))
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.data._coldefs._arrays[0]))
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.columns.columns[0].array))
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.columns._arrays[0]))
        # Ensure I can change the value of one data element and it affects
        # all of the others.
tbhdu1.data[0][1] = 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
tbhdu1.data._coldefs.columns[1].array[0] = 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
tbhdu1.columns._arrays[1][0] = 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
tbhdu1.columns.columns[1].array[0] = 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
tbhdu1.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_fits_rec(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][1] = 213
assert tbhdu.data[0][1] == 213
assert tbhdu.data._coldefs._arrays[1][0] == 213
assert tbhdu.data._coldefs.columns[1].array[0] == 213
assert tbhdu.columns._arrays[1][0] == 213
assert tbhdu.columns.columns[1].array[0] == 213
tbhdu.data._coldefs._arrays[1][0] = 100
assert tbhdu.data[0][1] == 100
assert tbhdu.data._coldefs._arrays[1][0] == 100
assert tbhdu.data._coldefs.columns[1].array[0] == 100
assert tbhdu.columns._arrays[1][0] == 100
assert tbhdu.columns.columns[1].array[0] == 100
tbhdu.data._coldefs.columns[1].array[0] = 500
assert tbhdu.data[0][1] == 500
assert tbhdu.data._coldefs._arrays[1][0] == 500
assert tbhdu.data._coldefs.columns[1].array[0] == 500
assert tbhdu.columns._arrays[1][0] == 500
assert tbhdu.columns.columns[1].array[0] == 500
tbhdu.columns._arrays[1][0] = 600
assert tbhdu.data[0][1] == 600
assert tbhdu.data._coldefs._arrays[1][0] == 600
assert tbhdu.data._coldefs.columns[1].array[0] == 600
assert tbhdu.columns._arrays[1][0] == 600
assert tbhdu.columns.columns[1].array[0] == 600
tbhdu.columns.columns[1].array[0] = 800
assert tbhdu.data[0][1] == 800
assert tbhdu.data._coldefs._arrays[1][0] == 800
assert tbhdu.data._coldefs.columns[1].array[0] == 800
assert tbhdu.columns._arrays[1][0] == 800
assert tbhdu.columns.columns[1].array[0] == 800
tbhdu.columns.columns[1].array[0] = 312
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 1
fr = t1[1].data
assert t1[1].data[0][1] == 1
assert t1[1].data._coldefs._arrays[1][0] == 1
assert t1[1].data._coldefs.columns[1].array[0] == 1
assert t1[1].columns._arrays[1][0] == 1
assert t1[1].columns.columns[1].array[0] == 1
assert fr[0][1] == 1
assert fr._coldefs._arrays[1][0] == 1
assert fr._coldefs.columns[1].array[0] == 1
fr._coldefs.columns[1].array[0] = 312
tbhdu1 = fits.BinTableHDU.from_columns(fr)
i = 0
for row in tbhdu1.data:
for j in range(len(row)):
if isinstance(row[j], np.ndarray):
assert (row[j] == tbhdu.data[i][j]).all()
else:
assert row[j] == tbhdu.data[i][j]
i = i + 1
tbhdu1.data[0][1] = 213
assert t1[1].data[0][1] == 312
assert t1[1].data._coldefs._arrays[1][0] == 312
assert t1[1].data._coldefs.columns[1].array[0] == 312
assert t1[1].columns._arrays[1][0] == 312
assert t1[1].columns.columns[1].array[0] == 312
assert fr[0][1] == 312
assert fr._coldefs._arrays[1][0] == 312
assert fr._coldefs.columns[1].array[0] == 312
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
t1[1].data[0][1] = 10
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 666
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 666
assert tbhdu1.data._coldefs._arrays[1][0] == 666
assert tbhdu1.data._coldefs.columns[1].array[0] == 666
assert tbhdu1.columns._arrays[1][0] == 666
assert tbhdu1.columns.columns[1].array[0] == 666
t1.close()
def test_bin_table_hdu_constructor(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
hdu = fits.BinTableHDU(tbhdu1.data)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns.columns[0].array))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns._arrays[0]))
# Verify that the references in the original HDU are the same as the
# references in the new HDU.
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
# Verify that a change in the new HDU is reflected in both the new
# and original HDU.
hdu.data[0][1] = 213
assert hdu.data[0][1] == 213
assert hdu.data._coldefs._arrays[1][0] == 213
assert hdu.data._coldefs.columns[1].array[0] == 213
assert hdu.columns._arrays[1][0] == 213
assert hdu.columns.columns[1].array[0] == 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
hdu.data._coldefs._arrays[1][0] = 100
assert hdu.data[0][1] == 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
hdu.data._coldefs.columns[1].array[0] = 500
assert hdu.data[0][1] == 500
assert hdu.data._coldefs._arrays[1][0] == 500
assert hdu.data._coldefs.columns[1].array[0] == 500
assert hdu.columns._arrays[1][0] == 500
assert hdu.columns.columns[1].array[0] == 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
hdu.columns._arrays[1][0] = 600
assert hdu.data[0][1] == 600
assert hdu.data._coldefs._arrays[1][0] == 600
assert hdu.data._coldefs.columns[1].array[0] == 600
assert hdu.columns._arrays[1][0] == 600
assert hdu.columns.columns[1].array[0] == 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
hdu.columns.columns[1].array[0] = 800
assert hdu.data[0][1] == 800
assert hdu.data._coldefs._arrays[1][0] == 800
assert hdu.data._coldefs.columns[1].array[0] == 800
assert hdu.columns._arrays[1][0] == 800
assert hdu.columns.columns[1].array[0] == 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
def test_constructor_name_arg(self):
"""testConstructorNameArg
Passing name='...' to the BinTableHDU and TableHDU constructors
should set the .name attribute and 'EXTNAME' header keyword, and
override any name in an existing 'EXTNAME' value.
"""
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.name == ''
assert 'EXTNAME' not in hdu.header
hdu.name = 'FOO'
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# Passing name to constructor
hdu = hducls(name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# And overriding a header with a different extname
hdr = fits.Header()
hdr['EXTNAME'] = 'EVENTS'
hdu = hducls(header=hdr, name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
def test_constructor_ver_arg(self):
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.ver == 1
assert 'EXTVER' not in hdu.header
hdu.ver = 2
assert hdu.ver == 2
assert hdu.header['EXTVER'] == 2
# Passing name to constructor
hdu = hducls(ver=3)
assert hdu.ver == 3
assert hdu.header['EXTVER'] == 3
# And overriding a header with a different extver
hdr = fits.Header()
hdr['EXTVER'] = 4
hdu = hducls(header=hdr, ver=5)
assert hdu.ver == 5
assert hdu.header['EXTVER'] == 5
def test_unicode_colname(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5204
"Handle unicode FITS BinTable column names on Python 2"
"""
col = fits.Column(name='spam', format='E', array=[42.])
# This used to raise a TypeError, now it works
fits.BinTableHDU.from_columns([col])
def test_bin_table_with_logical_array(self):
c1 = fits.Column(name='flag', format='2L',
array=[[True, False], [False, True]])
coldefs = fits.ColDefs([c1])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
assert (tbhdu1.data.field('flag')[0] ==
np.array([True, False], dtype=bool)).all()
assert (tbhdu1.data.field('flag')[1] ==
np.array([False, True], dtype=bool)).all()
tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data)
assert (tbhdu.data.field('flag')[0] ==
np.array([True, False], dtype=bool)).all()
assert (tbhdu.data.field('flag')[1] ==
np.array([False, True], dtype=bool)).all()
def test_fits_rec_column_access(self):
tbdata = fits.getdata(self.data('table.fits'))
assert (tbdata.V_mag == tbdata.field('V_mag')).all()
assert (tbdata.V_mag == tbdata['V_mag']).all()
# Table with scaling (c3) and tnull (c1)
tbdata = fits.getdata(self.data('tb.fits'))
for col in ('c1', 'c2', 'c3', 'c4'):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# ascii table
tbdata = fits.getdata(self.data('ascii.fits'))
for col in ('a', 'b'):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# with VLA column
col1 = fits.Column(name='x', format='PI()',
array=np.array([[45, 56], [11, 12, 13]],
dtype=np.object_))
hdu = fits.BinTableHDU.from_columns([col1])
assert type(hdu.data['x']) == type(hdu.data.x) # noqa
assert (hdu.data['x'][0] == hdu.data.x[0]).all()
assert (hdu.data['x'][1] == hdu.data.x[1]).all()
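    # Added for illustration: a minimal sketch of the variable-length array
    # ('P') column format used above -- each row of such a column may hold a
    # different number of elements. Not collected by pytest; all names are
    # local to this sketch.
    def _sketch_variable_length_column(self):
        col = fits.Column(name='v', format='PJ()',
                          array=np.array([[1], [2, 3, 4]], dtype=np.object_))
        hdu = fits.BinTableHDU.from_columns([col])
        assert len(hdu.data['v'][0]) == 1
        assert len(hdu.data['v'][1]) == 3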
def test_table_with_zero_width_column(self):
hdul = fits.open(self.data('zerowidth.fits'))
tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM'
assert 'ORBPARM' in tbhdu.columns.names
        # The zero-width ORBPARM column is nevertheless present in the data,
        # and the rest of the data should be readable
assert 'ORBPARM' in tbhdu.data.names
assert 'ORBPARM' in tbhdu.data.dtype.names
# Verify that some of the data columns are still correctly accessible
# by name
assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'
assert comparefloats(
tbhdu.data[0]['STABXYZ'],
np.array([499.85566663, -1317.99231554, -735.18866164],
dtype=np.float64))
assert tbhdu.data[0]['NOSTA'] == 1
assert tbhdu.data[0]['MNTSTA'] == 0
assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT'
assert comparefloats(
tbhdu.data[-1]['STABXYZ'],
np.array([0.0, 0.0, 0.0], dtype=np.float64))
assert tbhdu.data[-1]['NOSTA'] == 29
assert tbhdu.data[-1]['MNTSTA'] == 0
hdul.writeto(self.temp('newtable.fits'))
hdul.close()
hdul = fits.open(self.temp('newtable.fits'))
tbhdu = hdul[2]
# Verify that the previous tests still hold after writing
assert 'ORBPARM' in tbhdu.columns.names
assert 'ORBPARM' in tbhdu.data.names
assert 'ORBPARM' in tbhdu.data.dtype.names
assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'
assert comparefloats(
tbhdu.data[0]['STABXYZ'],
np.array([499.85566663, -1317.99231554, -735.18866164],
dtype=np.float64))
assert tbhdu.data[0]['NOSTA'] == 1
assert tbhdu.data[0]['MNTSTA'] == 0
assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT'
assert comparefloats(
tbhdu.data[-1]['STABXYZ'],
np.array([0.0, 0.0, 0.0], dtype=np.float64))
assert tbhdu.data[-1]['NOSTA'] == 29
assert tbhdu.data[-1]['MNTSTA'] == 0
hdul.close()
def test_string_column_padding(self):
a = ['img1', 'img2', 'img3a', 'p']
s = 'img1\x00\x00\x00\x00\x00\x00' \
'img2\x00\x00\x00\x00\x00\x00' \
'img3a\x00\x00\x00\x00\x00' \
'p\x00\x00\x00\x00\x00\x00\x00\x00\x00'
acol = fits.Column(name='MEMNAME', format='A10',
array=chararray.array(a))
ahdu = fits.BinTableHDU.from_columns([acol])
assert ahdu.data.tobytes().decode('raw-unicode-escape') == s
ahdu.writeto(self.temp('newtable.fits'))
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].data.tobytes().decode('raw-unicode-escape') == s
assert (hdul[1].data['MEMNAME'] == a).all()
del hdul
ahdu = fits.TableHDU.from_columns([acol])
ahdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert (hdul[1].data.tobytes().decode('raw-unicode-escape') ==
s.replace('\x00', ' '))
assert (hdul[1].data['MEMNAME'] == a).all()
ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy())
del hdul
# Now serialize once more as a binary table; padding bytes should
# revert to zeroes
ahdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].data.tobytes().decode('raw-unicode-escape') == s
assert (hdul[1].data['MEMNAME'] == a).all()
def test_multi_dimensional_columns(self):
"""
Tests the multidimensional column implementation with both numeric
arrays and string arrays.
"""
data = np.rec.array(
[([0, 1, 2, 3, 4, 5], 'row1' * 2),
([6, 7, 8, 9, 0, 1], 'row2' * 2),
([2, 3, 4, 5, 6, 7], 'row3' * 2)], formats='6i4,a8')
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp('newtable.fits'))
with fits.open(self.temp('newtable.fits'), mode='update') as hdul:
# Modify the TDIM fields to my own specification
hdul[1].header['TDIM1'] = '(2,3)'
hdul[1].header['TDIM2'] = '(4,2)'
with fits.open(self.temp('newtable.fits')) as hdul:
thdu = hdul[1]
c1 = thdu.data.field(0)
c2 = thdu.data.field(1)
assert c1.shape == (3, 3, 2)
assert c2.shape == (3, 2)
assert (c1 == np.array([[[0, 1], [2, 3], [4, 5]],
[[6, 7], [8, 9], [0, 1]],
[[2, 3], [4, 5], [6, 7]]])).all()
assert (c2 == np.array([['row1', 'row1'],
['row2', 'row2'],
['row3', 'row3']])).all()
del c1
del c2
del thdu
del hdul
# Test setting the TDIMn header based on the column data
data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', 4)])
data['x'] = 1, 2, 3
data['s'] = 'ok'
fits.writeto(self.temp('newtable.fits'), data, overwrite=True)
t = fits.getdata(self.temp('newtable.fits'))
assert t.field(1).dtype.str[-1] == '5'
assert t.field(1).shape == (3, 4)
# Like the previous test, but with an extra dimension (a bit more
# complicated)
data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', (4, 3))])
data['x'] = 1, 2, 3
data['s'] = 'ok'
del t
fits.writeto(self.temp('newtable.fits'), data, overwrite=True)
t = fits.getdata(self.temp('newtable.fits'))
assert t.field(1).dtype.str[-1] == '5'
assert t.field(1).shape == (3, 4, 3)
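    # Added for illustration: the TDIMn behavior above can also be driven
    # explicitly through the ``dim`` argument of fits.Column; TDIM lists the
    # axes in FITS (Fortran) order, so dim='(2,3)' yields per-row numpy
    # arrays of shape (3, 2). Not collected by pytest; names are local to
    # this sketch.
    def _sketch_dim_keyword(self):
        arr = np.arange(12, dtype=np.int16).reshape(2, 3, 2)
        col = fits.Column(name='m', format='6I', dim='(2,3)', array=arr)
        hdu = fits.BinTableHDU.from_columns([col])
        hdu.writeto(self.temp('sketch_dim.fits'))
        with fits.open(self.temp('sketch_dim.fits')) as hdul:
            assert hdul[1].header['TDIM1'] == '(2,3)'
            assert hdul[1].data['m'].shape == (2, 3, 2)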
def test_oned_array_single_element(self):
# a table with rows that are 1d arrays of a single value
data = np.array([(1, ), (2, )], dtype=([('x', 'i4', (1, ))]))
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp('onedtable.fits'))
with fits.open(self.temp('onedtable.fits')) as hdul:
thdu = hdul[1]
c = thdu.data.field(0)
assert c.shape == (2, 1)
assert thdu.header['TDIM1'] == '(1)'
def test_bin_table_init_from_string_array_column(self):
"""
Tests two ways of creating a new `BinTableHDU` from a column of
string arrays.
This tests for a couple different regressions, and ensures that
both BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work
equivalently.
Some of this is redundant with the following test, but checks some
subtly different cases.
"""
data = [[b'abcd', b'efgh'],
[b'ijkl', b'mnop'],
[b'qrst', b'uvwx']]
arr = np.array([(data,), (data,), (data,), (data,), (data,)],
dtype=[('S', '(3, 2)S4')])
tbhdu1 = fits.BinTableHDU(data=arr)
def test_dims_and_roundtrip(tbhdu):
assert tbhdu.data['S'].shape == (5, 3, 2)
assert tbhdu.data['S'].dtype.str.endswith('U4')
tbhdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header['TDIM1'] == '(4,2,3)'
assert tbhdu2.data['S'].shape == (5, 3, 2)
assert tbhdu.data['S'].dtype.str.endswith('U4')
assert np.all(tbhdu2.data['S'] == tbhdu.data['S'])
test_dims_and_roundtrip(tbhdu1)
tbhdu2 = fits.BinTableHDU.from_columns(arr)
test_dims_and_roundtrip(tbhdu2)
def test_columns_with_truncating_tdim(self):
"""
According to the FITS standard (section 7.3.2):
If the number of elements in the array implied by the TDIMn is less
        than the allocated size of the array in the FITS file, then the
unused trailing elements should be interpreted as containing
undefined fill values.
*deep sigh* What this means is if a column has a repeat count larger
        than the number of elements indicated by its TDIM (e.g. TDIM1 = '(2,2)',
but TFORM1 = 6I), then instead of this being an outright error we are
to take the first 4 elements as implied by the TDIM and ignore the
additional two trailing elements.
"""
# It's hard to even successfully create a table like this. I think
# it *should* be difficult, but once created it should at least be
# possible to read.
arr1 = [[b'ab', b'cd'], [b'ef', b'gh'], [b'ij', b'kl']]
arr2 = [1, 2, 3, 4, 5]
arr = np.array([(arr1, arr2), (arr1, arr2)],
dtype=[('a', '(3, 2)S2'), ('b', '5i8')])
tbhdu = fits.BinTableHDU(data=arr)
tbhdu.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
raw_bytes = f.read()
# Artificially truncate TDIM in the header; this seems to be the
# easiest way to do this while getting around Astropy's insistence on the
# data and header matching perfectly; again, we have no interest in
# making it possible to write files in this format, only read them
with open(self.temp('test.fits'), 'wb') as f:
f.write(raw_bytes.replace(b'(2,2,3)', b'(2,2,2)'))
with fits.open(self.temp('test.fits')) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header['TDIM1'] == '(2,2,2)'
assert tbhdu2.header['TFORM1'] == '12A'
for row in tbhdu2.data:
assert np.all(row['a'] == [['ab', 'cd'], ['ef', 'gh']])
assert np.all(row['b'] == [1, 2, 3, 4, 5])
def test_string_array_round_trip(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201"""
data = [['abc', 'def', 'ghi'],
['jkl', 'mno', 'pqr'],
['stu', 'vwx', 'yz ']]
recarr = np.rec.array([(data,), (data,)], formats=['(3,3)S3'])
t = fits.BinTableHDU(data=recarr)
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert 'TDIM1' in h[1].header
assert h[1].header['TDIM1'] == '(3,3,3)'
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (h[1].data.field(0)[0] ==
np.char.decode(recarr.field(0)[0], 'ascii')).all()
with fits.open(self.temp('test.fits')) as h:
# Access the data; I think this is necessary to exhibit the bug
# reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201
h[1].data[:]
h.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits')) as h:
assert 'TDIM1' in h[1].header
assert h[1].header['TDIM1'] == '(3,3,3)'
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (h[1].data.field(0)[0] ==
np.char.decode(recarr.field(0)[0], 'ascii')).all()
def test_new_table_with_nd_column(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/3
"""
arra = np.array(['a', 'b'], dtype='|S1')
arrb = np.array([['a', 'bc'], ['cd', 'e']], dtype='|S2')
arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
cols = [
fits.Column(name='str', format='1A', array=arra),
fits.Column(name='strarray', format='4A', dim='(2,2)',
array=arrb),
fits.Column(name='intarray', format='4I', dim='(2, 2)',
array=arrc)
]
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
# Need to force string arrays to byte arrays in order to compare
# correctly on Python 3
assert (h[1].data['str'].encode('ascii') == arra).all()
assert (h[1].data['strarray'].encode('ascii') == arrb).all()
assert (h[1].data['intarray'] == arrc).all()
def test_mismatched_tform_and_tdim(self):
"""Normally the product of the dimensions listed in a TDIMn keyword
must be less than or equal to the repeat count in the TFORMn keyword.
This tests that this works if less than (treating the trailing bytes
as unspecified fill values per the FITS standard) and fails if the
dimensions specified by TDIMn are greater than the repeat count.
"""
arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
cols = [fits.Column(name='a', format='20I', dim='(2,2)',
array=arra),
fits.Column(name='b', format='4I', dim='(2,2)',
array=arrb)]
# The first column has the mismatched repeat count
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['TFORM1'] == '20I'
assert h[1].header['TFORM2'] == '4I'
assert h[1].header['TDIM1'] == h[1].header['TDIM2'] == '(2,2)'
assert (h[1].data['a'] == arra).all()
assert (h[1].data['b'] == arrb).all()
            assert h[1].data.itemsize == 48  # 24 two-byte (16-bit) values: 20 + 4 per row
# If dims is more than the repeat count in the format specifier raise
# an error
pytest.raises(VerifyError, fits.Column, name='a', format='2I',
dim='(2,2)', array=arra)
def test_tdim_of_size_one(self):
"""Regression test for https://github.com/astropy/astropy/pull/3580"""
with fits.open(self.data('tdim.fits')) as hdulist:
assert hdulist[1].data['V_mag'].shape == (3, 1, 1)
def test_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52"""
with fits.open(self.data('table.fits')) as f:
data = f[1].data
targets = data.field('target')
s = data[:]
assert (s.field('target') == targets).all()
for n in range(len(targets) + 2):
s = data[:n]
assert (s.field('target') == targets[:n]).all()
s = data[n:]
assert (s.field('target') == targets[n:]).all()
s = data[::2]
assert (s.field('target') == targets[::2]).all()
s = data[::-1]
assert (s.field('target') == targets[::-1]).all()
def test_array_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55"""
with fits.open(self.data('table.fits')) as f:
data = f[1].data
s1 = data[data['target'] == 'NGC1001']
s2 = data[np.where(data['target'] == 'NGC1001')]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
def test_array_broadcasting(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/pull/48
"""
with fits.open(self.data('table.fits')) as hdu:
data = hdu[1].data
data['V_mag'] = 0
assert np.all(data['V_mag'] == 0)
data['V_mag'] = 1
assert np.all(data['V_mag'] == 1)
for container in (list, tuple, np.array):
data['V_mag'] = container([1, 2, 3])
assert np.array_equal(data['V_mag'], np.array([1, 2, 3]))
def test_array_slicing_readonly(self):
"""
Like test_array_slicing but with the file opened in 'readonly' mode.
Regression test for a crash when slicing readonly memmap'd tables.
"""
with fits.open(self.data('table.fits'), mode='readonly') as f:
data = f[1].data
s1 = data[data['target'] == 'NGC1001']
s2 = data[np.where(data['target'] == 'NGC1001')]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
@pytest.mark.parametrize('tablename', ['table.fits', 'tb.fits'])
def test_dump_load_round_trip(self, tablename):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
with fits.open(self.data(tablename)) as hdul:
tbhdu = hdul[1]
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
_assert_attr_col(new_tbhdu, hdul[1])
    def test_dump_load_array_columns(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/22
Ensures that a table containing a multi-value array column can be
dumped and loaded successfully.
"""
data = np.rec.array([('a', [1, 2, 3, 4], 0.1),
('b', [5, 6, 7, 8], 0.2)],
formats='a1,4i4,f8')
tbhdu = fits.BinTableHDU.from_columns(data)
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
assert str(tbhdu.header) == str(new_tbhdu.header)
def test_load_guess_format(self):
"""
Tests loading a table dump with no supplied coldefs or header, so that
the table format has to be guessed at. There is of course no exact
science to this; the table that's produced simply uses sensible guesses
for that format. Ideally this should never have to be used.
"""
# Create a table containing a variety of data types.
a0 = np.array([False, True, False], dtype=bool)
c0 = fits.Column(name='c0', format='L', array=a0)
# Format X currently not supported by the format
# a1 = np.array([[0], [1], [0]], dtype=np.uint8)
# c1 = fits.Column(name='c1', format='X', array=a1)
a2 = np.array([1, 128, 255], dtype=np.uint8)
c2 = fits.Column(name='c2', format='B', array=a2)
a3 = np.array([-30000, 1, 256], dtype=np.int16)
c3 = fits.Column(name='c3', format='I', array=a3)
a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32)
c4 = fits.Column(name='c4', format='J', array=a4)
a5 = np.array(['a', 'abc', 'ab'])
c5 = fits.Column(name='c5', format='A3', array=a5)
a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64)
c6 = fits.Column(name='c6', format='D', array=a6)
a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j],
dtype=np.complex128)
c7 = fits.Column(name='c7', format='M', array=a7)
a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
c8 = fits.Column(name='c8', format='PJ()', array=a8)
tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8])
datafile = self.temp('data.txt')
tbhdu.dump(datafile)
new_tbhdu = fits.BinTableHDU.load(datafile)
# In this particular case the record data at least should be equivalent
assert comparerecords(tbhdu.data, new_tbhdu.data)
def test_attribute_field_shadowing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86
Numpy recarray objects have a poorly-considered feature of allowing
field access by attribute lookup. However, if a field name coincides
with an existing attribute/method of the array, the existing name takes
precedence (making the attribute-based field lookup completely unreliable
in general cases).
This ensures that any FITS_rec attributes still work correctly even
when there is a field with the same name as that attribute.
"""
c1 = fits.Column(name='names', format='I', array=[1])
c2 = fits.Column(name='formats', format='I', array=[2])
c3 = fits.Column(name='other', format='I', array=[3])
t = fits.BinTableHDU.from_columns([c1, c2, c3])
assert t.data.names == ['names', 'formats', 'other']
assert t.data.formats == ['I'] * 3
assert (t.data['names'] == [1]).all()
assert (t.data['formats'] == [2]).all()
assert (t.data.other == [3]).all()
def test_table_from_bool_fields(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113
Tests creating a table from a recarray containing numpy.bool columns.
"""
array = np.rec.array([(True, False), (False, True)], formats='|b1,|b1')
thdu = fits.BinTableHDU.from_columns(array)
assert thdu.columns.formats == ['L', 'L']
assert comparerecords(thdu.data, array)
# Test round trip
thdu.writeto(self.temp('table.fits'))
data = fits.getdata(self.temp('table.fits'), ext=1)
assert thdu.columns.formats == ['L', 'L']
assert comparerecords(data, array)
def test_table_from_bool_fields2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215
Tests the case where a multi-field ndarray (not a recarray) containing
a bool field is used to initialize a `BinTableHDU`.
"""
arr = np.array([(False,), (True,), (False,)], dtype=[('a', '?')])
hdu = fits.BinTableHDU(data=arr)
assert (hdu.data['a'] == arr['a']).all()
def test_bool_column_update(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139"""
c1 = fits.Column('F1', 'L', array=[True, False])
c2 = fits.Column('F2', 'L', array=[False, True])
thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2]))
thdu.writeto(self.temp('table.fits'))
with fits.open(self.temp('table.fits'), mode='update') as hdul:
hdul[1].data['F1'][1] = True
hdul[1].data['F2'][0] = True
with fits.open(self.temp('table.fits')) as hdul:
assert (hdul[1].data['F1'] == [True, True]).all()
assert (hdul[1].data['F2'] == [True, True]).all()
def test_missing_tnull(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197"""
c = fits.Column('F1', 'A3', null='---',
array=np.array(['1.0', '2.0', '---', '3.0']),
ascii=True)
table = fits.TableHDU.from_columns([c])
table.writeto(self.temp('test.fits'))
# Now let's delete the TNULL1 keyword, making this essentially
# unreadable
with fits.open(self.temp('test.fits'), mode='update') as h:
h[1].header['TFORM1'] = 'E3'
del h[1].header['TNULL1']
with fits.open(self.temp('test.fits')) as h:
pytest.raises(ValueError, lambda: h[1].data['F1'])
try:
with fits.open(self.temp('test.fits')) as h:
h[1].data['F1']
except ValueError as e:
assert str(e).endswith(
"the header may be missing the necessary TNULL1 "
"keyword or the table contains invalid data")
def test_blank_field_zero(self):
"""Regression test for https://github.com/astropy/astropy/issues/5134
Blank values in numerical columns of ASCII tables should be replaced
with zeros, so they can be loaded into numpy arrays.
When a TNULL value is set and there are blank fields not equal to that
value, they should be replaced with zeros.
"""
# Test an integer column with blank string as null
nullval1 = ' '
c1 = fits.Column('F1', format='I8', null=nullval1,
array=np.array([0, 1, 2, 3, 4]),
ascii=True)
table = fits.TableHDU.from_columns([c1])
table.writeto(self.temp('ascii_null.fits'))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp('ascii_null.fits'), mode='r+') as h:
nulled = h.read().replace('2 ', ' ')
h.seek(0)
h.write(nulled)
with fits.open(self.temp('ascii_null.fits'), memmap=True) as f:
assert f[1].data[2][0] == 0
# Test a float column with a null value set and blank fields.
nullval2 = 'NaN'
c2 = fits.Column('F1', format='F12.8', null=nullval2,
array=np.array([1.0, 2.0, 3.0, 4.0]),
ascii=True)
table = fits.TableHDU.from_columns([c2])
table.writeto(self.temp('ascii_null2.fits'))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp('ascii_null2.fits'), mode='r+') as h:
nulled = h.read().replace('3.00000000', ' ')
h.seek(0)
h.write(nulled)
with fits.open(self.temp('ascii_null2.fits'), memmap=True) as f:
# (Currently it should evaluate to 0.0, but if a TODO in fitsrec is
# completed, then it should evaluate to NaN.)
assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0])
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column('mag', format='E', array=arr)
assert (arr == col.array).all()
def test_table_none(self):
"""Regression test
for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data('tb.fits')) as h:
h[1].data
h[1].data = None
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
h[1].writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['NAXIS'] == 2
assert h[1].header['NAXIS1'] == 12
assert h[1].header['NAXIS2'] == 0
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
def test_unnecessary_table_load(self):
"""Test that FITS tables are not unnecessarily parsed and processed
when writing directly from one FITS file to a new file without first
reading the data for user manipulation.
In other words, it should be possible to do a direct copy of the raw
data without unnecessary processing of the data.
"""
with fits.open(self.data('table.fits')) as h:
h[1].writeto(self.temp('test.fits'))
# Since this was a direct copy the h[1].data attribute should not have
# even been accessed (since this means the data was read and parsed)
assert 'data' not in h[1].__dict__
with fits.open(self.data('table.fits')) as h1:
with fits.open(self.temp('test.fits')) as h2:
assert str(h1[1].header) == str(h2[1].header)
assert comparerecords(h1[1].data, h2[1].data)
def test_table_from_columns_of_other_table(self):
"""Tests a rare corner case where the columns of an existing table
are used to create a new table with the new_table function. In this
specific case, however, the existing table's data has not been read
yet, so new_table has to get at it through the Delayed proxy.
Note: Although this previously tested new_table it now uses
BinTableHDU.from_columns directly, around which new_table is a mere
wrapper.
"""
hdul = fits.open(self.data('table.fits'))
# Make sure the column array is in fact delayed...
assert isinstance(hdul[1].columns._arrays[0], Delayed)
# Create a new table...
t = fits.BinTableHDU.from_columns(hdul[1].columns)
# The original columns should no longer be delayed...
assert not isinstance(hdul[1].columns._arrays[0], Delayed)
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul2:
assert comparerecords(hdul[1].data, hdul2[1].data)
hdul.close()
def test_bintable_to_asciitable(self):
"""Tests initializing a TableHDU with the data from a BinTableHDU."""
with fits.open(self.data('tb.fits')) as hdul:
tbdata = hdul[1].data
tbhdu = fits.TableHDU(data=tbdata)
tbhdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul2:
tbdata2 = hdul2[1].data
assert np.all(tbdata['c1'] == tbdata2['c1'])
assert np.all(tbdata['c2'] == tbdata2['c2'])
# c3 gets converted from float32 to float64 when writing
# test.fits, so cast to float32 before testing that the correct
# value is retrieved
assert np.all(tbdata['c3'].astype(np.float32) ==
tbdata2['c3'].astype(np.float32))
# c4 is a boolean column in the original table; we want ASCII
# columns to convert these to columns of 'T'/'F' strings
assert np.all(np.where(tbdata['c4'], 'T', 'F') ==
tbdata2['c4'])
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data('tb.fits')) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data('ascii.fits')) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data('random_groups.fits')) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data('zerowidth.fits')) as zwc:
# Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(UserWarning, match='Field 2 has a repeat count of 0'):
assert comparerecords(zwc_pl, zwc[2].data)
def test_zero_length_table(self):
array = np.array([], dtype=[
('a', 'i8'),
('b', 'S64'),
('c', ('i4', (3, 2)))])
hdu = fits.BinTableHDU(array)
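# Row size breakdown (comment added for clarity): 'i8' contributes 8
# bytes, 'S64' 64 bytes, and the (3, 2) block of 'i4' 3*2*4 = 24 bytes,
# so NAXIS1 (bytes per row) is 8 + 64 + 24 = 96.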
assert hdu.header['NAXIS1'] == 96
assert hdu.header['NAXIS2'] == 0
assert hdu.header['TDIM3'] == '(2,3)'
field = hdu.data.field(1)
assert field.shape == (0,)
def test_dim_column_byte_order_mismatch(self):
"""
When creating a table column with non-trivial TDIMn, and
big-endian array data read from an existing FITS file, the data
should not be unnecessarily byteswapped.
Regression test for https://github.com/astropy/astropy/issues/3561
"""
data = fits.getdata(self.data('random_groups.fits'))['DATA']
col = fits.Column(name='TEST', array=data, dim='(3,1,128,1,1)',
format='1152E')
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert np.all(hdul[1].data['TEST'] == data)
def test_fits_rec_from_existing(self):
"""
Tests creating a `FITS_rec` object with `FITS_rec.from_columns`
from an existing `FITS_rec` object read from a FITS file.
This ensures that the per-column arrays are updated properly.
Regression test for https://github.com/spacetelescope/PyFITS/issues/99
"""
# The use case that revealed this problem was trying to create a new
# table from an existing table, but with additional rows so that we can
# append data from a second table (with the same column structure)
data1 = fits.getdata(self.data('tb.fits'))
data2 = fits.getdata(self.data('tb.fits'))
nrows = len(data1) + len(data2)
merged = fits.FITS_rec.from_columns(data1, nrows=nrows)
merged[len(data1):] = data2
mask = merged['c1'] > 1
masked = merged[mask]
# The test table only has two rows, only the second of which is > 1 for
# the 'c1' column
assert comparerecords(data1[1:], masked[:1])
assert comparerecords(data1[1:], masked[1:])
# Double check that the original data1 table hasn't been affected by
# its use in creating the "merged" table
assert comparerecords(data1, fits.getdata(self.data('tb.fits')))
def test_update_string_column_inplace(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4452
Ensure that changes to values in a string column are saved when
a file is opened in ``mode='update'``.
"""
data = np.array([('abc',)], dtype=[('a', 'S3')])
fits.writeto(self.temp('test.fits'), data)
with fits.open(self.temp('test.fits'), mode='update') as hdul:
hdul[1].data['a'][0] = 'XYZ'
assert hdul[1].data['a'][0] == 'XYZ'
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].data['a'][0] == 'XYZ'
# Test update but with a non-trivial TDIMn
data = np.array([([['abc', 'def', 'geh'],
['ijk', 'lmn', 'opq']],)],
dtype=[('a', ('S3', (2, 3)))])
fits.writeto(self.temp('test2.fits'), data)
expected = [['abc', 'def', 'geh'],
['ijk', 'XYZ', 'opq']]
with fits.open(self.temp('test2.fits'), mode='update') as hdul:
assert hdul[1].header['TDIM1'] == '(3,3,2)'
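# Comment added for clarity: TDIMn lists axes in FITS (Fortran) order,
# fastest-varying first, so the numpy dtype ('S3', (2, 3)), i.e. string
# length 3 then shape (2, 3), is recorded reversed as '(3,3,2)'.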
# Note: Previously I wrote data['a'][0][1, 1] to address
# the single row. However, this is broken for chararray because
# data['a'][0] does *not* return a view of the original array--this
# is a bug in chararray though and not a bug in any FITS-specific
# code so we'll roll with it for now...
# (by the way the bug in question is fixed in newer Numpy versions)
hdul[1].data['a'][0, 1, 1] = 'XYZ'
assert np.all(hdul[1].data['a'][0] == expected)
with fits.open(self.temp('test2.fits')) as hdul:
assert hdul[1].header['TDIM1'] == '(3,3,2)'
assert np.all(hdul[1].data['a'][0] == expected)
@pytest.mark.skipif('not HAVE_OBJGRAPH')
def test_reference_leak(self):
"""Regression test for https://github.com/astropy/astropy/pull/520"""
def readfile(filename):
with fits.open(filename) as hdul:
data = hdul[1].data.copy()
for colname in data.dtype.names:
data[colname]
with _refcounting('FITS_rec'):
readfile(self.data('memtest.fits'))
@pytest.mark.skipif('not HAVE_OBJGRAPH')
@pytest.mark.slow
def test_reference_leak2(self, tmpdir):
"""
Regression test for https://github.com/astropy/astropy/pull/4539
This actually re-runs a small set of tests that I found, during
careful testing, exhibited the reference leaks fixed by #4539, but
now with reference counting around each test to ensure that the
leaks are fixed.
"""
from .test_core import TestCore
from .test_connect import TestMultipleHDU
t1 = TestCore()
t1.setup()
try:
with _refcounting('FITS_rec'):
t1.test_add_del_columns2()
finally:
t1.teardown()
del t1
t2 = self.__class__()
for test_name in ['test_recarray_to_bintablehdu',
'test_numpy_ndarray_to_bintablehdu',
'test_new_table_from_recarray',
'test_new_fitsrec']:
t2.setup()
try:
with _refcounting('FITS_rec'):
getattr(t2, test_name)()
finally:
t2.teardown()
del t2
t3 = TestMultipleHDU()
t3.setup_class()
try:
with _refcounting('FITS_rec'):
t3.test_read(tmpdir)
finally:
t3.teardown_class()
del t3
def test_dump_overwrite(self):
with fits.open(self.data('table.fits')) as hdul:
tbhdu = hdul[1]
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
msg = (r"File .* already exists\. File .* already exists\. File "
r".* already exists\. If you mean to replace the "
r"file\(s\) then use the argument 'overwrite=True'\.")
with pytest.raises(OSError, match=msg):
tbhdu.dump(datafile, cdfile, hfile)
tbhdu.dump(datafile, cdfile, hfile, overwrite=True)
def test_pseudo_unsigned_ints(self):
"""
Tests updating a table column containing pseudo-unsigned ints.
"""
data = np.array([1, 2, 3], dtype=np.uint32)
col = fits.Column(name='A', format='1J', bzero=2**31, array=data)
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp('test.fits'))
# Test that the file wrote out correctly
with fits.open(self.temp('test.fits'), uint=True) as hdul:
hdu = hdul[1]
assert 'TZERO1' in hdu.header
assert hdu.header['TZERO1'] == 2**31
assert hdu.data['A'].dtype == np.dtype('uint32')
assert np.all(hdu.data['A'] == data)
# Test updating the unsigned int data
hdu.data['A'][0] = 99
hdu.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits'), uint=True) as hdul:
hdu = hdul[1]
assert 'TZERO1' in hdu.header
assert hdu.header['TZERO1'] == 2**31
assert hdu.data['A'].dtype == np.dtype('uint32')
assert np.all(hdu.data['A'] == [99, 2, 3])
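# Illustrative sketch of the pseudo-unsigned convention exercised above
# (added for clarity; not an astropy API, and not collected by pytest
# since it lacks the ``test_`` prefix): an unsigned value u is stored on
# disk as the signed integer u - TZERO, with TZERO = 2**31 for 32-bit
# columns, so reading back stored + TZERO recovers u.
def _pseudo_unsigned_sketch(self):
    data = np.array([1, 2, 3], dtype=np.uint32)
    tzero = 2**31
    stored = (data.astype(np.int64) - tzero).astype(np.int32)
    assert np.all(stored.astype(np.int64) + tzero == data)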
def test_column_with_scaling(self):
"""Check that a scaled column if correctly saved once it is modified.
Regression test for https://github.com/astropy/astropy/issues/6887
"""
c1 = fits.Column(name='c1', array=np.array([1], dtype='>i2'),
format='1I', bscale=1, bzero=32768)
S = fits.HDUList([fits.PrimaryHDU(),
fits.BinTableHDU.from_columns([c1])])
# Change value in memory
S[1].data['c1'][0] = 2
S.writeto(self.temp("a.fits"))
assert S[1].data['c1'] == 2
# Read and change value in memory
with fits.open(self.temp("a.fits")) as X:
X[1].data['c1'][0] = 10
assert X[1].data['c1'][0] == 10
# Write back to file
X.writeto(self.temp("b.fits"))
# Now check the file
with fits.open(self.temp("b.fits")) as hdul:
assert hdul[1].data['c1'][0] == 10
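# Illustrative sketch of the FITS scaling convention used by the column
# above (added for clarity; not an astropy API, and not collected by
# pytest): physical = raw * TSCALn + TZEROn, so with bscale=1 and
# bzero=32768 a physical value of 10 is stored as 10 - 32768 = -32758.
def _column_scaling_sketch(self):
    bscale, bzero = 1, 32768
    physical = 10
    raw = (physical - bzero) // bscale
    assert raw == -32758
    assert raw * bscale + bzero == physical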
def test_ascii_inttypes(self):
"""
Test correct integer dtypes according to ASCII table field widths.
Regression for https://github.com/astropy/astropy/issues/9899
"""
i08 = np.array([2**3, 2**23, -2**22, 10, 2**23], dtype='i4')
i10 = np.array([2**8, 2**31-1, -2**29, 30, 2**31-1], dtype='i8')
i20 = np.array([2**16, 2**63-1, -2**63, 40, 2**63-1], dtype='i8')
i02 = np.array([2**8, 2**13, -2**9, 50, 2**13], dtype='i2')
t0 = Table([i08, i08*2, i10, i20, i02])
t1 = Table.read(self.data('ascii_i4-i20.fits'))
assert t1.dtype == t0.dtype
assert comparerecords(t1, t0)
@contextlib.contextmanager
def _refcounting(type_):
"""
Perform the body of a with statement with reference counting for the
given type (given by class name)--raises an assertion error if there
are more unfreed objects of the given type than when we entered the
with statement.
"""
gc.collect()
refcount = len(objgraph.by_type(type_))
yield refcount
gc.collect()
assert len(objgraph.by_type(type_)) <= refcount, \
    f"More {type_!r} objects still in memory than before."
class TestVLATables(FitsTestCase):
"""Tests specific to tables containing variable-length arrays."""
def test_variable_length_columns(self):
def test(format_code):
col = fits.Column(name='QUAL_SPE', format=format_code,
array=[[0] * 1571] * 225)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as toto:
q = toto[1].data.field('QUAL_SPE')
assert (q[0][4:8] ==
np.array([0, 0, 0, 0], dtype=np.uint8)).all()
assert toto[1].columns[0].format.endswith('J(1571)')
for code in ('PJ()', 'QJ()'):
test(code)
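# Note on the two format codes exercised above: 'P' variable-length
# array descriptors store (count, heap offset) pairs as 32-bit integers,
# while 'Q' descriptors use 64-bit integers and so can address heaps
# larger than 2 GiB; both should round-trip identically here.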
def test_extend_variable_length_array(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54"""
def test(format_code):
arr = [[1] * 10] * 10
col1 = fits.Column(name='TESTVLF', format=format_code, array=arr)
col2 = fits.Column(name='TESTSCA', format='J', array=[1] * 10)
tb_hdu = fits.BinTableHDU.from_columns([col1, col2], nrows=15)
# This asserts that the normal 'scalar' column's length was extended
assert len(tb_hdu.data['TESTSCA']) == 15
# And this asserts that the VLF column was extended in the same manner
assert len(tb_hdu.data['TESTVLF']) == 15
# We can't compare the whole array since the _VLF is an array of
# objects, but comparing just the edge case rows should suffice
assert (tb_hdu.data['TESTVLF'][0] == arr[0]).all()
assert (tb_hdu.data['TESTVLF'][9] == arr[9]).all()
assert (tb_hdu.data['TESTVLF'][10] == ([0] * 10)).all()
assert (tb_hdu.data['TESTVLF'][-1] == ([0] * 10)).all()
for code in ('PJ()', 'QJ()'):
test(code)
def test_variable_length_table_format_pd_from_object_array(self):
def test(format_code):
a = np.array([np.array([7.2e-20, 7.3e-20]), np.array([0.0]),
np.array([0.0])], 'O')
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith('D(2)')
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ('PD()', 'QD()'):
test(code)
def test_variable_length_table_format_pd_from_list(self):
def test(format_code):
a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]),
np.array([0.0])]
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith('D(2)')
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ('PD()', 'QD()'):
test(code)
def test_variable_length_table_format_pa_from_object_array(self):
def test(format_code):
a = np.array([np.array(['a', 'b', 'c']), np.array(['d', 'e']),
np.array(['f'])], 'O')
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].columns[0].format.endswith('A(3)')
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ('PA()', 'QA()'):
test(code)
def test_variable_length_table_format_pa_from_list(self):
def test(format_code):
a = ['a', 'ab', 'abc']
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].columns[0].format.endswith('A(3)')
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ('PA()', 'QA()'):
test(code)
def test_getdata_vla(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200"""
def test(format_code):
col = fits.Column(name='QUAL_SPE', format=format_code,
array=[np.arange(1572)] * 225)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp('toto.fits'), overwrite=True)
data = fits.getdata(self.temp('toto.fits'))
# Need to compare to the original data row by row since the FITS_rec
# returns an array of _VLA objects
for row_a, row_b in zip(data['QUAL_SPE'], col.array):
assert (row_a == row_b).all()
for code in ('PJ()', 'QJ()'):
test(code)
@pytest.mark.skipif(not NUMPY_LT_1_22 and NUMPY_LT_1_22_1 and sys.platform == 'win32',
reason='https://github.com/numpy/numpy/issues/20699')
def test_copy_vla(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/47
"""
# Make a file containing a couple of VLA tables
arr1 = [np.arange(n + 1) for n in range(255)]
arr2 = [np.arange(255, 256 + n) for n in range(255)]
# A dummy non-VLA column needed to reproduce issue #47
c = fits.Column('test', format='J', array=np.arange(255))
c1 = fits.Column('A', format='PJ', array=arr1)
c2 = fits.Column('B', format='PJ', array=arr2)
t1 = fits.BinTableHDU.from_columns([c, c1])
t2 = fits.BinTableHDU.from_columns([c, c2])
hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2])
hdul.writeto(self.temp('test.fits'), overwrite=True)
# Just test that the test file wrote out correctly
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['TFORM2'] == 'PJ(255)'
assert h[2].header['TFORM2'] == 'PJ(255)'
assert comparerecords(h[1].data, t1.data)
assert comparerecords(h[2].data, t2.data)
# Try copying the second VLA and writing to a new file
with fits.open(self.temp('test.fits')) as h:
new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header)
new_hdu.writeto(self.temp('test3.fits'))
with fits.open(self.temp('test3.fits')) as h2:
assert comparerecords(h2[1].data, t2.data)
new_hdul = fits.HDUList([fits.PrimaryHDU()])
new_hdul.writeto(self.temp('test2.fits'))
# Open several copies of the test file and append copies of the second
# VLA table
with fits.open(self.temp('test2.fits'), mode='append') as new_hdul:
for _ in range(2):
with fits.open(self.temp('test.fits')) as h:
new_hdul.append(h[2])
new_hdul.flush()
# Test that all the VLA copies wrote correctly
with fits.open(self.temp('test2.fits')) as new_hdul:
for idx in range(1, 3):
assert comparerecords(new_hdul[idx].data, t2.data)
def test_vla_with_gap(self):
hdul = fits.open(self.data('theap-gap.fits'))
data = hdul[1].data
assert data.shape == (500,)
assert data['i'][497] == 497
assert np.array_equal(data['arr'][497], [0, 1, 2, 3, 4])
hdul.close()
def test_tolist(self):
col = fits.Column(
name='var', format='PI()',
array=np.array([[1, 2, 3], [11, 12]], dtype=np.object_))
hdu = fits.BinTableHDU.from_columns([col])
assert hdu.data.tolist() == [[[1, 2, 3]], [[11, 12]]]
assert hdu.data['var'].tolist() == [[1, 2, 3], [11, 12]]
def test_tolist_from_file(self):
filename = self.data('variable_length_table.fits')
with fits.open(filename) as hdul:
hdu = hdul[1]
assert hdu.data.tolist() == [[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]]
assert hdu.data['var'].tolist() == [[45, 56], [11, 12, 13]]
# These are tests that solely test the Column and ColDefs interfaces and
# related functionality without directly involving full tables; currently there
# are few of these but I expect there to be more as I improve the test coverage
class TestColumnFunctions(FitsTestCase):
def test_column_format_interpretation(self):
"""
Test to ensure that when Numpy-style record formats are passed in to
the Column constructor for the format argument, they are recognized so
long as they are unambiguous ("unambiguous" is a loose criterion here,
since Numpy is case-insensitive when parsing format codes, but their
"proper" case is lower-case, so we accept that). In practice, any key
in the NUMPY2FITS dict should be accepted.
"""
for recformat, fitsformat in NUMPY2FITS.items():
c = fits.Column('TEST', np.dtype(recformat))
assert c.format == fitsformat
c = fits.Column('TEST', recformat)
assert c.format == fitsformat
c = fits.Column('TEST', fitsformat)
assert c.format == fitsformat
# Test a few cases that are ambiguous in that they *are* valid binary
# table formats though not ones that are likely to be used, but are
# also valid common ASCII table formats
c = fits.Column('TEST', 'I4')
assert c.format == 'I4'
assert c.format.format == 'I'
assert c.format.width == 4
c = fits.Column('TEST', 'F15.8')
assert c.format == 'F15.8'
assert c.format.format == 'F'
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column('TEST', 'E15.8')
assert c.format.format == 'E'
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column('TEST', 'D15.8')
assert c.format.format == 'D'
assert c.format.width == 15
assert c.format.precision == 8
# zero-precision should be allowed as well, for float types
# https://github.com/astropy/astropy/issues/3422
c = fits.Column('TEST', 'F10.0')
assert c.format.format == 'F'
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column('TEST', 'E10.0')
assert c.format.format == 'E'
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column('TEST', 'D10.0')
assert c.format.format == 'D'
assert c.format.width == 10
assert c.format.precision == 0
# These are a couple cases where the format code is a valid binary
# table format, and is not strictly a valid ASCII table format but
# could be *interpreted* as one by appending a default width. This
# will only happen either when creating an ASCII table or when
# explicitly specifying ascii=True when the column is created
c = fits.Column('TEST', 'I')
assert c.format == 'I'
assert c.format.recformat == 'i2'
c = fits.Column('TEST', 'I', ascii=True)
assert c.format == 'I10'
assert c.format.recformat == 'i4'
# With specified widths, integer precision should be set appropriately
c = fits.Column('TEST', 'I4', ascii=True)
assert c.format == 'I4'
assert c.format.recformat == 'i2'
c = fits.Column('TEST', 'I9', ascii=True)
assert c.format == 'I9'
assert c.format.recformat == 'i4'
c = fits.Column('TEST', 'I12', ascii=True)
assert c.format == 'I12'
assert c.format.recformat == 'i8'
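# The width-to-dtype mapping above follows the largest value an n-digit
# field can hold: up to 4 digits fits in int16, up to 9 digits in int32,
# and anything wider needs int64.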
c = fits.Column('TEST', 'E')
assert c.format == 'E'
assert c.format.recformat == 'f4'
c = fits.Column('TEST', 'E', ascii=True)
assert c.format == 'E15.7'
# F is not a valid binary table format so it should be unambiguously
# treated as an ASCII column
c = fits.Column('TEST', 'F')
assert c.format == 'F16.7'
c = fits.Column('TEST', 'D')
assert c.format == 'D'
assert c.format.recformat == 'f8'
c = fits.Column('TEST', 'D', ascii=True)
assert c.format == 'D25.17'
def test_zero_precision_float_column(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3422
"""
c = fits.Column('TEST', 'F5.0', array=[1.1, 2.2, 3.3])
# The decimal places will be clipped
t = fits.TableHDU.from_columns([c])
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].header['TFORM1'] == 'F5.0'
assert hdul[1].data['TEST'].dtype == np.dtype('float64')
assert np.all(hdul[1].data['TEST'] == [1.0, 2.0, 3.0])
# Check how the raw data looks
raw = np.rec.recarray.field(hdul[1].data, 'TEST')
assert raw.tobytes() == b' 1. 2. 3.'
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column('mag', format='E', array=arr)
assert (arr == col.array).all()
def test_new_coldefs_with_invalid_sequence(self):
"""Test that a TypeError is raised when a ColDefs is instantiated with
a sequence of non-Column objects.
"""
pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])
def test_coldefs_init_from_array(self):
"""Test that ColDefs._init_from_array works with single element data-
types as well as multi-element data-types
"""
nd_array = np.ndarray((1,), dtype=[('A', '<u4', (2,)), ('B', '>u2')])
col_defs = fits.column.ColDefs(nd_array)
assert 2**31 == col_defs['A'].bzero
assert 2**15 == col_defs['B'].bzero
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data('tb.fits')) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data('ascii.fits')) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data('random_groups.fits')) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data('zerowidth.fits')) as zwc:
# Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(UserWarning, match=r'Field 2 has a repeat count '
r'of 0 in its format code'):
assert comparerecords(zwc_pl, zwc[2].data)
def test_column_lookup_by_name(self):
"""Tests that a `ColDefs` can be indexed by column name."""
a = fits.Column(name='a', format='D')
b = fits.Column(name='b', format='D')
cols = fits.ColDefs([a, b])
assert cols['a'] == cols[0]
assert cols['b'] == cols[1]
def test_column_attribute_change_after_removal(self):
"""
This is a test of the column attribute change notification system.
After a column has been removed from a table (but other references
are kept to that same column) changes to that column's attributes
should not trigger a notification on the table it was removed from.
"""
# One way we can check this is to ensure there are no further changes
# to the header
table = fits.BinTableHDU.from_columns([
fits.Column('a', format='D'),
fits.Column('b', format='D')])
b = table.columns['b']
table.columns.del_col('b')
assert table.data.dtype.names == ('a',)
b.name = 'HELLO'
assert b.name == 'HELLO'
assert 'TTYPE2' not in table.header
assert table.header['TTYPE1'] == 'a'
assert table.columns.names == ['a']
with pytest.raises(KeyError):
table.columns['b']
# Make sure updates to the remaining column still work
table.columns.change_name('a', 'GOODBYE')
with pytest.raises(KeyError):
table.columns['a']
assert table.columns['GOODBYE'].name == 'GOODBYE'
assert table.data.dtype.names == ('GOODBYE',)
assert table.columns.names == ['GOODBYE']
assert table.data.columns.names == ['GOODBYE']
table.columns['GOODBYE'].name = 'foo'
with pytest.raises(KeyError):
table.columns['GOODBYE']
assert table.columns['foo'].name == 'foo'
assert table.data.dtype.names == ('foo',)
assert table.columns.names == ['foo']
assert table.data.columns.names == ['foo']
def test_x_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the X (bit array) format can be deep-copied.
"""
c = fits.Column('xcol', format='5X', array=[1, 0, 0, 1, 0])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array == c.array)
def test_p_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the P/Q formats (variable length arrays) can be
deep-copied.
"""
c = fits.Column('pcol', format='PJ', array=[[1, 2], [3, 4, 5]])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array[0] == c.array[0])
assert np.all(c2.array[1] == c.array[1])
c3 = fits.Column('qcol', format='QJ', array=[[1, 2], [3, 4, 5]])
c4 = copy.deepcopy(c3)
assert c4.name == c3.name
assert c4.format == c3.format
assert np.all(c4.array[0] == c3.array[0])
assert np.all(c4.array[1] == c3.array[1])
def test_column_verify_keywords(self):
"""
Test that the keyword arguments used to initialize a Column, specifically
those that typically read from a FITS header (so excluding array),
are verified to have a valid value.
"""
with pytest.raises(AssertionError) as err:
_ = fits.Column(1, format='I', array=[1, 2, 3, 4, 5])
assert 'Column name must be a string able to fit' in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column('col', format=0, null='Nan', disp=1, coord_type=1,
coord_unit=2, coord_inc='1', time_ref_pos=1,
coord_ref_point='1', coord_ref_value='1')
err_msgs = ['keyword arguments to Column were invalid',
'TFORM', 'TNULL', 'TDISP', 'TCTYP', 'TCUNI', 'TCRPX',
'TCRVL', 'TCDLT', 'TRPOS']
for msg in err_msgs:
assert msg in str(err.value)
def test_column_verify_start(self):
"""
Regression test for https://github.com/astropy/astropy/pull/6359
Test the validation of the column start position option (ASCII table only),
corresponding to the ``TBCOL`` keyword.
Test whether the VerifyError message generated is the one with the highest
priority, i.e. that the order in which error messages are displayed is
maintained.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column('a', format='B', start='a', array=[1, 2, 3])
assert "start option (TBCOLn) is not allowed for binary table columns" in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column('a', format='I', start='a', array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got 'a')." in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column('a', format='I', start='-56', array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got -56)." in str(err.value)
@pytest.mark.parametrize('keys',
[{'TFORM': 'Z', 'TDISP': 'E'},
{'TFORM': '2', 'TDISP': '2E'},
{'TFORM': 3, 'TDISP': 6.3},
{'TFORM': float, 'TDISP': np.float64},
{'TFORM': '', 'TDISP': 'E.5'}])
def test_column_verify_formats(self, keys):
"""
Additional tests for verification of 'TFORM' and 'TDISP' keyword
arguments used to initialize a Column.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column('col', format=keys['TFORM'], disp=keys['TDISP'])
for key in keys.keys():
assert key in str(err.value)
assert str(keys[key]) in str(err.value)
def test_regression_5383():
# Regression test for an undefined variable
x = np.array([1, 2, 3])
col = fits.Column(name='a', array=x, format='E')
hdu = fits.BinTableHDU.from_columns([col])
del hdu._header['TTYPE1']
hdu.columns[0].name = 'b'
def test_table_to_hdu():
from astropy.table import Table
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
table['a'].unit = 'm/s'
table['b'].unit = 'not-a-unit'
table.meta['foo'] = 'bar'
with pytest.warns(UnitsWarning, match="'not-a-unit' did not parse as"
" fits unit") as w:
hdu = fits.BinTableHDU(table, header=fits.Header({'TEST': 1}))
assert len(w) == 1
for name in 'abc':
assert np.array_equal(table[name], hdu.data[name])
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index('TUNIT1') < hdu.header.index('TTYPE2')
assert hdu.header['FOO'] == 'bar'
assert hdu.header['TEST'] == 1
def test_regression_scalar_indexing():
# Indexing a FITS_rec with a tuple that returns a scalar record
# should work
x = np.array([(1.0, 2), (3.0, 4)],
dtype=[('x', float), ('y', int)]).view(fits.FITS_rec)
x1a = x[1]
# this should succeed.
x1b = x[(1,)]
# FITS_record does not define __eq__; so test elements.
assert all(a == b for a, b in zip(x1a, x1b))
def test_new_column_attributes_preserved(tmpdir):
# Regression test for https://github.com/astropy/astropy/issues/7145
# This makes sure that for now we don't clear away keywords that have
# newly been recognized (in Astropy 3.0) as special column attributes but
# instead just warn that we might do so in future. The new keywords are:
# TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS
col = []
col.append(fits.Column(name="TIME", format="1E", unit="s"))
col.append(fits.Column(name="RAWX", format="1I", unit="pixel"))
col.append(fits.Column(name="RAWY", format="1I"))
cd = fits.ColDefs(col)
hdr = fits.Header()
# Keywords that will get ignored in favor of these in the data
hdr['TUNIT1'] = 'pixel'
hdr['TUNIT2'] = 'm'
hdr['TUNIT3'] = 'm'
# Keywords that were added in Astropy 3.0 that should eventually be
# ignored and set on the data instead
hdr['TCTYP2'] = 'RA---TAN'
hdr['TCTYP3'] = 'ANGLE'
hdr['TCRVL2'] = -999.0
hdr['TCRVL3'] = -999.0
hdr['TCRPX2'] = 1.0
hdr['TCRPX3'] = 1.0
hdr['TALEN2'] = 16384
hdr['TALEN3'] = 1024
hdr['TCUNI2'] = 'angstrom'
hdr['TCUNI3'] = 'deg'
# Other non-relevant keywords
hdr['RA'] = 1.5
hdr['DEC'] = 3.0
with pytest.warns(AstropyDeprecationWarning) as warning_list:
hdu = fits.BinTableHDU.from_columns(cd, hdr)
assert str(warning_list[0].message).startswith(
"The following keywords are now recognized as special")
# First, check that special keywords such as TUNIT are ignored in the header
# We may want to change that behavior in future, but this is the way it's
# been for a while now.
assert hdu.columns[0].unit == 's'
assert hdu.columns[1].unit == 'pixel'
assert hdu.columns[2].unit is None
assert hdu.header['TUNIT1'] == 's'
assert hdu.header['TUNIT2'] == 'pixel'
assert 'TUNIT3' not in hdu.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu.columns[0].coord_type is None
assert hdu.columns[1].coord_type is None
assert hdu.columns[2].coord_type is None
assert 'TCTYP1' not in hdu.header
assert hdu.header['TCTYP2'] == 'RA---TAN'
assert hdu.header['TCTYP3'] == 'ANGLE'
# Make sure that other keywords are still there
assert hdu.header['RA'] == 1.5
assert hdu.header['DEC'] == 3.0
# Now we can write this HDU to a file and re-load. Re-loading *should*
# cause the special column attributes to be picked up (it's just that when a
# header is manually specified, these values are ignored)
filename = tmpdir.join('test.fits').strpath
hdu.writeto(filename)
# Make sure we don't emit a warning in this case
with warnings.catch_warnings(record=True) as warning_list:
with fits.open(filename) as hdul:
hdu2 = hdul[1]
assert len(warning_list) == 0
# Check that column attributes are now correctly set
assert hdu2.columns[0].unit == 's'
assert hdu2.columns[1].unit == 'pixel'
assert hdu2.columns[2].unit is None
assert hdu2.header['TUNIT1'] == 's'
assert hdu2.header['TUNIT2'] == 'pixel'
assert 'TUNIT3' not in hdu2.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu2.columns[0].coord_type is None
assert hdu2.columns[1].coord_type == 'RA---TAN'
assert hdu2.columns[2].coord_type == 'ANGLE'
assert 'TCTYP1' not in hdu2.header
assert hdu2.header['TCTYP2'] == 'RA---TAN'
assert hdu2.header['TCTYP3'] == 'ANGLE'
# Make sure that other keywords are still there
assert hdu2.header['RA'] == 1.5
assert hdu2.header['DEC'] == 3.0
def test_empty_table(tmpdir):
ofile = str(tmpdir.join('emptytable.fits'))
hdu = fits.BinTableHDU(header=None, data=None, name='TEST')
hdu.writeto(ofile)
with fits.open(ofile) as hdul:
assert hdul['TEST'].data.size == 0
ofile = str(tmpdir.join('emptytable.fits.gz'))
hdu = fits.BinTableHDU(header=None, data=None, name='TEST')
hdu.writeto(ofile, overwrite=True)
with fits.open(ofile) as hdul:
assert hdul['TEST'].data.size == 0
def test_a3dtable(tmpdir):
testfile = str(tmpdir.join('test.fits'))
hdu = fits.BinTableHDU.from_columns([
fits.Column(name='FOO', format='J', array=np.arange(10))
])
hdu.header['XTENSION'] = 'A3DTABLE'
hdu.writeto(testfile, output_verify='ignore')
with fits.open(testfile) as hdul:
assert hdul[1].header['XTENSION'] == 'A3DTABLE'
with pytest.warns(AstropyUserWarning) as w:
hdul.verify('fix')
assert str(w[0].message) == 'Verification reported errors:'
assert str(w[2].message).endswith(
'Converted the XTENSION keyword to BINTABLE.')
assert hdul[1].header['XTENSION'] == 'BINTABLE'
def test_invalid_file(tmp_path):
hdu = fits.BinTableHDU()
# little trick to write an invalid card ...
hdu.header['FOO'] = None
hdu.header.cards['FOO']._value = np.nan
testfile = tmp_path / 'test.fits'
hdu.writeto(testfile, output_verify='ignore')
with fits.open(testfile) as hdul:
assert hdul[1].data is not None
def test_unit_parse_strict(tmp_path):
path = tmp_path / 'invalid_unit.fits'
# this is a unit parseable by the generic format but invalid for FITS
invalid_unit = '1 / (MeV sr s)'
unit = Unit(invalid_unit)
t = Table({'a': [1, 2, 3]})
t.write(path)
with fits.open(path, mode='update') as hdul:
hdul[1].header['TUNIT1'] = invalid_unit
# default is "warn"
with pytest.warns(UnitsWarning):
t = Table.read(path)
assert isinstance(t['a'].unit, UnrecognizedUnit)
t = Table.read(path, unit_parse_strict='silent')
assert isinstance(t['a'].unit, UnrecognizedUnit)
with pytest.raises(ValueError):
Table.read(path, unit_parse_strict='raise')
with pytest.warns(UnitsWarning):
Table.read(path, unit_parse_strict='warn')