# ======================================================================
# record sha256: deb48411e12af050d66625a13379fb60af1e3737ca57dcc41fafee210506964a
# ======================================================================
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# The idea for this module (but no code) was borrowed from the
# quantities (http://pythonhosted.org/quantities/) package.
"""Helper functions for Quantity.

In particular, this implements the logic that determines scaling and result
units for a given ufunc, given input units.
"""
from fractions import Fraction

import numpy as np

from . import UFUNC_HELPERS, UNSUPPORTED_UFUNCS
from astropy.units.core import (
    UnitsError, UnitConversionError, UnitTypeError,
    dimensionless_unscaled, get_current_unit_registry)


def _d(unit):
    if unit is None:
        return dimensionless_unscaled
    else:
        return unit


def get_converter(from_unit, to_unit):
    """Like Unit._get_converter, except returns None if no scaling is needed,
    i.e., if the inferred scale is unity."""
    try:
        scale = from_unit._to(to_unit)
    except UnitsError:
        return from_unit._apply_equivalencies(
            from_unit, to_unit, get_current_unit_registry().equivalencies)
    except AttributeError:
        raise UnitTypeError("Unit '{0}' cannot be converted to '{1}'"
                            .format(from_unit, to_unit))
    if scale == 1.:
        return None
    else:
        return lambda val: scale * val


def get_converters_and_unit(f, unit1, unit2):
    converters = [None, None]
    # By default, we try adjusting unit2 to unit1, so that the result will
    # be unit1 as well. But if there is no second unit, we have to try
    # adjusting unit1 (to dimensionless, see below).
    if unit2 is None:
        if unit1 is None:
            # No units for any input -- e.g., np.add(a1, a2, out=q)
            return converters, dimensionless_unscaled

        changeable = 0
        # swap units.
        unit2 = unit1
        unit1 = None
    elif unit2 is unit1:
        # ensure identical units is fast ("==" is slow, so avoid that).
        return converters, unit1
    else:
        changeable = 1

    # Try to get a converter from unit2 to unit1.
    if unit1 is None:
        try:
            converters[changeable] = get_converter(unit2,
                                                   dimensionless_unscaled)
        except UnitsError:
            # special case: would be OK if unitless number is zero, inf, nan
            converters[1-changeable] = False
            return converters, unit2
        else:
            return converters, dimensionless_unscaled

    else:
        try:
            converters[changeable] = get_converter(unit2, unit1)
        except UnitsError:
            raise UnitConversionError(
                "Can only apply '{0}' function to quantities "
                "with compatible dimensions"
                .format(f.__name__))

        return converters, unit1


# SINGLE ARGUMENT UFUNC HELPERS
#
# The functions below take a single argument, which is the quantity upon
# which the ufunc is being used. The output of the helper function should be
# two values: a list with a single converter to be used to scale the input
# before it is passed to the ufunc (or None if no conversion is needed), and
# the unit the output will be in.

def helper_onearg_test(f, unit):
    return ([None], None)


def helper_invariant(f, unit):
    return ([None], _d(unit))


def helper_square(f, unit):
    return ([None], unit ** 2 if unit is not None else dimensionless_unscaled)


def helper_reciprocal(f, unit):
    return ([None], unit ** -1 if unit is not None
            else dimensionless_unscaled)


one_half = 0.5  # faster than Fraction(1, 2)
one_third = Fraction(1, 3)


def helper_sqrt(f, unit):
    return ([None], unit ** one_half if unit is not None
            else dimensionless_unscaled)


def helper_cbrt(f, unit):
    return ([None], (unit ** one_third if unit is not None
                     else dimensionless_unscaled))


def helper_modf(f, unit):
    if unit is None:
        return [None], (dimensionless_unscaled, dimensionless_unscaled)

    try:
        return ([get_converter(unit, dimensionless_unscaled)],
                (dimensionless_unscaled, dimensionless_unscaled))
    except UnitsError:
        raise UnitTypeError("Can only apply '{0}' function to "
                            "dimensionless quantities"
                            .format(f.__name__))


def helper__ones_like(f, unit):
    return [None], dimensionless_unscaled


def helper_dimensionless_to_dimensionless(f, unit):
    if unit is None:
        return [None], dimensionless_unscaled

    try:
        return ([get_converter(unit, dimensionless_unscaled)],
                dimensionless_unscaled)
    except UnitsError:
        raise UnitTypeError("Can only apply '{0}' function to "
                            "dimensionless quantities"
                            .format(f.__name__))


def helper_dimensionless_to_radian(f, unit):
    from astropy.units.si import radian
    if unit is None:
        return [None], radian

    try:
        return [get_converter(unit, dimensionless_unscaled)], radian
    except UnitsError:
        raise UnitTypeError("Can only apply '{0}' function to "
                            "dimensionless quantities"
                            .format(f.__name__))


def helper_degree_to_radian(f, unit):
    from astropy.units.si import degree, radian
    try:
        return [get_converter(unit, degree)], radian
    except UnitsError:
        raise UnitTypeError("Can only apply '{0}' function to "
                            "quantities with angle units"
                            .format(f.__name__))


def helper_radian_to_degree(f, unit):
    from astropy.units.si import degree, radian
    try:
        return [get_converter(unit, radian)], degree
    except UnitsError:
        raise UnitTypeError("Can only apply '{0}' function to "
                            "quantities with angle units"
                            .format(f.__name__))


def helper_radian_to_dimensionless(f, unit):
    from astropy.units.si import radian
    try:
        return [get_converter(unit, radian)], dimensionless_unscaled
    except UnitsError:
        raise UnitTypeError("Can only apply '{0}' function to "
                            "quantities with angle units"
                            .format(f.__name__))


def helper_frexp(f, unit):
    if not unit.is_unity():
        raise UnitTypeError("Can only apply '{0}' function to "
                            "unscaled dimensionless quantities"
                            .format(f.__name__))
    return [None], (None, None)


# TWO ARGUMENT UFUNC HELPERS
#
# The functions below take two arguments. The output of the helper function
# should be two values: a tuple of two converters to be used to scale the
# inputs before being passed to the ufunc (None if no conversion is needed),
# and the unit the output will be in.

def helper_multiplication(f, unit1, unit2):
    return [None, None], _d(unit1) * _d(unit2)


def helper_division(f, unit1, unit2):
    return [None, None], _d(unit1) / _d(unit2)


def helper_power(f, unit1, unit2):
    # TODO: find a better way to do this, currently need to signal that one
    # still needs to raise power of unit1 in main code
    if unit2 is None:
        return [None, None], False

    try:
        return [None, get_converter(unit2, dimensionless_unscaled)], False
    except UnitsError:
        raise UnitTypeError("Can only raise something to a "
                            "dimensionless quantity")


def helper_ldexp(f, unit1, unit2):
    if unit2 is not None:
        raise TypeError("Cannot use ldexp with a quantity "
                        "as second argument.")
    else:
        return [None, None], _d(unit1)


def helper_copysign(f, unit1, unit2):
    # if first arg is not a quantity, just return plain array
    if unit1 is None:
        return [None, None], None
    else:
        return [None, None], unit1


def helper_heaviside(f, unit1, unit2):
    try:
        converter2 = (get_converter(unit2, dimensionless_unscaled)
                      if unit2 is not None else None)
    except UnitsError:
        raise UnitTypeError("Can only apply 'heaviside' function with a "
                            "dimensionless second argument.")
    return ([None, converter2], dimensionless_unscaled)


def helper_two_arg_dimensionless(f, unit1, unit2):
    try:
        converter1 = (get_converter(unit1, dimensionless_unscaled)
                      if unit1 is not None else None)
        converter2 = (get_converter(unit2, dimensionless_unscaled)
                      if unit2 is not None else None)
    except UnitsError:
        raise UnitTypeError("Can only apply '{0}' function to "
                            "dimensionless quantities"
                            .format(f.__name__))
    return ([converter1, converter2], dimensionless_unscaled)


# This used to be a separate function that just called
# get_converters_and_unit. Using it directly saves a few microseconds;
# keeping the clearer name.
helper_twoarg_invariant = get_converters_and_unit


def helper_twoarg_comparison(f, unit1, unit2):
    converters, _ = get_converters_and_unit(f, unit1, unit2)
    return converters, None


def helper_twoarg_invtrig(f, unit1, unit2):
    from astropy.units.si import radian
    converters, _ = get_converters_and_unit(f, unit1, unit2)
    return converters, radian


def helper_twoarg_floor_divide(f, unit1, unit2):
    converters, _ = get_converters_and_unit(f, unit1, unit2)
    return converters, dimensionless_unscaled


def helper_divmod(f, unit1, unit2):
    converters, result_unit = get_converters_and_unit(f, unit1, unit2)
    return converters, (dimensionless_unscaled, result_unit)


# list of ufuncs:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs

UNSUPPORTED_UFUNCS |= {
    np.bitwise_and, np.bitwise_or, np.bitwise_xor, np.invert, np.left_shift,
    np.right_shift, np.logical_and, np.logical_or, np.logical_xor,
    np.logical_not}
for name in 'isnat', 'gcd', 'lcm':
    # isnat was introduced in numpy 1.14, gcd+lcm in 1.15
    ufunc = getattr(np, name, None)
    if isinstance(ufunc, np.ufunc):
        UNSUPPORTED_UFUNCS |= {ufunc}

# SINGLE ARGUMENT UFUNCS

# ufuncs that return a boolean and do not care about the unit
onearg_test_ufuncs = (np.isfinite, np.isinf, np.isnan, np.sign, np.signbit)
for ufunc in onearg_test_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_onearg_test

# ufuncs that return a value with the same unit as the input
invariant_ufuncs = (np.absolute, np.fabs, np.conj, np.conjugate, np.negative,
                    np.spacing, np.rint, np.floor, np.ceil, np.trunc,
                    np.positive)
for ufunc in invariant_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_invariant

# ufuncs that require dimensionless input and give dimensionless output
dimensionless_to_dimensionless_ufuncs = (np.exp, np.expm1, np.exp2, np.log,
                                         np.log10, np.log2, np.log1p)
# As found out in gh-7058, some numpy 1.13 conda installations also provide
# np.erf, even though upstream doesn't have it. We include it if present.
if isinstance(getattr(np.core.umath, 'erf', None), np.ufunc):
    dimensionless_to_dimensionless_ufuncs += (np.core.umath.erf,)

for ufunc in dimensionless_to_dimensionless_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless

# ufuncs that require dimensionless input and give output in radians
dimensionless_to_radian_ufuncs = (np.arccos, np.arcsin, np.arctan,
                                  np.arccosh, np.arcsinh, np.arctanh)
for ufunc in dimensionless_to_radian_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_dimensionless_to_radian

# ufuncs that require input in degrees and give output in radians
degree_to_radian_ufuncs = (np.radians, np.deg2rad)
for ufunc in degree_to_radian_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_degree_to_radian

# ufuncs that require input in radians and give output in degrees
radian_to_degree_ufuncs = (np.degrees, np.rad2deg)
for ufunc in radian_to_degree_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_radian_to_degree

# ufuncs that require input in radians and give dimensionless output
radian_to_dimensionless_ufuncs = (np.cos, np.sin, np.tan, np.cosh, np.sinh,
                                  np.tanh)
for ufunc in radian_to_dimensionless_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_radian_to_dimensionless

# ufuncs handled as special cases
UFUNC_HELPERS[np.sqrt] = helper_sqrt
UFUNC_HELPERS[np.square] = helper_square
UFUNC_HELPERS[np.reciprocal] = helper_reciprocal
UFUNC_HELPERS[np.cbrt] = helper_cbrt
UFUNC_HELPERS[np.core.umath._ones_like] = helper__ones_like
UFUNC_HELPERS[np.modf] = helper_modf
UFUNC_HELPERS[np.frexp] = helper_frexp


# TWO ARGUMENT UFUNCS

# two argument ufuncs that require dimensionless input and give
# dimensionless output
two_arg_dimensionless_ufuncs = (np.logaddexp, np.logaddexp2)
for ufunc in two_arg_dimensionless_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless

# two argument ufuncs that return a value with the same unit as the input
twoarg_invariant_ufuncs = (np.add, np.subtract, np.hypot, np.maximum,
                           np.minimum, np.fmin, np.fmax, np.nextafter,
                           np.remainder, np.mod, np.fmod)
for ufunc in twoarg_invariant_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_twoarg_invariant

# two argument ufuncs that need compatible inputs and return a boolean
twoarg_comparison_ufuncs = (np.greater, np.greater_equal, np.less,
                            np.less_equal, np.not_equal, np.equal)
for ufunc in twoarg_comparison_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_twoarg_comparison

# two argument ufuncs that do inverse trigonometry
twoarg_invtrig_ufuncs = (np.arctan2,)
# another private function in numpy; use getattr in case it disappears
if isinstance(getattr(np.core.umath, '_arg', None), np.ufunc):
    twoarg_invtrig_ufuncs += (np.core.umath._arg,)
for ufunc in twoarg_invtrig_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_twoarg_invtrig

# ufuncs handled as special cases
UFUNC_HELPERS[np.multiply] = helper_multiplication
if isinstance(getattr(np, 'matmul', None), np.ufunc):
    UFUNC_HELPERS[np.matmul] = helper_multiplication
UFUNC_HELPERS[np.divide] = helper_division
UFUNC_HELPERS[np.true_divide] = helper_division
UFUNC_HELPERS[np.power] = helper_power
UFUNC_HELPERS[np.ldexp] = helper_ldexp
UFUNC_HELPERS[np.copysign] = helper_copysign
UFUNC_HELPERS[np.floor_divide] = helper_twoarg_floor_divide
UFUNC_HELPERS[np.heaviside] = helper_heaviside
UFUNC_HELPERS[np.float_power] = helper_power
UFUNC_HELPERS[np.divmod] = helper_divmod
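
# Usage sketch (illustrative; not part of the module above, and assuming the
# quantity_helper package layout shown in this dump). A helper looked up in
# UFUNC_HELPERS maps the input units to a list of input converters plus the
# unit of the result:
#
#     import numpy as np
#     from astropy import units as u
#     from astropy.units.quantity_helper import UFUNC_HELPERS
#
#     helper = UFUNC_HELPERS[np.add]
#     converters, result_unit = helper(np.add, u.m, u.km)
#     # The second input is rescaled from km to m; the sum comes out in m.
#     assert result_unit == u.m
#     assert converters[0] is None and converters[1](1.) == 1000.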
# ======================================================================
# record sha256: e614e891d5169b4e5b13399b520b575c25aa225470975978e531e5ba2c461bf7
# ======================================================================
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Converters for Quantity."""

import numpy as np

from astropy.units.core import (UnitsError, UnitConversionError,
                                UnitTypeError, dimensionless_unscaled)

__all__ = ['can_have_arbitrary_unit', 'converters_and_unit',
           'check_output', 'UFUNC_HELPERS', 'UNSUPPORTED_UFUNCS']


class UfuncHelpers(dict):
    """Registry of unit conversion functions to help ufunc evaluation.

    Based on dict for quick access, but with a ``__missing__`` method to
    load helpers for additional modules such as scipy.special and erfa.

    Such modules should be registered using ``register_module``.
    """
    UNSUPPORTED = set()

    def register_module(self, module, names, importer):
        """Register (but do not import) a set of ufunc helpers.

        Parameters
        ----------
        module : str
            Name of the module with the ufuncs (e.g., 'scipy.special').
        names : iterable of str
            Names of the module ufuncs for which helpers are available.
        importer : callable
            Function that imports the ufuncs and returns a dict of helpers
            keyed by those ufuncs. If the value is `None`, the ufunc is
            explicitly *not* supported.
        """
        self.modules[module] = {'names': names,
                                'importer': importer}

    @property
    def modules(self):
        """Modules for which helpers are available (but not yet loaded)."""
        if not hasattr(self, '_modules'):
            self._modules = {}
        return self._modules

    def import_module(self, module):
        """Import the helpers from the given module using its helper function.

        Parameters
        ----------
        module : str
            Name of the module. Has to have been registered beforehand.
        """
        module_info = self.modules.pop(module)
        self.update(module_info['importer']())

    def __missing__(self, ufunc):
        """Called if a ufunc is not found.

        Check if the ufunc is in any of the available modules, and, if so,
        import the helpers for that module.
        """
        if ufunc in self.UNSUPPORTED:
            raise TypeError("Cannot use ufunc '{0}' with quantities"
                            .format(ufunc.__name__))

        for module, module_info in list(self.modules.items()):
            if ufunc.__name__ in module_info['names']:
                # A ufunc with the same name is supported by this module.
                # Of course, this doesn't necessarily mean it is the
                # right module. So, we try to let the importer do its work.
                # If it fails (e.g., for `scipy.special`), then that's
                # fine, just raise the TypeError. If it succeeds, but
                # the ufunc is not found, that is also fine: we will
                # enter __missing__ again and either find another
                # module or get the TypeError there.
                try:
                    self.import_module(module)
                except ImportError:
                    pass
                else:
                    return self[ufunc]

        raise TypeError("unknown ufunc {0}. If you believe this ufunc "
                        "should be supported, please raise an issue on "
                        "https://github.com/astropy/astropy"
                        .format(ufunc.__name__))

    def __setitem__(self, key, value):
        # Implementation note: in principle, we could just let `None`
        # mean that something is not implemented, but this means an
        # extra if clause for the output, slowing down the common
        # path where a ufunc is supported.
        if value is None:
            self.UNSUPPORTED |= {key}
            self.pop(key, None)
        else:
            super().__setitem__(key, value)
            self.UNSUPPORTED -= {key}


UFUNC_HELPERS = UfuncHelpers()
UNSUPPORTED_UFUNCS = UFUNC_HELPERS.UNSUPPORTED


def can_have_arbitrary_unit(value):
    """Test whether the items in value can have arbitrary units.

    Numbers whose value does not change upon a unit change, i.e.,
    zero, infinity, or not-a-number.

    Parameters
    ----------
    value : number or array

    Returns
    -------
    `True` if each member is either zero or not finite, `False` otherwise
    """
    return np.all(np.logical_or(np.equal(value, 0.), ~np.isfinite(value)))


def converters_and_unit(function, method, *args):
    """Determine the required converters and the unit of the ufunc result.

    Converters are functions required to convert to a ufunc's expected unit,
    e.g., radian for np.sin; or to ensure units of two inputs are consistent,
    e.g., for np.add. In these examples, the unit of the result would be
    dimensionless_unscaled for np.sin, and the same consistent unit for
    np.add.

    Parameters
    ----------
    function : `~numpy.ufunc`
        Numpy universal function
    method : str
        Method with which the function is evaluated, e.g.,
        '__call__', 'reduce', etc.
    *args : Quantity or other ndarray subclass
        Input arguments to the function

    Raises
    ------
    TypeError : when the specified function cannot be used with Quantities
        (e.g., np.logical_or), or when the routine does not know how to
        handle the specified function (in which case an issue should be
        raised on https://github.com/astropy/astropy).
    UnitTypeError : when the conversion to the required (or consistent) units
        is not possible.
    """
    # Check whether we support this ufunc, by getting the helper function
    # (defined in helpers) which returns a list of function(s) that convert
    # the input(s) to the unit required for the ufunc, as well as the unit
    # the result will have (a tuple of units if there are multiple outputs).
    ufunc_helper = UFUNC_HELPERS[function]

    if method == '__call__' or (method == 'outer' and function.nin == 2):
        # Find out the units of the arguments passed to the ufunc; usually,
        # at least one is a quantity, but for two-argument ufuncs, the second
        # could also be a Numpy array, etc. These are given unit=None.
        units = [getattr(arg, 'unit', None) for arg in args]

        # Determine possible conversion functions, and the result unit.
        converters, result_unit = ufunc_helper(function, *units)

        if any(converter is False for converter in converters):
            # for two-argument ufuncs with a quantity and a non-quantity,
            # the quantity normally needs to be dimensionless, *except*
            # if the non-quantity can have arbitrary unit, i.e., when it
            # is all zero, infinity or NaN. In that case, the non-quantity
            # can just have the unit of the quantity
            # (this allows, e.g., `q > 0.` independent of unit)
            maybe_arbitrary_arg = args[converters.index(False)]
            try:
                if can_have_arbitrary_unit(maybe_arbitrary_arg):
                    converters = [None, None]
                else:
                    raise UnitConversionError(
                        "Can only apply '{0}' function to "
                        "dimensionless quantities when other "
                        "argument is not a quantity (unless the "
                        "latter is all zero/infinity/nan)"
                        .format(function.__name__))
            except TypeError:
                # can_have_arbitrary_unit failed: arg could not be compared
                # with zero or checked to be finite. Then, ufunc will fail
                # too.
                raise TypeError("Unsupported operand type(s) for ufunc {0}: "
                                "'{1}' and '{2}'"
                                .format(function.__name__,
                                        args[0].__class__.__name__,
                                        args[1].__class__.__name__))

        # In the case of np.power and np.float_power, the unit itself needs
        # to be modified by an amount that depends on one of the input
        # values, so we need to treat this as a special case.
        # TODO: find a better way to deal with this.
        if result_unit is False:
            if units[0] is None or units[0] == dimensionless_unscaled:
                result_unit = dimensionless_unscaled
            else:
                if units[1] is None:
                    p = args[1]
                else:
                    p = args[1].to(dimensionless_unscaled).value

                try:
                    result_unit = units[0] ** p
                except ValueError as exc:
                    # Changing the unit does not work for, e.g., array-shaped
                    # power, but this is OK if we're (scaled) dimensionless.
                    try:
                        converters[0] = units[0]._get_converter(
                            dimensionless_unscaled)
                    except UnitConversionError:
                        raise exc
                    else:
                        result_unit = dimensionless_unscaled

    else:
        # methods for which the unit should stay the same
        nin = function.nin
        unit = getattr(args[0], 'unit', None)
        if method == 'at' and nin <= 2:
            if nin == 1:
                units = [unit]
            else:
                units = [unit, getattr(args[2], 'unit', None)]

            converters, result_unit = ufunc_helper(function, *units)

            # ensure there is no 'converter' for indices (2nd argument)
            converters.insert(1, None)

        elif method in {'reduce', 'accumulate', 'reduceat'} and nin == 2:
            converters, result_unit = ufunc_helper(function, unit, unit)
            converters = converters[:1]
            if method == 'reduceat':
                # add 'scale' for indices (2nd argument)
                converters += [None]

        else:
            if method in {'reduce', 'accumulate',
                          'reduceat', 'outer'} and nin != 2:
                raise ValueError("{0} only supported for binary functions"
                                 .format(method))

            raise TypeError("Unexpected ufunc method {0}. If this should "
                            "work, please raise an issue on "
                            "https://github.com/astropy/astropy"
                            .format(method))

        # for all but __call__ method, scaling is not allowed
        if unit is not None and result_unit is None:
            raise TypeError("Cannot use '{1}' method on ufunc {0} with a "
                            "Quantity instance as the result is not a "
                            "Quantity.".format(function.__name__, method))

        if (converters[0] is not None or
                (unit is not None and unit is not result_unit and
                 (not result_unit.is_equivalent(unit) or
                  result_unit.to(unit) != 1.))):
            # NOTE: this cannot be the more logical UnitTypeError, since
            # then things like np.cumprod will no longer fail (they check
            # for TypeError).
            raise UnitsError("Cannot use '{1}' method on ufunc {0} with a "
                             "Quantity instance as it would change the unit."
                             .format(function.__name__, method))

    return converters, result_unit


def check_output(output, unit, inputs, function=None):
    """Check that function output can be stored in the output array given.

    Parameters
    ----------
    output : array or `~astropy.units.Quantity` or tuple
        Array that should hold the function output (or tuple of such arrays).
    unit : `~astropy.units.Unit` or None, or tuple
        Unit that the output will have, or `None` for pure numbers
        (should be tuple of same if output is a tuple of outputs).
    inputs : tuple
        Any input arguments. These should be castable to the output.
    function : callable
        The function that will be producing the output. If given, used to
        give a more informative error message.

    Returns
    -------
    arrays : `~numpy.ndarray` view of ``output`` (or tuple of such views).

    Raises
    ------
    UnitTypeError : If ``unit`` is inconsistent with the class of ``output``.
    TypeError : If the ``inputs`` cannot be cast safely to ``output``.
    """
    if isinstance(output, tuple):
        return tuple(check_output(output_, unit_, inputs, function)
                     for output_, unit_ in zip(output, unit))

    # ``None`` indicates no actual array is needed. This can happen, e.g.,
    # with np.modf(a, out=(None, b)).
    if output is None:
        return None

    if hasattr(output, '__quantity_subclass__'):
        # Check that we're not trying to store a plain Numpy array or a
        # Quantity with an inconsistent unit (e.g., not angular for Angle).
        if unit is None:
            raise TypeError("Cannot store non-quantity output{0} in {1} "
                            "instance".format(
                                (" from {0} function".format(
                                    function.__name__)
                                 if function is not None else ""),
                                type(output)))

        if output.__quantity_subclass__(unit)[0] is not type(output):
            raise UnitTypeError(
                "Cannot store output with unit '{0}'{1} "
                "in {2} instance. Use {3} instance instead."
                .format(unit,
                        (" from {0} function".format(function.__name__)
                         if function is not None else ""),
                        type(output),
                        output.__quantity_subclass__(unit)[0]))

        # Turn into ndarray, so we do not loop into array_wrap/array_ufunc
        # if the output is used to store results of a function.
        output = output.view(np.ndarray)
    else:
        # output is not a Quantity, so cannot obtain a unit.
        if not (unit is None or unit is dimensionless_unscaled):
            raise UnitTypeError("Cannot store quantity with dimension "
                                "{0}in a non-Quantity instance."
                                .format("" if function is None else
                                        "resulting from {0} function "
                                        .format(function.__name__)))

    # check we can handle the dtype (e.g., that we are not int
    # when float is required).
    if not np.can_cast(np.result_type(*inputs), output.dtype,
                       casting='same_kind'):
        raise TypeError("Arguments cannot be cast safely to inplace "
                        "output with dtype={0}".format(output.dtype))

    return output
# ======================================================================
# record sha256: a0a4850efbfdfecac31d9c2d49fb372eb5863c0d2617e3d56bb02460dd8b81e2
# ======================================================================
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Helper functions for Quantity.

In particular, this implements the logic that determines scaling and result
units for a given ufunc, given input units.
"""
from .converters import *

# By importing helpers, all the unit conversion functions needed for
# numpy ufuncs are defined.
from . import helpers
# For scipy.special and erfa, importing the helper modules ensures
# the definitions are added as modules to UFUNC_HELPERS, to be loaded
# on demand.
from . import scipy_special, erfa
# ======================================================================
# record sha256: 8b9401e22dc418f0ec2c65fec5ea3ee16fc4e49f0385b3ba7b99c4bc725006c5
# ======================================================================
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Quantity helpers for the ERFA ufuncs."""

from astropy.units.core import (UnitsError, UnitTypeError,
                                dimensionless_unscaled)
from . import UFUNC_HELPERS
from .helpers import get_converter, helper_invariant, helper_multiplication


erfa_ufuncs = ('s2c', 's2p', 'c2s', 'p2s', 'pm', 'pdp', 'pxp', 'rxp')


def helper_s2c(f, unit1, unit2):
    from astropy.units.si import radian
    try:
        return [get_converter(unit1, radian),
                get_converter(unit2, radian)], dimensionless_unscaled
    except UnitsError:
        raise UnitTypeError("Can only apply '{0}' function to "
                            "quantities with angle units"
                            .format(f.__name__))


def helper_s2p(f, unit1, unit2, unit3):
    from astropy.units.si import radian
    try:
        return [get_converter(unit1, radian),
                get_converter(unit2, radian), None], unit3
    except UnitsError:
        raise UnitTypeError("Can only apply '{0}' function to "
                            "quantities with angle units"
                            .format(f.__name__))


def helper_c2s(f, unit1):
    from astropy.units.si import radian
    return [None], (radian, radian)


def helper_p2s(f, unit1):
    from astropy.units.si import radian
    return [None], (radian, radian, unit1)


def get_erfa_helpers():
    from astropy._erfa import ufunc as erfa_ufunc
    ERFA_HELPERS = {}
    ERFA_HELPERS[erfa_ufunc.s2c] = helper_s2c
    ERFA_HELPERS[erfa_ufunc.s2p] = helper_s2p
    ERFA_HELPERS[erfa_ufunc.c2s] = helper_c2s
    ERFA_HELPERS[erfa_ufunc.p2s] = helper_p2s
    ERFA_HELPERS[erfa_ufunc.pm] = helper_invariant
    ERFA_HELPERS[erfa_ufunc.pdp] = helper_multiplication
    ERFA_HELPERS[erfa_ufunc.pxp] = helper_multiplication
    ERFA_HELPERS[erfa_ufunc.rxp] = helper_multiplication
    return ERFA_HELPERS


UFUNC_HELPERS.register_module('astropy._erfa.ufunc', erfa_ufuncs,
                              get_erfa_helpers)
# ======================================================================
# record sha256: 5210581995ebb4c0cbf166f86115ed744fa73cefdd06523814ab66b4903574d9
# ======================================================================
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Quantity helpers for the scipy.special ufuncs.

Available ufuncs in this module are at
https://docs.scipy.org/doc/scipy/reference/special.html
"""
from astropy.units.core import (UnitsError, UnitTypeError,
                                dimensionless_unscaled)

from . import UFUNC_HELPERS
from .helpers import (get_converter,
                      helper_dimensionless_to_dimensionless,
                      helper_cbrt,
                      helper_two_arg_dimensionless)


# ufuncs that require dimensionless input and give dimensionless output.
dimensionless_to_dimensionless_sps_ufuncs = (
    'erf', 'gamma', 'gammasgn', 'psi', 'rgamma', 'erfc', 'erfcx', 'erfi',
    'wofz', 'dawsn', 'entr', 'exprel', 'expm1', 'log1p', 'exp2', 'exp10',
    'j0', 'j1', 'y0', 'y1', 'i0', 'i0e', 'i1', 'i1e', 'k0', 'k0e', 'k1',
    'k1e', 'itj0y0', 'it2j0y0', 'iti0k0', 'it2i0k0', 'loggamma')

scipy_special_ufuncs = dimensionless_to_dimensionless_sps_ufuncs

# ufuncs that require input in degrees and give dimensionless output.
degree_to_dimensionless_sps_ufuncs = ('cosdg', 'sindg', 'tandg', 'cotdg')
scipy_special_ufuncs += degree_to_dimensionless_sps_ufuncs

# ufuncs that require 2 dimensionless inputs and give dimensionless output.
# note: 'jv' and 'jn' are aliases in some scipy versions, which will
# cause the same key to be written twice, but since both are handled by the
# same helper there is no harm done.
two_arg_dimensionless_sps_ufuncs = (
    'jv', 'jn', 'jve', 'yn', 'yv', 'yve', 'kn', 'kv', 'kve', 'iv', 'ive',
    'hankel1', 'hankel1e', 'hankel2', 'hankel2e')
scipy_special_ufuncs += two_arg_dimensionless_sps_ufuncs

# ufuncs handled as special cases
scipy_special_ufuncs += ('cbrt', 'radian')


def helper_degree_to_dimensionless(f, unit):
    from astropy.units.si import degree
    try:
        return [get_converter(unit, degree)], dimensionless_unscaled
    except UnitsError:
        raise UnitTypeError("Can only apply '{0}' function to "
                            "quantities with angle units"
                            .format(f.__name__))


def helper_degree_minute_second_to_radian(f, unit1, unit2, unit3):
    from astropy.units.si import degree, arcmin, arcsec, radian
    try:
        return [get_converter(unit1, degree),
                get_converter(unit2, arcmin),
                get_converter(unit3, arcsec)], radian
    except UnitsError:
        raise UnitTypeError("Can only apply '{0}' function to "
                            "quantities with angle units"
                            .format(f.__name__))


def get_scipy_special_helpers():
    import scipy.special as sps
    SCIPY_HELPERS = {}
    for name in dimensionless_to_dimensionless_sps_ufuncs:
        # TODO: Revert https://github.com/astropy/astropy/pull/7219 when
        # astropy requires scipy>=0.18, and loggamma is guaranteed
        # to exist.
        # See https://github.com/astropy/astropy/issues/7159
        ufunc = getattr(sps, name, None)
        if ufunc:
            SCIPY_HELPERS[ufunc] = helper_dimensionless_to_dimensionless

    for ufunc in degree_to_dimensionless_sps_ufuncs:
        SCIPY_HELPERS[getattr(sps, ufunc)] = helper_degree_to_dimensionless

    for ufunc in two_arg_dimensionless_sps_ufuncs:
        SCIPY_HELPERS[getattr(sps, ufunc)] = helper_two_arg_dimensionless

    # ufuncs handled as special cases
    SCIPY_HELPERS[sps.cbrt] = helper_cbrt
    SCIPY_HELPERS[sps.radian] = helper_degree_minute_second_to_radian
    return SCIPY_HELPERS


UFUNC_HELPERS.register_module('scipy.special', scipy_special_ufuncs,
                              get_scipy_special_helpers)
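
# Usage sketch (illustrative; not part of the module above, and requires
# scipy to be installed). Nothing is imported from scipy.special until one of
# its ufuncs is actually applied to a Quantity; the registry's __missing__
# hook then runs get_scipy_special_helpers():
#
#     import numpy as np
#     import scipy.special as sps
#     from astropy import units as u
#
#     # erf requires dimensionless input; sindg wants its argument in
#     # degrees, so a Quantity in radians is converted before the ufunc runs.
#     assert sps.erf(0. * u.dimensionless_unscaled) == 0.
#     assert np.isclose(sps.sindg((np.pi / 6) * u.rad).value, 0.5)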
# ======================================================================
# record sha256: a3c54f2d0a97188a826cc8afab5e04e323d9d2378764caaac074a3aa77099560
# ======================================================================
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Separate tests specifically for equivalencies."""

# THIRD-PARTY
import warnings

import pytest
import numpy as np
from numpy.testing import assert_allclose

# LOCAL
from astropy import units as u
from astropy.units.equivalencies import Equivalency
from astropy import constants, cosmology
from astropy.tests.helper import assert_quantity_allclose


def test_dimensionless_angles():
    # test that the angles_dimensionless option allows one to change
    # by any order in radian in the unit (#1161)
    rad1 = u.dimensionless_angles()
    assert u.radian.to(1, equivalencies=rad1) == 1.
    assert u.deg.to(1, equivalencies=rad1) == u.deg.to(u.rad)
    assert u.steradian.to(1, equivalencies=rad1) == 1.
    assert u.dimensionless_unscaled.to(u.steradian, equivalencies=rad1) == 1.
    # now quantities
    assert (1.*u.radian).to_value(1, equivalencies=rad1) == 1.
    assert (1.*u.deg).to_value(1, equivalencies=rad1) == u.deg.to(u.rad)
    assert (1.*u.steradian).to_value(1, equivalencies=rad1) == 1.
    # more complicated example
    I = 1.e45 * u.g * u.cm**2
    Omega = u.cycle / (1.*u.s)
    Erot = 0.5 * I * Omega**2
    # check that equivalency makes this work
    Erot_in_erg1 = Erot.to(u.erg, equivalencies=rad1)
    # and check that value is correct
    assert_allclose(Erot_in_erg1.value, (Erot/u.radian**2).to_value(u.erg))

    # test built-in equivalency in subclass
    class MyRad1(u.Quantity):
        _equivalencies = rad1

    phase = MyRad1(1., u.cycle)
    assert phase.to_value(1) == u.cycle.to(u.radian)


@pytest.mark.parametrize('log_unit', (u.mag, u.dex, u.dB))
def test_logarithmic(log_unit):
    # check conversion of mag, dB, and dex to dimensionless and vice versa
    with pytest.raises(u.UnitsError):
        log_unit.to(1, 0.)
    with pytest.raises(u.UnitsError):
        u.dimensionless_unscaled.to(log_unit)

    assert log_unit.to(1, 0., equivalencies=u.logarithmic()) == 1.
    assert u.dimensionless_unscaled.to(log_unit,
                                       equivalencies=u.logarithmic()) == 0.
    # also try with quantities
    q_dex = np.array([0., -1., 1., 2.]) * u.dex
    q_expected = 10.**q_dex.value * u.dimensionless_unscaled
    q_log_unit = q_dex.to(log_unit)
    assert np.all(q_log_unit.to(1, equivalencies=u.logarithmic())
                  == q_expected)
    assert np.all(q_expected.to(log_unit, equivalencies=u.logarithmic())
                  == q_log_unit)
    with u.set_enabled_equivalencies(u.logarithmic()):
        assert np.all(np.abs(q_log_unit - q_expected.to(log_unit))
                      < 1.e-10*log_unit)


doppler_functions = [u.doppler_optical, u.doppler_radio,
                     u.doppler_relativistic]


@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_frequency_0(function):
    rest = 105.01 * u.GHz
    velo0 = rest.to(u.km/u.s, equivalencies=function(rest))
    assert velo0.value == 0


@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_wavelength_0(function):
    rest = 105.01 * u.GHz
    q1 = 0.00285489437196 * u.m
    velo0 = q1.to(u.km/u.s, equivalencies=function(rest))
    np.testing.assert_almost_equal(velo0.value, 0, decimal=6)


@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_energy_0(function):
    rest = 105.01 * u.GHz
    q1 = 0.0004342864612223407 * u.eV
    velo0 = q1.to(u.km/u.s, equivalencies=function(rest))
    np.testing.assert_almost_equal(velo0.value, 0, decimal=6)


@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_frequency_circle(function):
    rest = 105.01 * u.GHz
    shifted = 105.03 * u.GHz
    velo = shifted.to(u.km/u.s, equivalencies=function(rest))
    freq = velo.to(u.GHz, equivalencies=function(rest))
    np.testing.assert_almost_equal(freq.value, shifted.value, decimal=7)


@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_wavelength_circle(function):
    rest = 105.01 * u.nm
    shifted = 105.03 * u.nm
    velo = shifted.to(u.km / u.s, equivalencies=function(rest))
    wav = velo.to(u.nm, equivalencies=function(rest))
    np.testing.assert_almost_equal(wav.value, shifted.value, decimal=7)


@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_energy_circle(function):
    rest = 1.0501 * u.eV
    shifted = 1.0503 * u.eV
    velo = shifted.to(u.km / u.s, equivalencies=function(rest))
    en = velo.to(u.eV, equivalencies=function(rest))
    np.testing.assert_almost_equal(en.value, shifted.value, decimal=7)


values_ghz = (999.899940784289, 999.8999307714406, 999.8999357778647)


@pytest.mark.parametrize(('function', 'value'),
                         list(zip(doppler_functions, values_ghz)))
def test_30kms(function, value):
    rest = 1000 * u.GHz
    velo = 30 * u.km/u.s
    shifted = velo.to(u.GHz, equivalencies=function(rest))
    np.testing.assert_almost_equal(shifted.value, value, decimal=7)


bad_values = (5, 5*u.Jy, None)


@pytest.mark.parametrize(('function', 'value'),
                         list(zip(doppler_functions, bad_values)))
def test_bad_restfreqs(function, value):
    with pytest.raises(u.UnitsError):
        function(value)


def test_massenergy():
    # The relative tolerance of these tests is set by the uncertainties
    # in the charge of the electron, which is known to about
    # 3e-9 (relative tolerance). Therefore, we limit the
    # precision of the tests to 1e-7 to be safe. The masses are
    # (loosely) known to ~ 5e-8 rel tolerance, so we couldn't test to
    # 1e-7 if we used the values from astropy.constants; that is,
    # they might change by more than 1e-7 in some future update, so instead
    # they are hardwired here.

    # Electron, proton, neutron, muon, 1g
    mass_eV = u.Quantity(
        [510.998928e3, 938.272046e6, 939.565378e6,
         105.6583715e6, 5.60958884539e32], u.eV)
    mass_g = u.Quantity(
        [9.10938291e-28, 1.672621777e-24, 1.674927351e-24,
         1.88353147e-25, 1], u.g)
    # Test both ways
    assert np.allclose(mass_eV.to_value(u.g, equivalencies=u.mass_energy()),
                       mass_g.value, rtol=1e-7)
    assert np.allclose(mass_g.to_value(u.eV, equivalencies=u.mass_energy()),
                       mass_eV.value, rtol=1e-7)

    # Basic tests of 'derived' equivalencies
    # Surface density
    sdens_eV = u.Quantity(5.60958884539e32, u.eV / u.m**2)
    sdens_g = u.Quantity(1e-4, u.g / u.cm**2)
    assert np.allclose(sdens_eV.to_value(u.g / u.cm**2,
                                         equivalencies=u.mass_energy()),
                       sdens_g.value, rtol=1e-7)
    assert np.allclose(sdens_g.to_value(u.eV / u.m**2,
                                        equivalencies=u.mass_energy()),
                       sdens_eV.value, rtol=1e-7)

    # Density
    dens_eV = u.Quantity(5.60958884539e32, u.eV / u.m**3)
    dens_g = u.Quantity(1e-6, u.g / u.cm**3)
    assert np.allclose(dens_eV.to_value(u.g / u.cm**3,
                                        equivalencies=u.mass_energy()),
                       dens_g.value, rtol=1e-7)
    assert np.allclose(dens_g.to_value(u.eV / u.m**3,
                                       equivalencies=u.mass_energy()),
                       dens_eV.value, rtol=1e-7)

    # Power
    pow_eV = u.Quantity(5.60958884539e32, u.eV / u.s)
    pow_g = u.Quantity(1, u.g / u.s)
    assert np.allclose(pow_eV.to_value(u.g / u.s,
                                       equivalencies=u.mass_energy()),
                       pow_g.value, rtol=1e-7)
    assert np.allclose(pow_g.to_value(u.eV / u.s,
                                      equivalencies=u.mass_energy()),
                       pow_eV.value, rtol=1e-7)


def test_is_equivalent():
    assert u.m.is_equivalent(u.pc)
    assert u.cycle.is_equivalent(u.mas)
    assert not u.cycle.is_equivalent(u.dimensionless_unscaled)
    assert u.cycle.is_equivalent(u.dimensionless_unscaled,
                                 u.dimensionless_angles())
    assert not (u.Hz.is_equivalent(u.J))
    assert u.Hz.is_equivalent(u.J, u.spectral())
    assert u.J.is_equivalent(u.Hz, u.spectral())
    assert u.pc.is_equivalent(u.arcsecond, u.parallax())
    assert u.arcminute.is_equivalent(u.au, u.parallax())

    # Pass a tuple for multiple possibilities
    assert u.cm.is_equivalent((u.m, u.s, u.kg))
    assert u.ms.is_equivalent((u.m, u.s, u.kg))
    assert u.g.is_equivalent((u.m, u.s, u.kg))
    assert not u.L.is_equivalent((u.m, u.s, u.kg))
    assert not (u.km / u.s).is_equivalent((u.m, u.s, u.kg))


def test_parallax():
    a = u.arcsecond.to(u.pc, 10, u.parallax())
    assert_allclose(a, 0.10)
    b = u.pc.to(u.arcsecond, a, u.parallax())
    assert_allclose(b, 10)

    a = u.arcminute.to(u.au, 1, u.parallax())
    assert_allclose(a, 3437.7467916)
    b = u.au.to(u.arcminute, a, u.parallax())
    assert_allclose(b, 1)

    val = (-1 * u.mas).to(u.pc, u.parallax())
    assert np.isnan(val.value)

    val = (-1 * u.mas).to_value(u.pc, u.parallax())
    assert np.isnan(val)


def test_parallax2():
    a = u.arcsecond.to(u.pc, [0.1, 2.5], u.parallax())
    assert_allclose(a, [10, 0.4])


def test_spectral():
    a = u.AA.to(u.Hz, 1, u.spectral())
    assert_allclose(a, 2.9979245799999995e+18)
    b = u.Hz.to(u.AA, a, u.spectral())
    assert_allclose(b, 1)

    a = u.AA.to(u.MHz, 1, u.spectral())
    assert_allclose(a, 2.9979245799999995e+12)
    b = u.MHz.to(u.AA, a, u.spectral())
    assert_allclose(b, 1)

    a = u.m.to(u.Hz, 1, u.spectral())
    assert_allclose(a, 2.9979245799999995e+8)
    b = u.Hz.to(u.m, a, u.spectral())
    assert_allclose(b, 1)


def test_spectral2():
    a = u.nm.to(u.J, 500, u.spectral())
    assert_allclose(a, 3.972891366538605e-19)
    b = u.J.to(u.nm, a, u.spectral())
    assert_allclose(b, 500)

    a = u.AA.to(u.Hz, 1, u.spectral())
    b = u.Hz.to(u.J, a, u.spectral())
    c = u.AA.to(u.J, 1, u.spectral())
    assert_allclose(b, c)

    c = u.J.to(u.Hz, b, u.spectral())
    assert_allclose(a, c)


def test_spectral3():
    a = u.nm.to(u.Hz, [1000, 2000], u.spectral())
    assert_allclose(a, [2.99792458e+14, 1.49896229e+14])


@pytest.mark.parametrize(
    ('in_val', 'in_unit'),
    [([0.1, 5000.0, 10000.0], u.AA),
     ([1e+5, 2.0, 1.0], u.micron ** -1),
     ([2.99792458e+19, 5.99584916e+14, 2.99792458e+14], u.Hz),
     ([1.98644568e-14, 3.97289137e-19, 1.98644568e-19], u.J)])
def test_spectral4(in_val, in_unit):
    """Wave number conversion w.r.t. wavelength, freq, and energy."""
    # Spectroscopic and angular
    out_units = [u.micron ** -1, u.radian / u.micron]
    answers = [[1e+5, 2.0, 1.0], [6.28318531e+05, 12.5663706, 6.28318531]]

    for out_unit, ans in zip(out_units, answers):
        # Forward
        a = in_unit.to(out_unit, in_val, u.spectral())
        assert_allclose(a, ans)

        # Backward
        b = out_unit.to(in_unit, ans, u.spectral())
        assert_allclose(b, in_val)


def test_spectraldensity2():
    # flux density
    flambda = u.erg / u.angstrom / u.cm ** 2 / u.s
    fnu = u.erg / u.Hz / u.cm ** 2 / u.s

    a = flambda.to(fnu, 1, u.spectral_density(u.Quantity(3500, u.AA)))
    assert_allclose(a, 4.086160166177361e-12)

    # luminosity density
    llambda = u.erg / u.angstrom / u.s
    lnu = u.erg / u.Hz / u.s

    a = llambda.to(lnu, 1, u.spectral_density(u.Quantity(3500, u.AA)))
    assert_allclose(a, 4.086160166177361e-12)

    a = lnu.to(llambda, 1, u.spectral_density(u.Quantity(3500, u.AA)))
    assert_allclose(a, 2.44728537142857e11)


def test_spectraldensity3():
    # Define F_nu in Jy
    f_nu = u.Jy

    # Define F_lambda in ergs / cm^2 / s / micron
    f_lambda = u.erg / u.cm ** 2 / u.s / u.micron

    # 1 GHz
    one_ghz = u.Quantity(1, u.GHz)

    # Convert to ergs / cm^2 / s / Hz
    assert_allclose(f_nu.to(u.erg / u.cm ** 2 / u.s / u.Hz, 1.),
                    1.e-23, 10)

    # Convert to ergs / cm^2 / s at 10 Ghz
    assert_allclose(f_nu.to(u.erg / u.cm ** 2 / u.s, 1.,
                            equivalencies=u.spectral_density(one_ghz * 10)),
                    1.e-13, 10)

    # Convert to F_lambda at 1 Ghz
    assert_allclose(f_nu.to(f_lambda, 1.,
                            equivalencies=u.spectral_density(one_ghz)),
                    3.335640951981521e-20, 10)

    # Convert to Jy at 1 Ghz
    assert_allclose(f_lambda.to(u.Jy, 1.,
                                equivalencies=u.spectral_density(one_ghz)),
                    1. / 3.335640951981521e-20, 10)

    # Convert to ergs / cm^2 / s at 10 microns
    assert_allclose(f_lambda.to(u.erg / u.cm ** 2 / u.s, 1.,
                                equivalencies=u.spectral_density(
                                    u.Quantity(10, u.micron))),
                    10., 10)


def test_spectraldensity4():
    """PHOTLAM and PHOTNU conversions."""
    flam = u.erg / (u.cm ** 2 * u.s * u.AA)
    fnu = u.erg / (u.cm ** 2 * u.s * u.Hz)
    photlam = u.photon / (u.cm ** 2 * u.s * u.AA)
    photnu = u.photon / (u.cm ** 2 * u.s * u.Hz)

    wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
    flux_photlam = [9.7654e-3, 1.003896e-2, 9.78473e-3]
    flux_photnu = [8.00335589e-14, 8.23668949e-14, 8.03700310e-14]
    flux_flam = [3.9135e-14, 4.0209e-14, 3.9169e-14]
    flux_fnu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
    flux_jy = [3.20735792e-2, 3.29903646e-2, 3.21727226e-2]
    flux_stmag = [12.41858665, 12.38919182, 12.41764379]
    flux_abmag = [12.63463143, 12.60403221, 12.63128047]

    # PHOTLAM <--> FLAM
    assert_allclose(photlam.to(
        flam, flux_photlam, u.spectral_density(wave)), flux_flam, rtol=1e-6)
    assert_allclose(flam.to(
        photlam, flux_flam, u.spectral_density(wave)), flux_photlam,
        rtol=1e-6)

    # PHOTLAM <--> FNU
    assert_allclose(photlam.to(
        fnu, flux_photlam, u.spectral_density(wave)), flux_fnu, rtol=1e-6)
    assert_allclose(fnu.to(
        photlam, flux_fnu, u.spectral_density(wave)), flux_photlam,
        rtol=1e-6)

    # PHOTLAM <--> Jy
    assert_allclose(photlam.to(
        u.Jy, flux_photlam, u.spectral_density(wave)), flux_jy, rtol=1e-6)
    assert_allclose(u.Jy.to(
        photlam, flux_jy, u.spectral_density(wave)), flux_photlam,
        rtol=1e-6)

    # PHOTLAM <--> PHOTNU
    assert_allclose(photlam.to(
        photnu, flux_photlam, u.spectral_density(wave)), flux_photnu,
        rtol=1e-6)
    assert_allclose(photnu.to(
        photlam, flux_photnu, u.spectral_density(wave)), flux_photlam,
        rtol=1e-6)

    # PHOTNU <--> FNU
    assert_allclose(photnu.to(
        fnu, flux_photnu, u.spectral_density(wave)), flux_fnu, rtol=1e-6)
    assert_allclose(fnu.to(
        photnu, flux_fnu, u.spectral_density(wave)), flux_photnu,
        rtol=1e-6)

    # PHOTNU <--> FLAM
    assert_allclose(photnu.to(
        flam, flux_photnu, u.spectral_density(wave)), flux_flam, rtol=1e-6)
    assert_allclose(flam.to(
        photnu, flux_flam, u.spectral_density(wave)), flux_photnu,
        rtol=1e-6)

    # PHOTLAM <--> STMAG
    assert_allclose(photlam.to(
        u.STmag, flux_photlam, u.spectral_density(wave)), flux_stmag,
        rtol=1e-6)
    assert_allclose(u.STmag.to(
        photlam, flux_stmag, u.spectral_density(wave)), flux_photlam,
        rtol=1e-6)

    # PHOTLAM <--> ABMAG
    assert_allclose(photlam.to(
        u.ABmag, flux_photlam, u.spectral_density(wave)), flux_abmag,
        rtol=1e-6)
    assert_allclose(u.ABmag.to(
        photlam, flux_abmag, u.spectral_density(wave)), flux_photlam,
        rtol=1e-6)


def test_spectraldensity5():
    """Test photon luminosity density conversions."""
    L_la = u.erg / (u.s * u.AA)
    L_nu = u.erg / (u.s * u.Hz)
    phot_L_la = u.photon / (u.s * u.AA)
    phot_L_nu = u.photon / (u.s * u.Hz)

    wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
    flux_phot_L_la = [9.7654e-3, 1.003896e-2, 9.78473e-3]
    flux_phot_L_nu = [8.00335589e-14, 8.23668949e-14, 8.03700310e-14]
    flux_L_la = [3.9135e-14, 4.0209e-14, 3.9169e-14]
    flux_L_nu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]

    # PHOTLAM <--> FLAM
    assert_allclose(phot_L_la.to(
        L_la, flux_phot_L_la, u.spectral_density(wave)), flux_L_la,
        rtol=1e-6)
    assert_allclose(L_la.to(
        phot_L_la, flux_L_la, u.spectral_density(wave)), flux_phot_L_la,
        rtol=1e-6)

    # PHOTLAM <--> FNU
    assert_allclose(phot_L_la.to(
        L_nu, flux_phot_L_la, u.spectral_density(wave)), flux_L_nu,
        rtol=1e-6)
    assert_allclose(L_nu.to(
        phot_L_la, flux_L_nu, u.spectral_density(wave)), flux_phot_L_la,
        rtol=1e-6)

    # PHOTLAM <--> PHOTNU
    assert_allclose(phot_L_la.to(
        phot_L_nu, flux_phot_L_la, u.spectral_density(wave)),
        flux_phot_L_nu, rtol=1e-6)
    assert_allclose(phot_L_nu.to(
        phot_L_la, flux_phot_L_nu, u.spectral_density(wave)),
        flux_phot_L_la, rtol=1e-6)

    # PHOTNU <--> FNU
    assert_allclose(phot_L_nu.to(
        L_nu, flux_phot_L_nu, u.spectral_density(wave)), flux_L_nu,
        rtol=1e-6)
    assert_allclose(L_nu.to(
        phot_L_nu, flux_L_nu, u.spectral_density(wave)), flux_phot_L_nu,
        rtol=1e-6)

    # PHOTNU <--> FLAM
    assert_allclose(phot_L_nu.to(
        L_la, flux_phot_L_nu, u.spectral_density(wave)), flux_L_la,
        rtol=1e-6)
    assert_allclose(L_la.to(
        phot_L_nu, flux_L_la, u.spectral_density(wave)), flux_phot_L_nu,
        rtol=1e-6)


def test_equivalent_units():
    from astropy.units import imperial
    with u.add_enabled_units(imperial):
        units = u.g.find_equivalent_units()
        units_set = set(units)
        match = set(
            [u.M_e, u.M_p, u.g, u.kg, u.solMass, u.t, u.u, u.M_earth,
             u.M_jup, imperial.oz, imperial.lb, imperial.st, imperial.ton,
             imperial.slug])
        assert units_set == match

    r = repr(units)
    assert r.count('\n') == len(units) + 2


def test_equivalent_units2():
    units = set(u.Hz.find_equivalent_units(u.spectral()))
    match = set(
        [u.AU, u.Angstrom, u.Hz, u.J, u.Ry, u.cm, u.eV, u.erg, u.lyr,
         u.m, u.micron, u.pc, u.solRad, u.Bq, u.Ci, u.k, u.earthRad,
         u.jupiterRad])
    assert units == match

    from astropy.units import imperial
    with u.add_enabled_units(imperial):
        units = set(u.Hz.find_equivalent_units(u.spectral()))
        match = set(
            [u.AU, u.Angstrom, imperial.BTU, u.Hz, u.J, u.Ry,
             imperial.cal, u.cm, u.eV, u.erg, imperial.ft, imperial.fur,
             imperial.inch, imperial.kcal, u.lyr, u.m, imperial.mi,
             imperial.mil, u.micron, u.pc, u.solRad, imperial.yd, u.Bq,
             u.Ci, imperial.nmi, u.k, u.earthRad, u.jupiterRad])
        assert units == match

    units = set(u.Hz.find_equivalent_units(u.spectral()))
    match = set(
        [u.AU, u.Angstrom, u.Hz, u.J, u.Ry, u.cm, u.eV, u.erg, u.lyr,
         u.m, u.micron, u.pc, u.solRad, u.Bq, u.Ci, u.k, u.earthRad,
         u.jupiterRad])
    assert units == match


def test_trivial_equivalency():
    assert u.m.to(u.kg, equivalencies=[(u.m, u.kg)]) == 1.0


def test_invalid_equivalency():
    with pytest.raises(ValueError):
        u.m.to(u.kg, equivalencies=[(u.m,)])

    with pytest.raises(ValueError):
        u.m.to(u.kg, equivalencies=[(u.m, 5.0)])


def test_irrelevant_equivalency():
    with pytest.raises(u.UnitsError):
        u.m.to(u.kg, equivalencies=[(u.m, u.l)])


def test_brightness_temperature():
    omega_B = np.pi * (50 * u.arcsec) ** 2
    nu = u.GHz * 5
    tb = 7.052590289134352 * u.K
    np.testing.assert_almost_equal(
        tb.value, (1 * u.Jy).to_value(
            u.K, equivalencies=u.brightness_temperature(
                nu, beam_area=omega_B)))
    np.testing.assert_almost_equal(
        1.0, tb.to_value(
            u.Jy, equivalencies=u.brightness_temperature(
                nu, beam_area=omega_B)))


def test_swapped_args_brightness_temperature():
    """
    #5173 changes the order of arguments but accepts the old (deprecated)
    args
    """
    omega_B = np.pi * (50 * u.arcsec) ** 2
    nu = u.GHz * 5
    tb = 7.052590289134352 * u.K
    # https://docs.pytest.org/en/latest/warnings.html#ensuring-function-triggers
    with warnings.catch_warnings():
        warnings.simplefilter('always')
        with pytest.warns(DeprecationWarning) as warning_list:
            result = (1*u.Jy).to(
                u.K, equivalencies=u.brightness_temperature(omega_B, nu))
            roundtrip = result.to(
                u.Jy, equivalencies=u.brightness_temperature(omega_B, nu))
        assert len(warning_list) == 2
    np.testing.assert_almost_equal(tb.value, result.value)
    np.testing.assert_almost_equal(roundtrip.value, 1)


def test_surfacebrightness():
    sb = 50*u.MJy/u.sr
    k = sb.to(u.K, u.brightness_temperature(50*u.GHz))
    np.testing.assert_almost_equal(k.value, 0.650965, 5)
    assert k.unit.is_equivalent(u.K)


def test_beam():
    # pick a beam area: 2 pi r^2 = area of a Gaussian with sigma=50 arcsec
    omega_B = 2 * np.pi * (50 * u.arcsec) ** 2
    new_beam = (5*u.beam).to(u.sr,
                             u.equivalencies.beam_angular_area(omega_B))
    np.testing.assert_almost_equal(omega_B.to(u.sr).value * 5,
                                   new_beam.value)
    assert new_beam.unit.is_equivalent(u.sr)

    # make sure that it's still consistent with 5 beams
    nbeams = new_beam.to(u.beam, u.equivalencies.beam_angular_area(omega_B))
    np.testing.assert_almost_equal(nbeams.value, 5)

    # test inverse beam equivalency
    # (this is just a sanity check that the equivalency is defined;
    # it's not for testing numerical consistency)
    new_inverse_beam = (5/u.beam).to(
        1/u.sr, u.equivalencies.beam_angular_area(omega_B))

    # test practical case
    # (this is by far the most important one)
    flux_density = (5*u.Jy/u.beam).to(
        u.MJy/u.sr, u.equivalencies.beam_angular_area(omega_B))
    np.testing.assert_almost_equal(flux_density.value, 13.5425483146382)


def test_thermodynamic_temperature():
    nu = 143 * u.GHz
    tb = 0.0026320518775281975 * u.K
    np.testing.assert_almost_equal(
        tb.value, (1 * u.MJy/u.sr).to_value(
            u.K, equivalencies=u.thermodynamic_temperature(
                nu, T_cmb=2.7255 * u.K)))
    np.testing.assert_almost_equal(
        1.0, tb.to_value(
            u.MJy / u.sr, equivalencies=u.thermodynamic_temperature(
                nu, T_cmb=2.7255 * u.K)))


def test_equivalency_context():
    with u.set_enabled_equivalencies(u.dimensionless_angles()):
        phase = u.Quantity(1., u.cycle)
        assert_allclose(np.exp(1j*phase), 1.)
        Omega = u.cycle / (1.*u.minute)
        assert_allclose(np.exp(1j*Omega*60.*u.second), 1.)
        # ensure we can turn off equivalencies even within the scope
        with pytest.raises(u.UnitsError):
            phase.to(1, equivalencies=None)

        # test the manager also works in the Quantity constructor.
        q1 = u.Quantity(phase, u.dimensionless_unscaled)
        assert_allclose(q1.value, u.cycle.to(u.radian))

        # and also if we use a class that happens to have a unit attribute.
        class MyQuantityLookalike(np.ndarray):
            pass

        mylookalike = np.array(1.).view(MyQuantityLookalike)
        mylookalike.unit = 'cycle'
        # test the manager also works in the Quantity constructor.
        q2 = u.Quantity(mylookalike, u.dimensionless_unscaled)
        assert_allclose(q2.value, u.cycle.to(u.radian))

    with u.set_enabled_equivalencies(u.spectral()):
        u.GHz.to(u.cm)
        eq_on = u.GHz.find_equivalent_units()
        with pytest.raises(u.UnitsError):
            u.GHz.to(u.cm, equivalencies=None)

    # without equivalencies, we should find a smaller (sub)set
    eq_off = u.GHz.find_equivalent_units()
    assert all(eq in set(eq_on) for eq in eq_off)
    assert set(eq_off) < set(eq_on)

    # Check the equivalency manager also works in ufunc evaluations,
    # not just using (wrong) scaling. [#2496]
    l2v = u.doppler_optical(6000 * u.angstrom)
    l1 = 6010 * u.angstrom
    assert l1.to(u.km/u.s, equivalencies=l2v) > 100. * u.km / u.s
    with u.set_enabled_equivalencies(l2v):
        assert l1 > 100. * u.km / u.s
        assert abs((l1 - 500. * u.km / u.s).to(u.angstrom)) < 1. * u.km/u.s


def test_equivalency_context_manager():
    base_registry = u.get_current_unit_registry()

    def just_to_from_units(equivalencies):
        return [(equiv[0], equiv[1]) for equiv in equivalencies]

    tf_dimensionless_angles = just_to_from_units(u.dimensionless_angles())
    tf_spectral = just_to_from_units(u.spectral())
    assert base_registry.equivalencies == []
    with u.set_enabled_equivalencies(u.dimensionless_angles()):
        new_registry = u.get_current_unit_registry()
        assert (set(just_to_from_units(new_registry.equivalencies)) ==
                set(tf_dimensionless_angles))
        assert set(new_registry.all_units) == set(base_registry.all_units)
        with u.set_enabled_equivalencies(u.spectral()):
            newer_registry = u.get_current_unit_registry()
            assert (set(just_to_from_units(newer_registry.equivalencies)) ==
                    set(tf_spectral))
            assert (set(newer_registry.all_units) ==
                    set(base_registry.all_units))

        assert (set(just_to_from_units(new_registry.equivalencies)) ==
                set(tf_dimensionless_angles))
        assert set(new_registry.all_units) == set(base_registry.all_units)
        with u.add_enabled_equivalencies(u.spectral()):
            newer_registry = u.get_current_unit_registry()
            assert (set(just_to_from_units(newer_registry.equivalencies)) ==
                    set(tf_dimensionless_angles) | set(tf_spectral))
            assert (set(newer_registry.all_units) ==
                    set(base_registry.all_units))

    assert base_registry is u.get_current_unit_registry()


def test_temperature():
    from astropy.units.imperial import deg_F
    t_k = 0 * u.K
    assert_allclose(t_k.to_value(u.deg_C, u.temperature()), -273.15)
    assert_allclose(t_k.to_value(deg_F, u.temperature()), -459.67)


def test_temperature_energy():
    x = 1000 * u.K
    y = (x * constants.k_B).to(u.keV)
    assert_allclose(x.to_value(u.keV, u.temperature_energy()), y.value)
    assert_allclose(y.to_value(u.K, u.temperature_energy()), x.value)


def test_molar_mass_amu():
    x = 1 * (u.g/u.mol)
    y = 1 * u.u
    assert_allclose(x.to_value(u.u, u.molar_mass_amu()), y.value)
    assert_allclose(y.to_value(u.g/u.mol, u.molar_mass_amu()), x.value)
    with pytest.raises(u.UnitsError):
        x.to(u.u)


def test_compose_equivalencies():
    x = u.Unit("arcsec").compose(units=(u.pc,), equivalencies=u.parallax())
    assert x[0] == u.pc

    x = u.Unit("2 arcsec").compose(units=(u.pc,),
                                   equivalencies=u.parallax())
    assert x[0] == u.Unit(0.5 * u.pc)

    x = u.degree.compose(equivalencies=u.dimensionless_angles())
    assert u.Unit(u.degree.to(u.radian)) in x

    x = (u.nm).compose(units=(u.m, u.s),
                       equivalencies=u.doppler_optical(0.55*u.micron))
    for y in x:
        if y.bases == [u.m, u.s]:
            assert y.powers == [1, -1]
            assert_allclose(
                y.scale,
                u.nm.to(u.m / u.s,
                        equivalencies=u.doppler_optical(0.55 * u.micron)))
            break
    else:
        assert False, "Didn't find speed in compose results"


def test_pixel_scale():
    pix = 75*u.pix
    asec = 30*u.arcsec

    pixscale = 0.4*u.arcsec/u.pix
    pixscale2 = 2.5*u.pix/u.arcsec

    assert_quantity_allclose(pix.to(u.arcsec, u.pixel_scale(pixscale)), asec)
    assert_quantity_allclose(pix.to(u.arcmin, u.pixel_scale(pixscale)), asec)

    assert_quantity_allclose(pix.to(u.arcsec, u.pixel_scale(pixscale2)),
                             asec)
    assert_quantity_allclose(pix.to(u.arcmin, u.pixel_scale(pixscale2)),
                             asec)

    assert_quantity_allclose(asec.to(u.pix, u.pixel_scale(pixscale)), pix)
    assert_quantity_allclose(asec.to(u.pix, u.pixel_scale(pixscale2)), pix)


def test_plate_scale():
    mm = 1.5*u.mm
    asec = 30*u.arcsec

    platescale = 20*u.arcsec/u.mm
    platescale2 = 0.05*u.mm/u.arcsec

    assert_quantity_allclose(mm.to(u.arcsec, u.plate_scale(platescale)),
                             asec)
    assert_quantity_allclose(mm.to(u.arcmin, u.plate_scale(platescale)),
                             asec)

    assert_quantity_allclose(mm.to(u.arcsec, u.plate_scale(platescale2)),
                             asec)
    assert_quantity_allclose(mm.to(u.arcmin, u.plate_scale(platescale2)),
                             asec)

    assert_quantity_allclose(asec.to(u.mm, u.plate_scale(platescale)), mm)
    assert_quantity_allclose(asec.to(u.mm, u.plate_scale(platescale2)), mm)


def test_littleh():
    H0_70 = 70*u.km/u.s/u.Mpc
    h70dist = 70 * u.Mpc/u.littleh

    assert_quantity_allclose(h70dist.to(u.Mpc, u.with_H0(H0_70)), 100*u.Mpc)

    # make sure using the default cosmology works
    cosmodist = cosmology.default_cosmology.get().H0.value * u.Mpc/u.littleh
    assert_quantity_allclose(cosmodist.to(u.Mpc, u.with_H0()), 100*u.Mpc)

    # Now try a luminosity scaling
    h1lum = .49 * u.Lsun * u.littleh**-2
    assert_quantity_allclose(h1lum.to(u.Lsun, u.with_H0(H0_70)), 1*u.Lsun)

    # And the trickiest one: magnitudes. Using H0=10 here for the round
    # numbers
    H0_10 = 10*u.km/u.s/u.Mpc
    # assume the "true" magnitude M = 12.
    # Then M - 5*log_10(h) = M + 5 = 17
    withlittlehmag = 17 * (u.mag - u.MagUnit(u.littleh**2))
    assert_quantity_allclose(withlittlehmag.to(u.mag, u.with_H0(H0_10)),
                             12*u.mag)


def test_equivalency():
    ps = u.pixel_scale(10*u.arcsec/u.pix)
    assert isinstance(ps, Equivalency)
    assert isinstance(ps.name, list)
    assert len(ps.name) == 1
    assert ps.name[0] == "pixel_scale"
    assert isinstance(ps.kwargs, list)
    assert len(ps.kwargs) == 1
    assert ps.kwargs[0] == dict({'pixscale': 10*u.arcsec/u.pix})


def test_add_equivalencies():
    e1 = u.pixel_scale(10*u.arcsec/u.pixel) + u.temperature_energy()
    assert isinstance(e1, Equivalency)
    assert e1.name == ["pixel_scale", "temperature_energy"]
    assert isinstance(e1.kwargs, list)
    assert e1.kwargs == [dict({'pixscale': 10*u.arcsec/u.pix}), dict()]

    e2 = u.pixel_scale(10*u.arcsec/u.pixel) + [1, 2, 3]
    assert isinstance(e2, list)
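
# Illustrative sketch (not one of the tests above): an equivalency is just a
# list of (from_unit, to_unit, forward, backward) tuples, so ad-hoc ones can
# be passed straight to .to()/.to_value(), as test_trivial_equivalency does
# above. The sensor scale here is hypothetical:
#
#     from astropy import units as u
#
#     # Suppose a sensor reads 2.5 m/s per volt.
#     volts_to_speed = [(u.V, u.m / u.s,
#                        lambda x: 2.5 * x, lambda x: x / 2.5)]
#     assert (2. * u.V).to_value(u.m / u.s,
#                                equivalencies=volts_to_speed) == 5.0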
# ======================================================================
# record sha256: 04e59b63117ea1ac4b831ec7b350f3ce8154bae96ba94b3aa6c46b145637f917
# ======================================================================
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test utilities for `astropy.units`.
"""

import numpy as np
from numpy import finfo

from astropy.units.utils import sanitize_scale, quantity_asanyarray
from astropy.units.quantity import Quantity


_float_finfo = finfo(float)


def test_quantity_asanyarray():
    array_of_quantities = [Quantity(1), Quantity(2), Quantity(3)]
    quantity_array = quantity_asanyarray(array_of_quantities)
    assert isinstance(quantity_array, Quantity)

    array_of_integers = [1, 2, 3]
    np_array = quantity_asanyarray(array_of_integers)
    assert isinstance(np_array, np.ndarray)


def test_sanitize_scale():
    assert sanitize_scale(complex(2, _float_finfo.eps)) == 2
    assert sanitize_scale(complex(_float_finfo.eps, 2)) == 2j
# ======================================================================
# record sha256: 6b8a327aab5af4942457ee7cb3209e26056f33a035ba3aaf93eaaf296b51dd0d
# ======================================================================
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest

from astropy import units as u  # pylint: disable=W0611


@pytest.mark.parametrize("solarx_unit,solary_unit", [
                         (u.arcsec, u.arcsec),
                         ('angle', 'angle')])
def test_args3(solarx_unit, solary_unit):
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit):
        return solarx, solary

    solarx, solary = myfunc_args(1*u.arcsec, 1*u.arcsec)

    assert isinstance(solarx, u.Quantity)
    assert isinstance(solary, u.Quantity)

    assert solarx.unit == u.arcsec
    assert solary.unit == u.arcsec


@pytest.mark.parametrize("solarx_unit,solary_unit", [
                         (u.arcsec, u.arcsec),
                         ('angle', 'angle')])
def test_args_noconvert3(solarx_unit, solary_unit):
    @u.quantity_input()
    def myfunc_args(solarx: solarx_unit, solary: solary_unit):
        return solarx, solary

    solarx, solary = myfunc_args(1*u.deg, 1*u.arcmin)

    assert isinstance(solarx, u.Quantity)
    assert isinstance(solary, u.Quantity)

    assert solarx.unit == u.deg
    assert solary.unit == u.arcmin


@pytest.mark.parametrize("solarx_unit", [
                         u.arcsec, 'angle'])
def test_args_nonquantity3(solarx_unit):
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary):
        return solarx, solary

    solarx, solary = myfunc_args(1*u.arcsec, 100)

    assert isinstance(solarx, u.Quantity)
    assert isinstance(solary, int)

    assert solarx.unit == u.arcsec


@pytest.mark.parametrize("solarx_unit,solary_unit", [
                         (u.arcsec, u.eV),
                         ('angle', 'energy')])
def test_arg_equivalencies3(solarx_unit, solary_unit):
    @u.quantity_input(equivalencies=u.mass_energy())
    def myfunc_args(solarx: solarx_unit, solary: solary_unit):
        return solarx, solary+(10*u.J)  # Add an energy to check equiv is working

    solarx, solary = myfunc_args(1*u.arcsec, 100*u.gram)

    assert isinstance(solarx, u.Quantity)
    assert isinstance(solary, u.Quantity)

    assert solarx.unit == u.arcsec
    assert solary.unit == u.gram


@pytest.mark.parametrize("solarx_unit,solary_unit", [
                         (u.arcsec, u.deg),
                         ('angle', 'angle')])
def test_wrong_unit3(solarx_unit, solary_unit):
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit):
        return solarx, solary

    with pytest.raises(u.UnitsError) as e:
        solarx, solary = myfunc_args(1*u.arcsec, 100*u.km)

    str_to = str(solary_unit)
    assert str(e.value) == "Argument 'solary' to function 'myfunc_args' must be in units convertible to '{0}'.".format(str_to)


@pytest.mark.parametrize("solarx_unit,solary_unit", [
                         (u.arcsec, u.deg),
                         ('angle', 'angle')])
def test_not_quantity3(solarx_unit, solary_unit):
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit):
        return solarx, solary

    with pytest.raises(TypeError) as e:
        solarx, solary = myfunc_args(1*u.arcsec, 100)
    assert str(e.value) == "Argument 'solary' to function 'myfunc_args' has no 'unit' attribute. You may want to pass in an astropy Quantity instead."


def test_decorator_override():
    @u.quantity_input(solarx=u.arcsec)
    def myfunc_args(solarx: u.km, solary: u.arcsec):
        return solarx, solary

    solarx, solary = myfunc_args(1*u.arcsec, 1*u.arcsec)

    assert isinstance(solarx, u.Quantity)
    assert isinstance(solary, u.Quantity)

    assert solarx.unit == u.arcsec
    assert solary.unit == u.arcsec


@pytest.mark.parametrize("solarx_unit,solary_unit", [
                         (u.arcsec, u.deg),
                         ('angle', 'angle')])
def test_kwargs3(solarx_unit, solary_unit):
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary, myk: solary_unit=1*u.arcsec):
        return solarx, solary, myk

    solarx, solary, myk = myfunc_args(1*u.arcsec, 100, myk=100*u.deg)

    assert isinstance(solarx, u.Quantity)
    assert isinstance(solary, int)
    assert isinstance(myk, u.Quantity)

    assert myk.unit == u.deg


@pytest.mark.parametrize("solarx_unit,solary_unit", [
                         (u.arcsec, u.deg),
                         ('angle', 'angle')])
def test_unused_kwargs3(solarx_unit, solary_unit):
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary, myk: solary_unit=1*u.arcsec,
                    myk2=1000):
        return solarx, solary, myk, myk2

    solarx, solary, myk, myk2 = myfunc_args(1*u.arcsec, 100,
                                            myk=100*u.deg, myk2=10)

    assert isinstance(solarx, u.Quantity)
    assert isinstance(solary, int)
    assert isinstance(myk, u.Quantity)
    assert isinstance(myk2, int)

    assert myk.unit == u.deg
    assert myk2 == 10


@pytest.mark.parametrize("solarx_unit,energy", [
                         (u.arcsec, u.eV),
                         ('angle', 'energy')])
def test_kwarg_equivalencies3(solarx_unit, energy):
    @u.quantity_input(equivalencies=u.mass_energy())
    def myfunc_args(solarx: solarx_unit, energy: energy=10*u.eV):
        return solarx, energy+(10*u.J)  # Add an energy to check equiv is working

    solarx, energy = myfunc_args(1*u.arcsec, 100*u.gram)

    assert isinstance(solarx, u.Quantity)
    assert isinstance(energy, u.Quantity)

    assert solarx.unit == u.arcsec
    assert energy.unit == u.gram


@pytest.mark.parametrize("solarx_unit,solary_unit", [
                         (u.arcsec, u.deg),
                         ('angle', 'angle')])
def test_kwarg_wrong_unit3(solarx_unit, solary_unit):
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit=10*u.deg):
        return solarx, solary

    with pytest.raises(u.UnitsError) as e:
        solarx, solary = myfunc_args(1*u.arcsec, solary=100*u.km)

    str_to = str(solary_unit)
    assert str(e.value) == "Argument 'solary' to function 'myfunc_args' must be in units convertible to '{0}'.".format(str_to)


@pytest.mark.parametrize("solarx_unit,solary_unit", [
                         (u.arcsec, u.deg),
                         ('angle', 'angle')])
def test_kwarg_not_quantity3(solarx_unit, solary_unit):
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit=10*u.deg):
        return solarx, solary

    with pytest.raises(TypeError) as e:
        solarx, solary = myfunc_args(1*u.arcsec, solary=100)
    assert str(e.value) == "Argument 'solary' to function 'myfunc_args' has no 'unit' attribute. You may want to pass in an astropy Quantity instead."


@pytest.mark.parametrize("solarx_unit,solary_unit", [
                         (u.arcsec, u.deg),
                         ('angle', 'angle')])
def test_kwarg_default3(solarx_unit, solary_unit):
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit=10*u.deg):
        return solarx, solary

    solarx, solary = myfunc_args(1*u.arcsec)


def test_return_annotation():
    @u.quantity_input
    def myfunc_args(solarx: u.arcsec) -> u.deg:
        return solarx

    solarx = myfunc_args(1*u.arcsec)
    assert solarx.unit is u.deg


def test_return_annotation_none():
    @u.quantity_input
    def myfunc_args(solarx: u.arcsec) -> None:
        pass

    solarx = myfunc_args(1*u.arcsec)
    assert solarx is None
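# Usage sketch (not part of the test suite above): the decorator behaviour
# these tests verify, shown on a hypothetical free function.  The return
# annotation makes the decorator convert the result to joules on the way out.
if __name__ == '__main__':
    @u.quantity_input
    def kinetic_energy(mass: u.kg, velocity: u.m / u.s) -> u.J:
        return 0.5 * mass * velocity ** 2

    # Any unit convertible to the annotation is accepted unchanged;
    # only the return value is converted (here, to J).
    print(kinetic_energy(2 * u.kg, 3 * u.km / u.s))  # -> 9000000.0 J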
2065d43406e3483a11597bdd48a7cd8c8b8cdfbaf143b0907a5e21bd057b0a2d
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Regression tests for the units.format package """ import pytest from numpy.testing import assert_allclose from astropy.tests.helper import catch_warnings from astropy import units as u from astropy.constants import si from astropy.units import core from astropy.units import format as u_format from astropy.units.utils import is_effectively_unity @pytest.mark.parametrize('strings, unit', [ (["m s", "m*s", "m.s"], u.m * u.s), (["m/s", "m*s**-1", "m /s", "m / s", "m/ s"], u.m / u.s), (["m**2", "m2", "m**(2)", "m**+2", "m+2", "m^(+2)"], u.m ** 2), (["m**-3", "m-3", "m^(-3)", "/m3"], u.m ** -3), (["m**(1.5)", "m(3/2)", "m**(3/2)", "m^(3/2)"], u.m ** 1.5), (["2.54 cm"], u.Unit(u.cm * 2.54)), (["10+8m"], u.Unit(u.m * 1e8)), # This is the VOUnits documentation, but doesn't seem to follow the # unity grammar (["3.45 10**(-4)Jy"], 3.45 * 1e-4 * u.Jy) (["sqrt(m)"], u.m ** 0.5), (["dB(mW)", "dB (mW)"], u.DecibelUnit(u.mW)), (["mag"], u.mag), (["mag(ct/s)"], u.MagUnit(u.ct / u.s)), (["dex"], u.dex), (["dex(cm s**-2)", "dex(cm/s2)"], u.DexUnit(u.cm / u.s**2))]) def test_unit_grammar(strings, unit): for s in strings: print(s) unit2 = u_format.Generic.parse(s) assert unit2 == unit @pytest.mark.parametrize('string', ['sin( /pixel /s)', 'mag(mag)', 'dB(dB(mW))', 'dex()']) def test_unit_grammar_fail(string): with pytest.raises(ValueError): print(string) u_format.Generic.parse(string) @pytest.mark.parametrize('strings, unit', [ (["0.1nm"], u.AA), (["mW/m2"], u.Unit(u.erg / u.cm ** 2 / u.s)), (["mW/(m2)"], u.Unit(u.erg / u.cm ** 2 / u.s)), (["km/s", "km.s-1"], u.km / u.s), (["10pix/nm"], u.Unit(10 * u.pix / u.nm)), (["1.5x10+11m"], u.Unit(1.5e11 * u.m)), (["1.5×10+11m"], u.Unit(1.5e11 * u.m)), (["m2"], u.m ** 2), (["10+21m"], u.Unit(u.m * 1e21)), (["2.54cm"], u.Unit(u.cm * 2.54)), (["20%"], 0.20 * u.dimensionless_unscaled), (["10+9"], 1.e9 * u.dimensionless_unscaled), (["2x10-9"], 2.e-9 * u.dimensionless_unscaled), (["---"], u.dimensionless_unscaled), (["ma"], u.ma), (["mAU"], u.mAU), (["uarcmin"], u.uarcmin), (["uarcsec"], u.uarcsec), (["kbarn"], u.kbarn), (["Gbit"], u.Gbit), (["Gibit"], 2 ** 30 * u.bit), (["kbyte"], u.kbyte), (["mRy"], 0.001 * u.Ry), (["mmag"], u.mmag), (["Mpc"], u.Mpc), (["Gyr"], u.Gyr), (["°"], u.degree), (["°/s"], u.degree / u.s), (["Å"], u.AA), (["Å/s"], u.AA / u.s), (["\\h"], si.h)]) def test_cds_grammar(strings, unit): for s in strings: print(s) unit2 = u_format.CDS.parse(s) assert unit2 == unit @pytest.mark.parametrize('string', [ '0.1 nm', 'solMass(3/2)', 'km / s', 'km s-1', 'pix0.1nm', 'pix/(0.1nm)', 'km*s', 'km**2', '5x8+3m', '0.1---', '---m', 'm---', 'mag(s-1)', 'dB(mW)', 'dex(cm s-2)']) def test_cds_grammar_fail(string): with pytest.raises(ValueError): print(string) u_format.CDS.parse(string) # These examples are taken from the EXAMPLES section of # https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/ @pytest.mark.parametrize('strings, unit', [ (["count /s", "count/s", "count s**(-1)", "count / s", "count /s "], u.count / u.s), (["/pixel /s", "/(pixel * s)"], (u.pixel * u.s) ** -1), (["count /m**2 /s /eV", "count m**(-2) * s**(-1) * eV**(-1)", "count /(m**2 * s * eV)"], u.count * u.m ** -2 * u.s ** -1 * u.eV ** -1), (["erg /pixel /s /GHz", "erg /s /GHz /pixel", "erg /pixel /(s * GHz)"], u.erg / (u.s * u.GHz * u.pixel)), (["keV**2 /yr /angstrom", "10**(10) keV**2 /yr /m"], # Though this is given as an example, it seems to violate the rules # of not raising scales to 
powers, so I'm just excluding it # "(10**2 MeV)**2 /yr /m" u.keV**2 / (u.yr * u.angstrom)), (["10**(46) erg /s", "10**46 erg /s", "10**(39) J /s", "10**(39) W", "10**(15) YW", "YJ /fs"], 10**46 * u.erg / u.s), (["10**(-7) J /cm**2 /MeV", "10**(-9) J m**(-2) eV**(-1)", "nJ m**(-2) eV**(-1)", "nJ /m**2 /eV"], 10 ** -7 * u.J * u.cm ** -2 * u.MeV ** -1), (["sqrt(erg /pixel /s /GHz)", "(erg /pixel /s /GHz)**(0.5)", "(erg /pixel /s /GHz)**(1/2)", "erg**(0.5) pixel**(-0.5) s**(-0.5) GHz**(-0.5)"], (u.erg * u.pixel ** -1 * u.s ** -1 * u.GHz ** -1) ** 0.5), (["(count /s) (/pixel /s)", "(count /s) * (/pixel /s)", "count /pixel /s**2"], (u.count / u.s) * (1.0 / (u.pixel * u.s)))]) def test_ogip_grammar(strings, unit): for s in strings: print(s) unit2 = u_format.OGIP.parse(s) assert unit2 == unit @pytest.mark.parametrize('string', [ 'log(photon /m**2 /s /Hz)', 'sin( /pixel /s)', 'log(photon /cm**2 /s /Hz) /(sin( /pixel /s))', 'log(photon /cm**2 /s /Hz) (sin( /pixel /s))**(-1)', 'dB(mW)', 'dex(cm/s**2)']) def test_ogip_grammar_fail(string): with pytest.raises(ValueError): print(string) u_format.OGIP.parse(string) @pytest.mark.parametrize('unit', [val for key, val in u.__dict__.items() if (isinstance(val, core.UnitBase) and not isinstance(val, core.PrefixUnit))]) def test_roundtrip(unit): a = core.Unit(unit.to_string('generic'), format='generic') b = core.Unit(unit.decompose().to_string('generic'), format='generic') assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-2) assert_allclose(b.decompose().scale, unit.decompose().scale, rtol=1e-2) @pytest.mark.parametrize('unit', [ val for key, val in u_format.VOUnit._units.items() if (isinstance(val, core.UnitBase) and not isinstance(val, core.PrefixUnit))]) def test_roundtrip_vo_unit(unit): a = core.Unit(unit.to_string('vounit'), format='vounit') assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-2) if unit not in (u.mag, u.dB): ud = unit.decompose().to_string('vounit') assert ' ' not in ud b = core.Unit(ud, format='vounit') assert_allclose(b.decompose().scale, unit.decompose().scale, rtol=1e-2) @pytest.mark.parametrize('unit', [ val for key, val in u_format.Fits._units.items() if (isinstance(val, core.UnitBase) and not isinstance(val, core.PrefixUnit))]) def test_roundtrip_fits(unit): s = unit.to_string('fits') a = core.Unit(s, format='fits') assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-2) @pytest.mark.parametrize('unit', [ val for key, val in u_format.CDS._units.items() if (isinstance(val, core.UnitBase) and not isinstance(val, core.PrefixUnit))]) def test_roundtrip_cds(unit): a = core.Unit(unit.to_string('cds'), format='cds') assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-2) try: b = core.Unit(unit.decompose().to_string('cds'), format='cds') except ValueError: # skip mag: decomposes into dex, unknown to OGIP return assert_allclose(b.decompose().scale, unit.decompose().scale, rtol=1e-2) @pytest.mark.parametrize('unit', [ val for key, val in u_format.OGIP._units.items() if (isinstance(val, core.UnitBase) and not isinstance(val, core.PrefixUnit))]) def test_roundtrip_ogip(unit): a = core.Unit(unit.to_string('ogip'), format='ogip') assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-2) try: b = core.Unit(unit.decompose().to_string('ogip'), format='ogip') except ValueError: # skip mag: decomposes into dex, unknown to OGIP return assert_allclose(b.decompose().scale, unit.decompose().scale, rtol=1e-2) def test_fits_units_available(): u_format.Fits._units def 
test_vo_units_available(): u_format.VOUnit._units def test_cds_units_available(): u_format.CDS._units def test_cds_non_ascii_unit(): """Regression test for #5350. This failed with a decoding error as μas could not be represented in ascii.""" from astropy.units import cds with cds.enable(): u.radian.find_equivalent_units(include_prefix_units=True) def test_latex(): fluxunit = u.erg / (u.cm ** 2 * u.s) assert fluxunit.to_string('latex') == r'$\mathrm{\frac{erg}{s\,cm^{2}}}$' def test_new_style_latex(): fluxunit = u.erg / (u.cm ** 2 * u.s) assert "{0:latex}".format(fluxunit) == r'$\mathrm{\frac{erg}{s\,cm^{2}}}$' def test_latex_scale(): fluxunit = u.Unit(1.e-24 * u.erg / (u.cm ** 2 * u.s * u.Hz)) latex = r'$\mathrm{1 \times 10^{-24}\,\frac{erg}{Hz\,s\,cm^{2}}}$' assert fluxunit.to_string('latex') == latex def test_latex_inline_scale(): fluxunit = u.Unit(1.e-24 * u.erg / (u.cm ** 2 * u.s * u.Hz)) latex_inline = (r'$\mathrm{1 \times 10^{-24}\,erg' r'\,Hz^{-1}\,s^{-1}\,cm^{-2}}$') assert fluxunit.to_string('latex_inline') == latex_inline @pytest.mark.parametrize('format_spec, string', [ ('generic', 'erg / (cm2 s)'), ('s', 'erg / (cm2 s)'), ('console', ' erg \n ------\n s cm^2'), ('latex', '$\\mathrm{\\frac{erg}{s\\,cm^{2}}}$'), ('latex_inline', '$\\mathrm{erg\\,s^{-1}\\,cm^{-2}}$'), ('>20s', ' erg / (cm2 s)')]) def test_format_styles(format_spec, string): fluxunit = u.erg / (u.cm ** 2 * u.s) assert format(fluxunit, format_spec) == string def test_flatten_to_known(): myunit = u.def_unit("FOOBAR_One", u.erg / u.Hz) assert myunit.to_string('fits') == 'erg Hz-1' myunit2 = myunit * u.bit ** 3 assert myunit2.to_string('fits') == 'bit3 erg Hz-1' def test_flatten_impossible(): myunit = u.def_unit("FOOBAR_Two") with u.add_enabled_units(myunit), pytest.raises(ValueError): myunit.to_string('fits') def test_console_out(): """ Issue #436. """ u.Jy.decompose().to_string('console') def test_flexible_float(): assert u.min._represents.to_string('latex') == r'$\mathrm{60\,s}$' def test_fraction_repr(): area = u.cm ** 2.0 assert '.' not in area.to_string('latex') fractional = u.cm ** 2.5 assert '5/2' in fractional.to_string('latex') assert fractional.to_string('unicode') == 'cm⁵⸍²' def test_scale_effectively_unity(): """Scale just off unity at machine precision level is OK. Ensures #748 does not recur """ a = (3. * u.N).cgs assert is_effectively_unity(a.unit.scale) assert len(a.__repr__().split()) == 3 def test_percent(): """Test that the % unit is properly recognized. Since % is a special symbol, this goes slightly beyond the round-tripping tested above.""" assert u.Unit('%') == u.percent == u.Unit(0.01) assert u.Unit('%', format='cds') == u.Unit(0.01) assert u.Unit(0.01).to_string('cds') == '%' with pytest.raises(ValueError): u.Unit('%', format='fits') with pytest.raises(ValueError): u.Unit('%', format='vounit') def test_scaled_dimensionless(): """Test that scaled dimensionless units are properly recognized in generic and CDS, but not in fits and vounit.""" assert u.Unit('0.1') == u.Unit(0.1) == 0.1 * u.dimensionless_unscaled assert u.Unit('1.e-4') == u.Unit(1.e-4) assert u.Unit('10-4', format='cds') == u.Unit(1.e-4) assert u.Unit('10+8').to_string('cds') == '10+8' with pytest.raises(ValueError): u.Unit(0.15).to_string('fits') assert u.Unit(0.1).to_string('fits') == '10**-1' with pytest.raises(ValueError): u.Unit(0.1).to_string('vounit') def test_deprecated_did_you_mean_units(): try: u.Unit('ANGSTROM', format='fits') except ValueError as e: assert 'Did you mean Angstrom or angstrom?' 
in str(e) try: u.Unit('crab', format='ogip') except ValueError as e: assert 'Crab (deprecated)' in str(e) assert 'mCrab (deprecated)' in str(e) try: u.Unit('ANGSTROM', format='vounit') except ValueError as e: assert 'angstrom (deprecated)' in str(e) assert '0.1nm' in str(e) assert str(e).count('0.1nm') == 1 with catch_warnings() as w: u.Unit('angstrom', format='vounit') assert len(w) == 1 assert '0.1nm' in str(w[0].message) @pytest.mark.parametrize('string', ['mag(ct/s)', 'dB(mW)', 'dex(cm s**-2)']) def test_fits_function(string): # Function units cannot be written, so ensure they're not parsed either. with pytest.raises(ValueError): print(string) u_format.Fits().parse(string) @pytest.mark.parametrize('string', ['mag(ct/s)', 'dB(mW)', 'dex(cm s**-2)']) def test_vounit_function(string): # Function units cannot be written, so ensure they're not parsed either. with pytest.raises(ValueError): print(string) u_format.VOUnit().parse(string) def test_vounit_binary_prefix(): u.Unit('KiB', format='vounit') == u.Unit('1024 B') u.Unit('Kibyte', format='vounit') == u.Unit('1024 B') u.Unit('Kibit', format='vounit') == u.Unit('1024 B') with catch_warnings() as w: u.Unit('kibibyte', format='vounit') assert len(w) == 1 def test_vounit_unknown(): assert u.Unit('unknown', format='vounit') is None assert u.Unit('UNKNOWN', format='vounit') is None assert u.Unit('', format='vounit') is u.dimensionless_unscaled def test_vounit_details(): assert u.Unit('Pa', format='vounit') is u.Pascal # The da- prefix is not allowed, and the d- prefix is discouraged assert u.dam.to_string('vounit') == '10m' assert u.Unit('dam dag').to_string('vounit') == '100g m' def test_vounit_custom(): x = u.Unit("'foo' m", format='vounit') x_vounit = x.to_string('vounit') assert x_vounit == "'foo' m" x_string = x.to_string() assert x_string == "foo m" x = u.Unit("m'foo' m", format='vounit') assert x.bases[1]._represents.scale == 0.001 x_vounit = x.to_string('vounit') assert x_vounit == "m m'foo'" x_string = x.to_string() assert x_string == 'm mfoo' def test_vounit_implicit_custom(): x = u.Unit("furlong/week", format="vounit") assert x.bases[0]._represents.scale == 1e-15 assert x.bases[0]._represents.bases[0].name == 'urlong' @pytest.mark.parametrize('scale, number, string', [('10+2', 100, '10**2'), ('10(+2)', 100, '10**2'), ('10**+2', 100, '10**2'), ('10**(+2)', 100, '10**2'), ('10^+2', 100, '10**2'), ('10^(+2)', 100, '10**2'), ('10**2', 100, '10**2'), ('10**(2)', 100, '10**2'), ('10^2', 100, '10**2'), ('10^(2)', 100, '10**2'), ('10-20', 10**(-20), '10**-20'), ('10(-20)', 10**(-20), '10**-20'), ('10**-20', 10**(-20), '10**-20'), ('10**(-20)', 10**(-20), '10**-20'), ('10^-20', 10**(-20), '10**-20'), ('10^(-20)', 10**(-20), '10**-20'), ]) def test_fits_scale_factor(scale, number, string): x = u.Unit(scale + ' erg/s/cm**2/Angstrom', format='fits') assert x == number * (u.erg / u.s / u.cm ** 2 / u.Angstrom) assert x.to_string(format='fits') == string + ' Angstrom-1 cm-2 erg s-1' x = u.Unit(scale + '*erg/s/cm**2/Angstrom', format='fits') assert x == number * (u.erg / u.s / u.cm ** 2 / u.Angstrom) assert x.to_string(format='fits') == string + ' Angstrom-1 cm-2 erg s-1' def test_fits_scale_factor_errors(): with pytest.raises(ValueError): x = u.Unit('1000 erg/s/cm**2/Angstrom', format='fits') with pytest.raises(ValueError): x = u.Unit('12 erg/s/cm**2/Angstrom', format='fits') x = u.Unit(1.2 * u.erg) with pytest.raises(ValueError): x.to_string(format='fits') x = u.Unit(100.0 * u.erg) assert x.to_string(format='fits') == '10**2 erg'
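# Usage sketch (not part of the test suite above): the parse / to_string
# round trip these tests exercise, using only the public Unit API.  The
# exact output strings depend on each format's ordering conventions.
if __name__ == '__main__':
    flux = u.Unit('erg / (s cm2 Angstrom)')
    fits_string = flux.to_string('fits')      # e.g. 'Angstrom-1 cm-2 erg s-1'
    print(fits_string)
    print(flux.to_string('latex'))            # LaTeX source for the same unit
    # A string written in a given convention parses back to the same unit.
    assert u.Unit(fits_string, format='fits') == flux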
30fb6b2eb898cc7fcc601b7ae1d8f3c44f3945a9f1d204a40628862424a121d1
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Regression tests for the units package """ import pickle from fractions import Fraction import pytest import numpy as np from numpy.testing import assert_allclose from astropy.tests.helper import raises, catch_warnings from astropy import units as u from astropy import constants as c from astropy.units import utils def test_getting_started(): """ Corresponds to "Getting Started" section in the docs. """ from astropy.units import imperial with imperial.enable(): speed_unit = u.cm / u.s x = speed_unit.to(imperial.mile / u.hour, 1) assert_allclose(x, 0.02236936292054402) speed_converter = speed_unit._get_converter("mile hour^-1") x = speed_converter([1., 1000., 5000.]) assert_allclose(x, [2.23693629e-02, 2.23693629e+01, 1.11846815e+02]) def test_initialisation(): assert u.Unit(u.m) is u.m ten_meter = u.Unit(10.*u.m) assert ten_meter == u.CompositeUnit(10., [u.m], [1]) assert u.Unit(ten_meter) is ten_meter assert u.Unit(10.*ten_meter) == u.CompositeUnit(100., [u.m], [1]) foo = u.Unit('foo', (10. * ten_meter)**2, namespace=locals()) assert foo == u.CompositeUnit(10000., [u.m], [2]) assert u.Unit('m') == u.m assert u.Unit('') == u.dimensionless_unscaled assert u.one == u.dimensionless_unscaled assert u.Unit('10 m') == ten_meter assert u.Unit(10.) == u.CompositeUnit(10., [], []) def test_invalid_power(): x = u.m ** Fraction(1, 3) assert isinstance(x.powers[0], Fraction) x = u.m ** Fraction(1, 2) assert isinstance(x.powers[0], float) # Test the automatic conversion to a fraction x = u.m ** (1. / 3.) assert isinstance(x.powers[0], Fraction) def test_invalid_compare(): assert not (u.m == u.s) def test_convert(): assert u.h._get_converter(u.s)(1) == 3600 def test_convert_fail(): with pytest.raises(u.UnitsError): u.cm.to(u.s, 1) with pytest.raises(u.UnitsError): (u.cm / u.s).to(u.m, 1) def test_composite(): assert (u.cm / u.s * u.h)._get_converter(u.m)(1) == 36 assert u.cm * u.cm == u.cm ** 2 assert u.cm * u.cm * u.cm == u.cm ** 3 assert u.Hz.to(1000 * u.Hz, 1) == 0.001 def test_str(): assert str(u.cm) == "cm" def test_repr(): assert repr(u.cm) == 'Unit("cm")' def test_represents(): assert u.m.represents is u.m assert u.km.represents.scale == 1000. assert u.km.represents.bases == [u.m] assert u.Ry.scale == 1.0 and u.Ry.bases == [u.Ry] assert_allclose(u.Ry.represents.scale, 13.605692518464949) assert u.Ry.represents.bases == [u.eV] bla = u.def_unit('bla', namespace=locals()) assert bla.represents is bla blabla = u.def_unit('blabla', 10 * u.hr, namespace=locals()) assert blabla.represents.scale == 10. 
assert blabla.represents.bases == [u.hr] assert blabla.decompose().scale == 10 * 3600 assert blabla.decompose().bases == [u.s] def test_units_conversion(): assert_allclose(u.kpc.to(u.Mpc), 0.001) assert_allclose(u.Mpc.to(u.kpc), 1000) assert_allclose(u.yr.to(u.Myr), 1.e-6) assert_allclose(u.AU.to(u.pc), 4.84813681e-6) assert_allclose(u.cycle.to(u.rad), 6.283185307179586) def test_units_manipulation(): # Just do some manipulation and check it's happy (u.kpc * u.yr) ** Fraction(1, 3) / u.Myr (u.AA * u.erg) ** 9 def test_decompose(): assert u.Ry == u.Ry.decompose() def test_dimensionless_to_si(): """ Issue #1150: Test for conversion of dimensionless quantities to the SI system """ testunit = ((1.0 * u.kpc) / (1.0 * u.Mpc)) assert testunit.unit.physical_type == 'dimensionless' assert_allclose(testunit.si, 0.001) def test_dimensionless_to_cgs(): """ Issue #1150: Test for conversion of dimensionless quantities to the CGS system """ testunit = ((1.0 * u.m) / (1.0 * u.km)) assert testunit.unit.physical_type == 'dimensionless' assert_allclose(testunit.cgs, 0.001) def test_unknown_unit(): with catch_warnings(u.UnitsWarning) as warning_lines: u.Unit("FOO", parse_strict='warn') assert 'FOO' in str(warning_lines[0].message) def test_multiple_solidus(): assert u.Unit("m/s/kg").to_string() == u.m / u.s / u.kg with catch_warnings(u.UnitsWarning) as warning_lines: assert u.Unit("m/s/kg").to_string() == u.m / (u.s * u.kg) assert 'm/s/kg' in str(warning_lines[0].message) assert 'discouraged' in str(warning_lines[0].message) with pytest.raises(ValueError): u.Unit("m/s/kg", format="vounit") def test_unknown_unit3(): unit = u.Unit("FOO", parse_strict='silent') assert isinstance(unit, u.UnrecognizedUnit) assert unit.name == "FOO" unit2 = u.Unit("FOO", parse_strict='silent') assert unit == unit2 assert unit.is_equivalent(unit2) unit3 = u.Unit("BAR", parse_strict='silent') assert unit != unit3 assert not unit.is_equivalent(unit3) # Also test basic (in)equalities. assert unit == "FOO" assert unit != u.m # next two from gh-7603. 
assert unit != None # noqa assert unit not in (None, u.m) with pytest.raises(ValueError): unit._get_converter(unit3) x = unit.to_string('latex') y = unit2.to_string('cgs') with pytest.raises(ValueError): unit4 = u.Unit("BAR", parse_strict='strict') with pytest.raises(TypeError): unit5 = u.Unit(None) @raises(TypeError) def test_invalid_scale(): x = ['a', 'b', 'c'] * u.m def test_cds_power(): unit = u.Unit("10+22/cm2", format="cds", parse_strict='silent') assert unit.scale == 1e22 def test_register(): foo = u.def_unit("foo", u.m ** 3, namespace=locals()) assert 'foo' in locals() with u.add_enabled_units(foo): assert 'foo' in u.get_current_unit_registry().registry assert 'foo' not in u.get_current_unit_registry().registry def test_in_units(): speed_unit = u.cm / u.s x = speed_unit.in_units(u.pc / u.hour, 1) def test_null_unit(): assert (u.m / u.m) == u.Unit(1) def test_unrecognized_equivalency(): assert u.m.is_equivalent('foo') is False assert u.m.is_equivalent('pc') is True @raises(TypeError) def test_unit_noarg(): u.Unit() def test_convertible_exception(): try: u.AA.to(u.h * u.s ** 2) except u.UnitsError as e: assert "length" in str(e) def test_convertible_exception2(): try: u.m.to(u.s) except u.UnitsError as e: assert "length" in str(e) @raises(TypeError) def test_invalid_type(): class A: pass u.Unit(A()) def test_steradian(): """ Issue #599 """ assert u.sr.is_equivalent(u.rad * u.rad) results = u.sr.compose(units=u.cgs.bases) assert results[0].bases[0] is u.rad results = u.sr.compose(units=u.cgs.__dict__) assert results[0].bases[0] is u.sr def test_decompose_bases(): """ From issue #576 """ from astropy.units import cgs from astropy.constants import e d = e.esu.unit.decompose(bases=cgs.bases) assert d._bases == [u.cm, u.g, u.s] assert d._powers == [Fraction(3, 2), 0.5, -1] assert d._scale == 1.0 def test_complex_compose(): complex = u.cd * u.sr * u.Wb composed = complex.compose() assert set(composed[0]._bases) == set([u.lm, u.Wb]) def test_equiv_compose(): composed = u.m.compose(equivalencies=u.spectral()) assert any([u.Hz] == x.bases for x in composed) def test_empty_compose(): with pytest.raises(u.UnitsError): composed = u.m.compose(units=[]) def _unit_as_str(unit): # This function serves two purposes - it is used to sort the units to # test alphabetically, and it is also use to allow pytest to show the unit # in the [] when running the parametrized tests. return str(unit) # We use a set to make sure we don't have any duplicates. COMPOSE_ROUNDTRIP = set() for val in u.__dict__.values(): if (isinstance(val, u.UnitBase) and not isinstance(val, u.PrefixUnit)): COMPOSE_ROUNDTRIP.add(val) @pytest.mark.parametrize('unit', sorted(COMPOSE_ROUNDTRIP, key=_unit_as_str), ids=_unit_as_str) def test_compose_roundtrip(unit): composed_list = unit.decompose().compose() found = False for composed in composed_list: if len(composed.bases): if composed.bases[0] is unit: found = True break elif len(unit.bases) == 0: found = True break assert found # We use a set to make sure we don't have any duplicates. COMPOSE_CGS_TO_SI = set() for val in u.cgs.__dict__.values(): # Can't decompose Celsius if (isinstance(val, u.UnitBase) and not isinstance(val, u.PrefixUnit) and val != u.cgs.deg_C): COMPOSE_CGS_TO_SI.add(val) @pytest.mark.parametrize('unit', sorted(COMPOSE_CGS_TO_SI, key=_unit_as_str), ids=_unit_as_str) def test_compose_cgs_to_si(unit): si = unit.to_system(u.si) assert [x.is_equivalent(unit) for x in si] assert si[0] == unit.si # We use a set to make sure we don't have any duplicates. 
COMPOSE_SI_TO_CGS = set() for val in u.si.__dict__.values(): # Can't decompose Celsius if (isinstance(val, u.UnitBase) and not isinstance(val, u.PrefixUnit) and val != u.si.deg_C): COMPOSE_SI_TO_CGS.add(val) @pytest.mark.parametrize('unit', sorted(COMPOSE_SI_TO_CGS, key=_unit_as_str), ids=_unit_as_str) def test_compose_si_to_cgs(unit): # Can't convert things with Ampere to CGS without more context try: cgs = unit.to_system(u.cgs) except u.UnitsError: if u.A in unit.decompose().bases: pass else: raise else: assert [x.is_equivalent(unit) for x in cgs] assert cgs[0] == unit.cgs def test_to_cgs(): assert u.Pa.to_system(u.cgs)[1]._bases[0] is u.Ba assert u.Pa.to_system(u.cgs)[1]._scale == 10.0 def test_decompose_to_cgs(): from astropy.units import cgs assert u.m.decompose(bases=cgs.bases)._bases[0] is cgs.cm def test_compose_issue_579(): unit = u.kg * u.s ** 2 / u.m result = unit.compose(units=[u.N, u.s, u.m]) assert len(result) == 1 assert result[0]._bases == [u.s, u.N, u.m] assert result[0]._powers == [4, 1, -2] def test_compose_prefix_unit(): x = u.m.compose(units=(u.m,)) assert x[0].bases[0] is u.m assert x[0].scale == 1.0 x = u.m.compose(units=[u.km], include_prefix_units=True) assert x[0].bases[0] is u.km assert x[0].scale == 0.001 x = u.m.compose(units=[u.km]) assert x[0].bases[0] is u.km assert x[0].scale == 0.001 x = (u.km/u.s).compose(units=(u.pc, u.Myr)) assert x[0].bases == [u.pc, u.Myr] assert_allclose(x[0].scale, 1.0227121650537077) with raises(u.UnitsError): (u.km/u.s).compose(units=(u.pc, u.Myr), include_prefix_units=False) def test_self_compose(): unit = u.kg * u.s assert len(unit.compose(units=[u.g, u.s])) == 1 @raises(u.UnitsError) def test_compose_failed(): unit = u.kg result = unit.compose(units=[u.N]) def test_compose_fractional_powers(): # Warning: with a complicated unit, this test becomes very slow; # e.g., x = (u.kg / u.s ** 3 * u.au ** 2.5 / u.yr ** 0.5 / u.sr ** 2) # takes 3 s x = u.m ** 0.5 / u.yr ** 1.5 factored = x.compose() for unit in factored: assert x.decompose() == unit.decompose() factored = x.compose(units=u.cgs) for unit in factored: assert x.decompose() == unit.decompose() factored = x.compose(units=u.si) for unit in factored: assert x.decompose() == unit.decompose() def test_compose_best_unit_first(): results = u.l.compose() assert len(results[0].bases) == 1 assert results[0].bases[0] is u.l results = (u.s ** -1).compose() assert results[0].bases[0] in (u.Hz, u.Bq) results = (u.Ry.decompose()).compose() assert results[0].bases[0] is u.Ry def test_compose_no_duplicates(): new = u.kg / u.s ** 3 * u.au ** 2.5 / u.yr ** 0.5 / u.sr ** 2 composed = new.compose(units=u.cgs.bases) assert len(composed) == 1 def test_long_int(): """ Issue #672 """ sigma = 10 ** 21 * u.M_p / u.cm ** 2 sigma.to(u.M_sun / u.pc ** 2) def test_endian_independence(): """ Regression test for #744 A logic issue in the units code meant that big endian arrays could not be converted because the dtype is '>f4', not 'float32', and the code was looking for the strings 'float' or 'int'. """ for endian in ['<', '>']: for ntype in ['i', 'f']: for byte in ['4', '8']: x = np.array([1, 2, 3], dtype=(endian + ntype + byte)) u.m.to(u.cm, x) def test_radian_base(): """ Issue #863 """ assert (1 * u.degree).si.unit == u.rad def test_no_as(): # We don't define 'as', since it is a keyword, but we # do want to define the long form (`attosecond`). 
assert not hasattr(u, 'as') assert hasattr(u, 'attosecond') def test_no_duplicates_in_names(): # Regression test for #5036 assert u.ct.names == ['ct', 'count'] assert u.ct.short_names == ['ct', 'count'] assert u.ct.long_names == ['count'] assert set(u.ph.names) == set(u.ph.short_names) | set(u.ph.long_names) def test_pickling(): p = pickle.dumps(u.m) other = pickle.loads(p) assert other is u.m new_unit = u.IrreducibleUnit(['foo'], format={'baz': 'bar'}) # This is local, so the unit should not be registered. assert 'foo' not in u.get_current_unit_registry().registry # Test pickling of this unregistered unit. p = pickle.dumps(new_unit) new_unit_copy = pickle.loads(p) assert new_unit_copy.names == ['foo'] assert new_unit_copy.get_format_name('baz') == 'bar' # It should still not be registered. assert 'foo' not in u.get_current_unit_registry().registry # Now try the same with a registered unit. with u.add_enabled_units([new_unit]): p = pickle.dumps(new_unit) assert 'foo' in u.get_current_unit_registry().registry # Check that a registered unit can be loaded and that it gets re-enabled. with u.add_enabled_units([]): assert 'foo' not in u.get_current_unit_registry().registry new_unit_copy = pickle.loads(p) assert new_unit_copy.names == ['foo'] assert new_unit_copy.get_format_name('baz') == 'bar' assert 'foo' in u.get_current_unit_registry().registry # And just to be sure, that it gets removed outside of the context. assert 'foo' not in u.get_current_unit_registry().registry def test_pickle_unrecognized_unit(): """ Issue #2047 """ a = u.Unit('asdf', parse_strict='silent') pickle.loads(pickle.dumps(a)) @raises(ValueError) def test_duplicate_define(): u.def_unit('m', namespace=u.__dict__) def test_all_units(): from astropy.units.core import get_current_unit_registry registry = get_current_unit_registry() assert len(registry.all_units) > len(registry.non_prefix_units) def test_repr_latex(): assert u.m._repr_latex_() == u.m.to_string('latex') def test_operations_with_strings(): assert u.m / '5s' == (u.m / (5.0 * u.s)) assert u.m * '5s' == (5.0 * u.m * u.s) def test_comparison(): assert u.m > u.cm assert u.m >= u.cm assert u.cm < u.m assert u.cm <= u.m with pytest.raises(u.UnitsError): u.m > u.kg def test_compose_into_arbitrary_units(): # Issue #1438 from astropy.constants import G G.decompose([u.kg, u.km, u.Unit("15 s")]) def test_unit_multiplication_with_string(): """Check that multiplication with strings produces the correct unit.""" u1 = u.cm us = 'kg' assert us * u1 == u.Unit(us) * u1 assert u1 * us == u1 * u.Unit(us) def test_unit_division_by_string(): """Check that multiplication with strings produces the correct unit.""" u1 = u.cm us = 'kg' assert us / u1 == u.Unit(us) / u1 assert u1 / us == u1 / u.Unit(us) def test_sorted_bases(): """See #1616.""" assert (u.m * u.Jy).bases == (u.Jy * u.m).bases def test_megabit(): """See #1543""" assert u.Mbit is u.Mb assert u.megabit is u.Mb assert u.Mbyte is u.MB assert u.megabyte is u.MB def test_composite_unit_get_format_name(): """See #1576""" unit1 = u.Unit('nrad/s') unit2 = u.Unit('Hz(1/2)') assert (str(u.CompositeUnit(1, [unit1, unit2], [1, -1])) == 'nrad / (Hz(1/2) s)') def test_unicode_policy(): from astropy.tests.helper import assert_follows_unicode_guidelines assert_follows_unicode_guidelines( u.degree, roundtrip=u.__dict__) def test_suggestions(): for search, matches in [ ('microns', 'micron'), ('s/microns', 'micron'), ('M', 'm'), ('metre', 'meter'), ('angstroms', 'Angstrom or angstrom'), ('milimeter', 'millimeter'), ('ångström', 'Angstrom or 
angstrom'), ('kev', 'EV, eV, kV or keV')]: try: u.Unit(search) except ValueError as e: assert 'Did you mean {0}?'.format(matches) in str(e) else: assert False, 'Expected ValueError' def test_fits_hst_unit(): """See #1911.""" x = u.Unit("erg /s /cm**2 /angstrom") assert x == u.erg * u.s ** -1 * u.cm ** -2 * u.angstrom ** -1 def test_barn_prefixes(): """Regression test for https://github.com/astropy/astropy/issues/3753""" assert u.fbarn is u.femtobarn assert u.pbarn is u.picobarn def test_fractional_powers(): """See #2069""" m = 1e9 * u.Msun tH = 1. / (70. * u.km / u.s / u.Mpc) vc = 200 * u.km/u.s x = (c.G ** 2 * m ** 2 * tH.cgs) ** Fraction(1, 3) / vc v1 = x.to('pc') x = (c.G ** 2 * m ** 2 * tH) ** Fraction(1, 3) / vc v2 = x.to('pc') x = (c.G ** 2 * m ** 2 * tH.cgs) ** (1.0 / 3.0) / vc v3 = x.to('pc') x = (c.G ** 2 * m ** 2 * tH) ** (1.0 / 3.0) / vc v4 = x.to('pc') assert_allclose(v1, v2) assert_allclose(v2, v3) assert_allclose(v3, v4) x = u.m ** (1.0 / 11.0) assert isinstance(x.powers[0], float) x = u.m ** (3.0 / 7.0) assert isinstance(x.powers[0], Fraction) assert x.powers[0].numerator == 3 assert x.powers[0].denominator == 7 x = u.cm ** Fraction(1, 2) * u.cm ** Fraction(2, 3) assert isinstance(x.powers[0], Fraction) assert x.powers[0] == Fraction(7, 6) def test_inherit_docstrings(): assert u.UnrecognizedUnit.is_unity.__doc__ == u.UnitBase.is_unity.__doc__ def test_sqrt_mag(): sqrt_mag = u.mag ** 0.5 assert hasattr(sqrt_mag.decompose().scale, 'imag') assert (sqrt_mag.decompose())**2 == u.mag def test_composite_compose(): # Issue #2382 composite_unit = u.s.compose(units=[u.Unit("s")])[0] u.s.compose(units=[composite_unit]) def test_data_quantities(): assert u.byte.is_equivalent(u.bit) def test_compare_with_none(): # Ensure that equality comparisons with `None` work, and don't # raise exceptions. We are deliberately not using `is None` here # because that doesn't trigger the bug. See #3108. assert not (u.m == None) # nopep8 assert u.m != None # nopep8 def test_validate_power_detect_fraction(): frac = utils.validate_power(1.1666666666666665) assert isinstance(frac, Fraction) assert frac.numerator == 7 assert frac.denominator == 6 def test_complex_fractional_rounding_errors(): # See #3788 kappa = 0.34 * u.cm**2 / u.g r_0 = 886221439924.7849 * u.cm q = 1.75 rho_0 = 5e-10 * u.solMass / u.solRad**3 y = 0.5 beta = 0.19047619047619049 a = 0.47619047619047628 m_h = 1e6*u.solMass t1 = 2 * c.c / (kappa * np.sqrt(np.pi)) t2 = (r_0**-q) / (rho_0 * y * beta * (a * c.G * m_h)**0.5) result = ((t1 * t2)**-0.8) assert result.unit.physical_type == 'length' result.to(u.solRad) def test_fractional_rounding_errors_simple(): x = (u.m ** 1.5) ** Fraction(4, 5) assert isinstance(x.powers[0], Fraction) assert x.powers[0].numerator == 6 assert x.powers[0].denominator == 5 def test_enable_unit_groupings(): from astropy.units import cds with cds.enable(): assert cds.geoMass in u.kg.find_equivalent_units() from astropy.units import imperial with imperial.enable(): assert imperial.inch in u.m.find_equivalent_units() def test_unit_summary_prefixes(): """ Test for a few units that the unit summary table correctly reports whether or not that unit supports prefixes. 
    Regression test for https://github.com/astropy/astropy/issues/3835
    """
    from astropy.units import astrophys

    for summary in utils._iter_unit_summary(astrophys.__dict__):
        unit, _, _, _, prefixes = summary

        if unit.name == 'lyr':
            assert prefixes
        elif unit.name == 'pc':
            assert prefixes
        elif unit.name == 'barn':
            assert prefixes
        elif unit.name == 'cycle':
            assert prefixes == 'No'
        elif unit.name == 'vox':
            assert prefixes == 'Yes'


def test_raise_to_negative_power():
    """Test that order of bases is changed when raising to negative power.

    Regression test for https://github.com/astropy/astropy/issues/8260
    """
    m2s2 = u.m ** 2 / u.s ** 2
    spm = m2s2 ** (-1 / 2)
    assert spm.bases == [u.s, u.m]
    assert spm.powers == [1, -1]
    assert spm == u.s / u.m
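# Usage sketch (not part of the test suite above): the decompose()/compose()
# pair that many of these tests exercise, going from named units down to
# irreducible bases and back to named equivalents.
if __name__ == '__main__':
    force = u.kg * u.m / u.s ** 2
    print(u.N.decompose())    # down to SI bases: 'kg m / s2'
    print(force.compose())    # named units first, e.g. [Unit("N"), ...]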
f812b25b297b6530226d9347035032779c8bf7cf99b1cc9d4af2355d77e69838
# The purpose of these tests are to ensure that calling quantities using # array methods returns quantities with the right units, or raises exceptions. import pytest import numpy as np from astropy import units as u class TestQuantityArrayCopy: """ Test whether arrays are properly copied/used in place """ def test_copy_on_creation(self): v = np.arange(1000.) q_nocopy = u.Quantity(v, "km/s", copy=False) q_copy = u.Quantity(v, "km/s", copy=True) v[0] = -1. assert q_nocopy[0].value == v[0] assert q_copy[0].value != v[0] def test_to_copies(self): q = u.Quantity(np.arange(1., 100.), "km/s") q2 = q.to(u.m/u.s) assert np.all(q.value != q2.value) q3 = q.to(u.km/u.s) assert np.all(q.value == q3.value) q[0] = -1.*u.km/u.s assert q[0].value != q3[0].value def test_si_copies(self): q = u.Quantity(np.arange(100.), "m/s") q2 = q.si assert np.all(q.value == q2.value) q[0] = -1.*u.m/u.s assert q[0].value != q2[0].value def test_getitem_is_view(self): """Check that [keys] work, and that, like ndarray, it returns a view, so that changing one changes the other. Also test that one can add axes (closes #1422) """ q = u.Quantity(np.arange(100.), "m/s") q_sel = q[10:20] q_sel[0] = -1.*u.m/u.s assert q_sel[0] == q[10] # also check that getitem can do new axes q2 = q[:, np.newaxis] q2[10, 0] = -9*u.m/u.s assert np.all(q2.flatten() == q) def test_flat(self): q = u.Quantity(np.arange(9.).reshape(3, 3), "m/s") q_flat = q.flat # check that a single item is a quantity (with the right value) assert q_flat[8] == 8. * u.m / u.s # and that getting a range works as well assert np.all(q_flat[0:2] == np.arange(2.) * u.m / u.s) # as well as getting items via iteration q_flat_list = [_q for _q in q.flat] assert np.all(u.Quantity(q_flat_list) == u.Quantity([_a for _a in q.value.flat], q.unit)) # check that flat works like a view of the real array q_flat[8] = -1. * u.km / u.s assert q_flat[8] == -1. * u.km / u.s assert q[2, 2] == -1. * u.km / u.s # while if one goes by an iterated item, a copy is made q_flat_list[8] = -2 * u.km / u.s assert q_flat_list[8] == -2. * u.km / u.s assert q_flat[8] == -1. * u.km / u.s assert q[2, 2] == -1. * u.km / u.s class TestQuantityReshapeFuncs: """Test different ndarray methods that alter the array shape tests: reshape, squeeze, ravel, flatten, transpose, swapaxes """ def test_reshape(self): q = np.arange(6.) 
* u.m q_reshape = q.reshape(3, 2) assert isinstance(q_reshape, u.Quantity) assert q_reshape.unit == q.unit assert np.all(q_reshape.value == q.value.reshape(3, 2)) def test_squeeze(self): q = np.arange(6.).reshape(6, 1) * u.m q_squeeze = q.squeeze() assert isinstance(q_squeeze, u.Quantity) assert q_squeeze.unit == q.unit assert np.all(q_squeeze.value == q.value.squeeze()) def test_ravel(self): q = np.arange(6.).reshape(3, 2) * u.m q_ravel = q.ravel() assert isinstance(q_ravel, u.Quantity) assert q_ravel.unit == q.unit assert np.all(q_ravel.value == q.value.ravel()) def test_flatten(self): q = np.arange(6.).reshape(3, 2) * u.m q_flatten = q.flatten() assert isinstance(q_flatten, u.Quantity) assert q_flatten.unit == q.unit assert np.all(q_flatten.value == q.value.flatten()) def test_transpose(self): q = np.arange(6.).reshape(3, 2) * u.m q_transpose = q.transpose() assert isinstance(q_transpose, u.Quantity) assert q_transpose.unit == q.unit assert np.all(q_transpose.value == q.value.transpose()) def test_swapaxes(self): q = np.arange(6.).reshape(3, 1, 2) * u.m q_swapaxes = q.swapaxes(0, 2) assert isinstance(q_swapaxes, u.Quantity) assert q_swapaxes.unit == q.unit assert np.all(q_swapaxes.value == q.value.swapaxes(0, 2)) class TestQuantityStatsFuncs: """ Test statistical functions """ def test_mean(self): q1 = np.array([1., 2., 4., 5., 6.]) * u.m assert np.mean(q1) == 3.6 * u.m def test_mean_inplace(self): q1 = np.array([1., 2., 4., 5., 6.]) * u.m qi = 1.5 * u.s qi2 = np.mean(q1, out=qi) assert qi2 is qi assert qi == 3.6 * u.m def test_std(self): q1 = np.array([1., 2.]) * u.m assert np.std(q1) == 0.5 * u.m def test_std_inplace(self): q1 = np.array([1., 2.]) * u.m qi = 1.5 * u.s np.std(q1, out=qi) assert qi == 0.5 * u.m def test_var(self): q1 = np.array([1., 2.]) * u.m assert np.var(q1) == 0.25 * u.m ** 2 def test_var_inplace(self): q1 = np.array([1., 2.]) * u.m qi = 1.5 * u.s np.var(q1, out=qi) assert qi == 0.25 * u.m ** 2 def test_median(self): q1 = np.array([1., 2., 4., 5., 6.]) * u.m assert np.median(q1) == 4. * u.m def test_median_inplace(self): q1 = np.array([1., 2., 4., 5., 6.]) * u.m qi = 1.5 * u.s np.median(q1, out=qi) assert qi == 4 * u.m def test_min(self): q1 = np.array([1., 2., 4., 5., 6.]) * u.m assert np.min(q1) == 1. * u.m def test_min_inplace(self): q1 = np.array([1., 2., 4., 5., 6.]) * u.m qi = 1.5 * u.s np.min(q1, out=qi) assert qi == 1. * u.m def test_argmin(self): q1 = np.array([6., 2., 4., 5., 6.]) * u.m assert np.argmin(q1) == 1 def test_max(self): q1 = np.array([1., 2., 4., 5., 6.]) * u.m assert np.max(q1) == 6. * u.m def test_max_inplace(self): q1 = np.array([1., 2., 4., 5., 6.]) * u.m qi = 1.5 * u.s np.max(q1, out=qi) assert qi == 6. * u.m def test_argmax(self): q1 = np.array([5., 2., 4., 5., 6.]) * u.m assert np.argmax(q1) == 4 def test_clip(self): q1 = np.array([1., 2., 4., 5., 6.]) * u.km / u.m c1 = q1.clip(1500, 5.5 * u.Mm / u.km) assert np.all(c1 == np.array([1.5, 2., 4., 5., 5.5]) * u.km / u.m) def test_clip_inplace(self): q1 = np.array([1., 2., 4., 5., 6.]) * u.km / u.m c1 = q1.clip(1500, 5.5 * u.Mm / u.km, out=q1) assert np.all(q1 == np.array([1.5, 2., 4., 5., 5.5]) * u.km / u.m) c1[0] = 10 * u.Mm/u.mm assert np.all(c1.value == q1.value) def test_conj(self): q1 = np.array([1., 2., 4., 5., 6.]) * u.km / u.m assert np.all(q1.conj() == q1) def test_ptp(self): q1 = np.array([1., 2., 4., 5., 6.]) * u.m assert np.ptp(q1) == 5. * u.m def test_ptp_inplace(self): q1 = np.array([1., 2., 4., 5., 6.]) * u.m qi = 1.5 * u.s np.ptp(q1, out=qi) assert qi == 5. 
* u.m def test_round(self): q1 = np.array([1.253, 2.253, 3.253]) * u.kg assert np.all(np.round(q1) == np.array([1, 2, 3]) * u.kg) assert np.all(np.round(q1, decimals=2) == np.round(q1.value, decimals=2) * u.kg) assert np.all(q1.round(decimals=2) == q1.value.round(decimals=2) * u.kg) def test_round_inplace(self): q1 = np.array([1.253, 2.253, 3.253]) * u.kg qi = np.zeros(3) * u.s a = q1.round(decimals=2, out=qi) assert a is qi assert np.all(q1.round(decimals=2) == qi) def test_sum(self): q1 = np.array([1., 2., 6.]) * u.m assert np.all(q1.sum() == 9. * u.m) assert np.all(np.sum(q1) == 9. * u.m) q2 = np.array([[4., 5., 9.], [1., 1., 1.]]) * u.s assert np.all(q2.sum(0) == np.array([5., 6., 10.]) * u.s) assert np.all(np.sum(q2, 0) == np.array([5., 6., 10.]) * u.s) def test_sum_inplace(self): q1 = np.array([1., 2., 6.]) * u.m qi = 1.5 * u.s np.sum(q1, out=qi) assert qi == 9. * u.m def test_cumsum(self): q1 = np.array([1, 2, 6]) * u.m assert np.all(q1.cumsum() == np.array([1, 3, 9]) * u.m) assert np.all(np.cumsum(q1) == np.array([1, 3, 9]) * u.m) q2 = np.array([4, 5, 9]) * u.s assert np.all(q2.cumsum() == np.array([4, 9, 18]) * u.s) assert np.all(np.cumsum(q2) == np.array([4, 9, 18]) * u.s) def test_cumsum_inplace(self): q1 = np.array([1, 2, 6]) * u.m qi = np.ones(3) * u.s np.cumsum(q1, out=qi) assert np.all(qi == np.array([1, 3, 9]) * u.m) q2 = q1 q1.cumsum(out=q1) assert np.all(q2 == qi) def test_nansum(self): q1 = np.array([1., 2., np.nan]) * u.m assert np.all(q1.nansum() == 3. * u.m) assert np.all(np.nansum(q1) == 3. * u.m) q2 = np.array([[np.nan, 5., 9.], [1., np.nan, 1.]]) * u.s assert np.all(q2.nansum(0) == np.array([1., 5., 10.]) * u.s) assert np.all(np.nansum(q2, 0) == np.array([1., 5., 10.]) * u.s) def test_nansum_inplace(self): q1 = np.array([1., 2., np.nan]) * u.m qi = 1.5 * u.s qout = q1.nansum(out=qi) assert qout is qi assert qi == np.nansum(q1.value) * q1.unit qi2 = 1.5 * u.s qout2 = np.nansum(q1, out=qi2) assert qout2 is qi2 assert qi2 == np.nansum(q1.value) * q1.unit def test_prod(self): q1 = np.array([1, 2, 6]) * u.m with pytest.raises(u.UnitsError) as exc: q1.prod() with pytest.raises(u.UnitsError) as exc: np.prod(q1) q2 = np.array([3., 4., 5.]) * u.Unit(1) assert q2.prod() == 60. * u.Unit(1) assert np.prod(q2) == 60. * u.Unit(1) def test_cumprod(self): q1 = np.array([1, 2, 6]) * u.m with pytest.raises(u.UnitsError) as exc: q1.cumprod() with pytest.raises(u.UnitsError) as exc: np.cumprod(q1) q2 = np.array([3, 4, 5]) * u.Unit(1) assert np.all(q2.cumprod() == np.array([3, 12, 60]) * u.Unit(1)) assert np.all(np.cumprod(q2) == np.array([3, 12, 60]) * u.Unit(1)) def test_diff(self): q1 = np.array([1., 2., 4., 10.]) * u.m assert np.all(q1.diff() == np.array([1., 2., 6.]) * u.m) assert np.all(np.diff(q1) == np.array([1., 2., 6.]) * u.m) def test_ediff1d(self): q1 = np.array([1., 2., 4., 10.]) * u.m assert np.all(q1.ediff1d() == np.array([1., 2., 6.]) * u.m) assert np.all(np.ediff1d(q1) == np.array([1., 2., 6.]) * u.m) @pytest.mark.xfail def test_dot_func(self): q1 = np.array([1., 2., 4., 10.]) * u.m q2 = np.array([3., 4., 5., 6.]) * u.s q3 = np.dot(q1, q2) assert q3.value == np.dot(q1.value, q2.value) assert q3.unit == u.m * u.s def test_dot_meth(self): q1 = np.array([1., 2., 4., 10.]) * u.m q2 = np.array([3., 4., 5., 6.]) * u.s q3 = q1.dot(q2) assert q3.value == np.dot(q1.value, q2.value) assert q3.unit == u.m * u.s def test_trace_func(self): q = np.array([[1., 2.], [3., 4.]]) * u.m assert np.trace(q) == 5. 
* u.m def test_trace_meth(self): q1 = np.array([[1., 2.], [3., 4.]]) * u.m assert q1.trace() == 5. * u.m cont = u.Quantity(4., u.s) q2 = np.array([[3., 4.], [5., 6.]]) * u.m q2.trace(out=cont) assert cont == 9. * u.m def test_clip_func(self): q = np.arange(10) * u.m assert np.all(np.clip(q, 3 * u.m, 6 * u.m) == np.array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.]) * u.m) def test_clip_meth(self): expected = np.array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.]) * u.m q1 = np.arange(10) * u.m q3 = q1.clip(3 * u.m, 6 * u.m) assert np.all(q1.clip(3 * u.m, 6 * u.m) == expected) cont = np.zeros(10) * u.s q1.clip(3 * u.m, 6 * u.m, out=cont) assert np.all(cont == expected) class TestArrayConversion: """ Test array conversion methods """ def test_item(self): q1 = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int) assert q1.item(1) == 2 * q1.unit q1.itemset(1, 1) assert q1.item(1) == 1000 * u.m / u.km q1.itemset(1, 100 * u.cm / u.km) assert q1.item(1) == 1 * u.m / u.km with pytest.raises(TypeError): q1.itemset(1, 1.5 * u.m / u.km) with pytest.raises(ValueError): q1.itemset() q1[1] = 1 assert q1[1] == 1000 * u.m / u.km q1[1] = 100 * u.cm / u.km assert q1[1] == 1 * u.m / u.km with pytest.raises(TypeError): q1[1] = 1.5 * u.m / u.km def test_take_put(self): q1 = np.array([1, 2, 3]) * u.m / u.km assert q1.take(1) == 2 * u.m / u.km assert all(q1.take((0, 2)) == np.array([1, 3]) * u.m / u.km) q1.put((1, 2), (3, 4)) assert np.all(q1.take((1, 2)) == np.array([3000, 4000]) * q1.unit) q1.put(0, 500 * u.cm / u.km) assert q1.item(0) == 5 * u.m / u.km def test_slice(self): """Test that setitem changes the unit if needed (or ignores it for values where that is allowed; viz., #2695)""" q2 = np.array([[1., 2., 3.], [4., 5., 6.]]) * u.km / u.m q1 = q2.copy() q2[0, 0] = 10000. assert q2.unit == q1.unit assert q2[0, 0].value == 10. q2[0] = 9. * u.Mm / u.km assert all(q2.flatten()[:3].value == np.array([9., 9., 9.])) q2[0, :-1] = 8000. assert all(q2.flatten()[:3].value == np.array([8., 8., 9.])) with pytest.raises(u.UnitsError): q2[1, 1] = 10 * u.s # just to be sure, repeat with a dimensionfull unit q3 = u.Quantity(np.arange(10.), "m/s") q3[5] = 100. * u.cm / u.s assert q3[5].value == 1. # and check unit is ignored for 0, inf, nan, where that is reasonable q3[5] = 0. assert q3[5] == 0. 
q3[5] = np.inf assert np.isinf(q3[5]) q3[5] = np.nan assert np.isnan(q3[5]) def test_fill(self): q1 = np.array([1, 2, 3]) * u.m / u.km q1.fill(2) assert np.all(q1 == 2000 * u.m / u.km) def test_repeat_compress_diagonal(self): q1 = np.array([1, 2, 3]) * u.m / u.km q2 = q1.repeat(2) assert q2.unit == q1.unit assert all(q2.value == q1.value.repeat(2)) q2.sort() assert q2.unit == q1.unit q2 = q1.compress(np.array([True, True, False, False])) assert q2.unit == q1.unit assert all(q2.value == q1.value.compress(np.array([True, True, False, False]))) q1 = np.array([[1, 2], [3, 4]]) * u.m / u.km q2 = q1.diagonal() assert q2.unit == q1.unit assert all(q2.value == q1.value.diagonal()) def test_view(self): q1 = np.array([1, 2, 3], dtype=np.int64) * u.m / u.km q2 = q1.view(np.ndarray) assert not hasattr(q2, 'unit') q3 = q2.view(u.Quantity) assert q3._unit is None # MaskedArray copies and properties assigned in __dict__ q4 = np.ma.MaskedArray(q1) assert q4._unit is q1._unit q5 = q4.view(u.Quantity) assert q5.unit is q1.unit def test_slice_to_quantity(self): """ Regression test for https://github.com/astropy/astropy/issues/2003 """ a = np.random.uniform(size=(10, 8)) x, y, z = a[:, 1:4].T * u.km/u.s total = np.sum(a[:, 1] * u.km / u.s - x) assert isinstance(total, u.Quantity) assert total == (0.0 * u.km / u.s) def test_byte_type_view_field_changes(self): q1 = np.array([1, 2, 3], dtype=np.int64) * u.m / u.km q2 = q1.byteswap() assert q2.unit == q1.unit assert all(q2.value == q1.value.byteswap()) q2 = q1.astype(np.float64) assert all(q2 == q1) assert q2.dtype == np.float64 q2a = q1.getfield(np.int32, offset=0) q2b = q1.byteswap().getfield(np.int32, offset=4) assert q2a.unit == q1.unit assert all(q2b.byteswap() == q2a) def test_sort(self): q1 = np.array([1., 5., 2., 4.]) * u.km / u.m i = q1.argsort() assert not hasattr(i, 'unit') q1.sort() i = q1.searchsorted([1500, 2500]) assert not hasattr(i, 'unit') assert all(i == q1.to( u.dimensionless_unscaled).value.searchsorted([1500, 2500])) def test_not_implemented(self): q1 = np.array([1, 2, 3]) * u.m / u.km with pytest.raises(NotImplementedError): q1.choose([0, 0, 1]) with pytest.raises(NotImplementedError): q1.tolist() with pytest.raises(NotImplementedError): q1.tostring() with pytest.raises(NotImplementedError): q1.tofile(0) with pytest.raises(NotImplementedError): q1.dump('a.a') with pytest.raises(NotImplementedError): q1.dumps() class TestRecArray: """Record arrays are not specifically supported, but we should not prevent their use unnecessarily""" def setup(self): self.ra = (np.array(np.arange(12.).reshape(4, 3)) .view(dtype=('f8,f8,f8')).squeeze()) def test_creation(self): qra = u.Quantity(self.ra, u.m) assert np.all(qra[:2].value == self.ra[:2]) def test_equality(self): qra = u.Quantity(self.ra, u.m) qra[1] = qra[2] assert qra[1] == qra[2]
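# Usage sketch (not part of the test suite above): the behaviour these
# classes verify -- ndarray methods on a Quantity keep the unit, and slices
# are views that convert assigned values into the parent's unit.
if __name__ == '__main__':
    q = np.arange(6.).reshape(3, 2) * u.m
    print(q.T.unit, np.mean(q))   # unit survives transpose and reductions
    row = q[0]                    # a view, not a copy
    row[0] = 100 * u.cm           # converted to m and written in place
    assert q[0, 0] == 1. * u.m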
344ddaee761ef9579cc1cd14d7d22070820bb2fddbce82113e29ee0a0b022025
# coding: utf-8 # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Test the Logarithmic Units and Quantities """ import pickle import itertools import pytest import numpy as np from numpy.testing import assert_allclose from astropy.tests.helper import assert_quantity_allclose from astropy import units as u, constants as c lu_units = [u.dex, u.mag, u.decibel] lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit] lq_subclasses = [u.Dex, u.Magnitude, u.Decibel] pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy) class TestLogUnitCreation: def test_logarithmic_units(self): """Check logarithmic units are set up correctly.""" assert u.dB.to(u.dex) == 0.1 assert u.dex.to(u.mag) == -2.5 assert u.mag.to(u.dB) == -4 @pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses)) def test_callable_units(self, lu_unit, lu_cls): assert isinstance(lu_unit, u.UnitBase) assert callable(lu_unit) assert lu_unit._function_unit_class is lu_cls @pytest.mark.parametrize('lu_unit', lu_units) def test_equality_to_normal_unit_for_dimensionless(self, lu_unit): lu = lu_unit() assert lu == lu._default_function_unit # eg, MagUnit() == u.mag assert lu._default_function_unit == lu # and u.mag == MagUnit() @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_call_units(self, lu_unit, physical_unit): """Create a LogUnit subclass using the callable unit and physical unit, and do basic check that output is right.""" lu1 = lu_unit(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit def test_call_invalid_unit(self): with pytest.raises(TypeError): u.mag([]) with pytest.raises(ValueError): u.mag(u.mag()) @pytest.mark.parametrize('lu_cls, physical_unit', itertools.product( lu_subclasses + [u.LogUnit], pu_sample)) def test_subclass_creation(self, lu_cls, physical_unit): """Create a LogUnit subclass object for given physical unit, and do basic check that output is right.""" lu1 = lu_cls(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit lu2 = lu_cls(physical_unit, function_unit=2*lu1._default_function_unit) assert lu2.physical_unit == physical_unit assert lu2.function_unit == u.Unit(2*lu2._default_function_unit) with pytest.raises(ValueError): lu_cls(physical_unit, u.m) def test_lshift_magnitude(self): mag = 1. << u.ABmag assert isinstance(mag, u.Magnitude) assert mag.unit == u.ABmag assert mag.value == 1. # same test for an array, which should produce a view a2 = np.arange(10.) q2 = a2 << u.ABmag assert isinstance(q2, u.Magnitude) assert q2.unit == u.ABmag assert np.all(q2.value == a2) a2[9] = 0. assert np.all(q2.value == a2) # a different magnitude unit mag = 10. << u.STmag assert isinstance(mag, u.Magnitude) assert mag.unit == u.STmag assert mag.value == 10. def test_ilshift_magnitude(self): # test in-place operation and conversion mag_fnu_cgs = u.mag(u.erg/u.s/u.cm**2/u.Hz) m = np.arange(10.0) * u.mag(u.Jy) jy = m.physical m2 = m << mag_fnu_cgs assert np.all(m2 == m.to(mag_fnu_cgs)) m2 = m m <<= mag_fnu_cgs assert m is m2 # Check it was done in-place! assert np.all(m.value == m2.value) assert m.unit == mag_fnu_cgs # Check it works if equivalencies are in-place. 
with u.add_enabled_equivalencies(u.spectral_density(5500*u.AA)): st = jy.to(u.ST) m <<= u.STmag assert m is m2 assert_quantity_allclose(m.physical, st) assert m.unit == u.STmag def test_lshift_errors(self): m = np.arange(10.0) * u.mag(u.Jy) with pytest.raises(u.UnitsError): m << u.STmag with pytest.raises(u.UnitsError): m << u.Jy with pytest.raises(u.UnitsError): m <<= u.STmag with pytest.raises(u.UnitsError): m <<= u.Jy def test_predefined_magnitudes(): assert_quantity_allclose((-21.1*u.STmag).physical, 1.*u.erg/u.cm**2/u.s/u.AA) assert_quantity_allclose((-48.6*u.ABmag).physical, 1.*u.erg/u.cm**2/u.s/u.Hz) assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0) assert_quantity_allclose((0*u.m_bol).physical, c.L_bol0/(4.*np.pi*(10.*c.pc)**2)) def test_predefined_reinitialisation(): assert u.mag('STflux') == u.STmag assert u.mag('ABflux') == u.ABmag assert u.mag('Bol') == u.M_bol assert u.mag('bol') == u.m_bol # required for backwards-compatibility, at least unless deprecated assert u.mag('ST') == u.STmag assert u.mag('AB') == u.ABmag def test_predefined_string_roundtrip(): """Ensure round-tripping; see #5015""" with u.magnitude_zero_points.enable(): assert u.Unit(u.STmag.to_string()) == u.STmag assert u.Unit(u.ABmag.to_string()) == u.ABmag assert u.Unit(u.M_bol.to_string()) == u.M_bol assert u.Unit(u.m_bol.to_string()) == u.m_bol def test_inequality(): """Check __ne__ works (regresssion for #5342).""" lu1 = u.mag(u.Jy) lu2 = u.dex(u.Jy) lu3 = u.mag(u.Jy**2) lu4 = lu3 - lu1 assert lu1 != lu2 assert lu1 != lu3 assert lu1 == lu4 class TestLogUnitStrings: def test_str(self): """Do some spot checks that str, repr, etc. work as expected.""" lu1 = u.mag(u.Jy) assert str(lu1) == 'mag(Jy)' assert repr(lu1) == 'Unit("mag(Jy)")' assert lu1.to_string('generic') == 'mag(Jy)' with pytest.raises(ValueError): lu1.to_string('fits') lu2 = u.dex() assert str(lu2) == 'dex' assert repr(lu2) == 'Unit("dex(1)")' assert lu2.to_string() == 'dex(1)' lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag) assert str(lu3) == '2 mag(Jy)' assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")' assert lu3.to_string() == '2 mag(Jy)' lu4 = u.mag(u.ct) assert lu4.to_string('generic') == 'mag(ct)' assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( ' '\\mathrm{ct} \\right)}$') assert lu4._repr_latex_() == lu4.to_string('latex') class TestLogUnitConversion: @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_physical_unit_conversion(self, lu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to their non-log counterparts.""" lu1 = lu_unit(physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(physical_unit, 0.) == 1. assert physical_unit.is_equivalent(lu1) assert physical_unit.to(lu1, 1.) == 0. pu = u.Unit(8.*physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(pu, 0.) == 0.125 assert pu.is_equivalent(lu1) assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15) # Check we round-trip. value = np.linspace(0., 10., 6) assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15) # And that we're not just returning True all the time. 
pu2 = u.g assert not lu1.is_equivalent(pu2) with pytest.raises(u.UnitsError): lu1.to(pu2) assert not pu2.is_equivalent(lu1) with pytest.raises(u.UnitsError): pu2.to(lu1) @pytest.mark.parametrize('lu_unit', lu_units) def test_container_unit_conversion(self, lu_unit): """Check that conversion to logarithmic units (u.mag, u.dB, u.dex) is only possible when the physical unit is dimensionless.""" values = np.linspace(0., 10., 6) lu1 = lu_unit(u.dimensionless_unscaled) assert lu1.is_equivalent(lu1.function_unit) assert_allclose(lu1.to(lu1.function_unit, values), values) lu2 = lu_unit(u.Jy) assert not lu2.is_equivalent(lu2.function_unit) with pytest.raises(u.UnitsError): lu2.to(lu2.function_unit, values) @pytest.mark.parametrize( 'flu_unit, tlu_unit, physical_unit', itertools.product(lu_units, lu_units, pu_sample)) def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to each other if they correspond to equivalent physical units.""" values = np.linspace(0., 10., 6) flu = flu_unit(physical_unit) tlu = tlu_unit(physical_unit) assert flu.is_equivalent(tlu) assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit)) assert_allclose(flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit)) tlu2 = tlu_unit(u.Unit(100.*physical_unit)) assert flu.is_equivalent(tlu2) # Check that we round-trip. assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15) tlu3 = tlu_unit(physical_unit.to_system(u.si)[0]) assert flu.is_equivalent(tlu3) assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15) tlu4 = tlu_unit(u.g) assert not flu.is_equivalent(tlu4) with pytest.raises(u.UnitsError): flu.to(tlu4, values) def test_unit_decomposition(self): lu = u.mag(u.Jy) assert lu.decompose() == u.mag(u.Jy.decompose()) assert lu.decompose().physical_unit.bases == [u.kg, u.s] assert lu.si == u.mag(u.Jy.si) assert lu.si.physical_unit.bases == [u.kg, u.s] assert lu.cgs == u.mag(u.Jy.cgs) assert lu.cgs.physical_unit.bases == [u.g, u.s] def test_unit_multiple_possible_equivalencies(self): lu = u.mag(u.Jy) assert lu.is_equivalent(pu_sample) def test_magnitude_conversion_fails_message(self): """Check that "dimensionless" magnitude units include a message in their exception text suggesting a possible cause of the problem. """ with pytest.raises(u.UnitConversionError) as excinfo: (10*u.ABmag - 2*u.ABmag).to(u.nJy) assert "Did you perhaps subtract magnitudes so the unit got lost?" in str(excinfo.value) class TestLogUnitArithmetic: def test_multiplication_division(self): """Check that multiplication/division with other units is only possible when the physical unit is dimensionless, and that this turns the unit into a normal one.""" lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 * u.m with pytest.raises(u.UnitsError): u.m * lu1 with pytest.raises(u.UnitsError): lu1 / lu1 for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex): with pytest.raises(u.UnitsError): lu1 / unit lu2 = u.mag(u.dimensionless_unscaled) with pytest.raises(u.UnitsError): lu2 * lu1 with pytest.raises(u.UnitsError): lu2 / lu1 # But dimensionless_unscaled can be cancelled. assert lu2 / lu2 == u.dimensionless_unscaled # With dimensionless, normal units are OK, but we return a plain unit. 
tf = lu2 * u.m tr = u.m * lu2 for t in (tf, tr): assert not isinstance(t, type(lu2)) assert t == lu2.function_unit * u.m with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(lu2.physical_unit) # Now we essentially have a LogUnit with a prefactor of 100, # so should be equivalent again. t = tf / u.cm with u.set_enabled_equivalencies(u.logarithmic()): assert t.is_equivalent(lu2.function_unit) assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.), lu2.to(lu2.physical_unit, np.arange(3.))) # If we effectively remove lu1, a normal unit should be returned. t2 = tf / lu2 assert not isinstance(t2, type(lu2)) assert t2 == u.m t3 = tf / lu2.function_unit assert not isinstance(t3, type(lu2)) assert t3 == u.m # For completeness, also ensure non-sensical operations fail with pytest.raises(TypeError): lu1 * object() with pytest.raises(TypeError): slice(None) * lu1 with pytest.raises(TypeError): lu1 / [] with pytest.raises(TypeError): 1 / lu1 @pytest.mark.parametrize('power', (2, 0.5, 1, 0)) def test_raise_to_power(self, power): """Check that raising LogUnits to some power is only possible when the physical unit is dimensionless, and that conversion is turned off when the resulting logarithmic unit (such as mag**2) is incompatible.""" lu1 = u.mag(u.Jy) if power == 0: assert lu1 ** power == u.dimensionless_unscaled elif power == 1: assert lu1 ** power == lu1 else: with pytest.raises(u.UnitsError): lu1 ** power # With dimensionless, though, it works, but returns a normal unit. lu2 = u.mag(u.dimensionless_unscaled) t = lu2**power if power == 0: assert t == u.dimensionless_unscaled elif power == 1: assert t == lu2 else: assert not isinstance(t, type(lu2)) assert t == lu2.function_unit**power # also check we roundtrip t2 = t**(1./power) assert t2 == lu2.function_unit with u.set_enabled_equivalencies(u.logarithmic()): assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)), lu2.to(lu2.physical_unit, np.arange(3.))) @pytest.mark.parametrize('other', pu_sample) def test_addition_subtraction_to_normal_units_fails(self, other): lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 + other with pytest.raises(u.UnitsError): lu1 - other with pytest.raises(u.UnitsError): other - lu1 def test_addition_subtraction_to_non_units_fails(self): lu1 = u.mag(u.Jy) with pytest.raises(TypeError): lu1 + 1. with pytest.raises(TypeError): lu1 - [1., 2., 3.] 
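    # Adding or subtracting logarithmic units composes the physical units:
    # lu1 + other multiplies them and lu1 - other divides them (addition in
    # the log is multiplication of the physical quantities). The parametrized
    # test below checks this for plain mag, mag(), mag(Jy), mag(m), and
    # scaled MagUnits.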
@pytest.mark.parametrize( 'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag))) def test_addition_subtraction(self, other): """Check physical units are changed appropriately""" lu1 = u.mag(u.Jy) other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled) lu_sf = lu1 + other assert lu_sf.is_equivalent(lu1.physical_unit * other_pu) lu_sr = other + lu1 assert lu_sr.is_equivalent(lu1.physical_unit * other_pu) lu_df = lu1 - other assert lu_df.is_equivalent(lu1.physical_unit / other_pu) lu_dr = other - lu1 assert lu_dr.is_equivalent(other_pu / lu1.physical_unit) def test_complicated_addition_subtraction(self): """for fun, a more complicated example of addition and subtraction""" dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2)) lu_dm = u.mag(dm0) lu_absST = u.STmag - lu_dm assert lu_absST.is_equivalent(u.erg/u.s/u.AA) def test_neg_pos(self): lu1 = u.mag(u.Jy) neg_lu = -lu1 assert neg_lu != lu1 assert neg_lu.physical_unit == u.Jy**-1 assert -neg_lu == lu1 pos_lu = +lu1 assert pos_lu is not lu1 assert pos_lu == lu1 def test_pickle(): lu1 = u.dex(u.cm/u.s**2) s = pickle.dumps(lu1) lu2 = pickle.loads(s) assert lu1 == lu2 def test_hashable(): lu1 = u.dB(u.mW) lu2 = u.dB(u.m) lu3 = u.dB(u.mW) assert hash(lu1) != hash(lu2) assert hash(lu1) == hash(lu3) luset = {lu1, lu2, lu3} assert len(luset) == 2 class TestLogQuantityCreation: @pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity], lu_subclasses + [u.LogUnit])) def test_logarithmic_quantities(self, lq, lu): """Check logarithmic quantities are all set up correctly""" assert lq._unit_class == lu assert type(lu()._quantity_class(1.)) is lq @pytest.mark.parametrize('lq_cls, physical_unit', itertools.product(lq_subclasses, pu_sample)) def test_subclass_creation(self, lq_cls, physical_unit): """Create LogQuantity subclass objects for some physical units, and basic check on transformations""" value = np.arange(1., 10.) log_q = lq_cls(value * physical_unit) assert log_q.unit.physical_unit == physical_unit assert log_q.unit.function_unit == log_q.unit._default_function_unit assert_allclose(log_q.physical.value, value) with pytest.raises(ValueError): lq_cls(value, physical_unit) @pytest.mark.parametrize( 'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_different_units(self, unit): q = u.Magnitude(1.23, unit) assert q.unit.function_unit == getattr(unit, 'function_unit', unit) assert q.unit.physical_unit is getattr(unit, 'physical_unit', u.dimensionless_unscaled) @pytest.mark.parametrize('value, unit', ( (1.*u.mag(u.Jy), None), (1.*u.dex(u.Jy), None), (1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)), (1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy)))) def test_function_values(self, value, unit): lq = u.Magnitude(value, unit) assert lq == value assert lq.unit.function_unit == u.mag assert lq.unit.physical_unit == getattr(unit, 'physical_unit', value.unit.physical_unit) @pytest.mark.parametrize( 'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_indirect_creation(self, unit): q1 = 2.5 * unit assert isinstance(q1, u.Magnitude) assert q1.value == 2.5 assert q1.unit == unit pv = 100. * unit.physical_unit q2 = unit * pv assert q2.unit == unit assert q2.unit.physical_unit == pv.unit assert q2.to_value(unit.physical_unit) == 100. assert (q2._function_view / u.mag).to_value(1) == -5. 
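        # Dividing the unit by a plain number is one more indirect creation
        # path; unit / 0.4 is the same magnitude as 2.5 * unit above.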
q3 = unit / 0.4 assert q3 == q1 def test_from_view(self): # Cannot view a physical quantity as a function quantity, since the # values would change. q = [100., 1000.] * u.cm/u.s**2 with pytest.raises(TypeError): q.view(u.Dex) # But fine if we have the right magnitude. q = [2., 3.] * u.dex lq = q.view(u.Dex) assert isinstance(lq, u.Dex) assert lq.unit.physical_unit == u.dimensionless_unscaled assert np.all(q == lq) def test_using_quantity_class(self): """Check that we can use Quantity if we have subok=True""" # following issue #5851 lu = u.dex(u.AA) with pytest.raises(u.UnitTypeError): u.Quantity(1., lu) q = u.Quantity(1., lu, subok=True) assert type(q) is lu._quantity_class def test_conversion_to_and_from_physical_quantities(): """Ensures we can convert from regular quantities.""" mst = [10., 12., 14.] * u.STmag flux_lambda = mst.physical mst_roundtrip = flux_lambda.to(u.STmag) # check we return a logquantity; see #5178. assert isinstance(mst_roundtrip, u.Magnitude) assert mst_roundtrip.unit == mst.unit assert_allclose(mst_roundtrip.value, mst.value) wave = [4956.8, 4959.55, 4962.3] * u.AA flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave)) mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave)) assert isinstance(mst_roundtrip2, u.Magnitude) assert mst_roundtrip2.unit == mst.unit assert_allclose(mst_roundtrip2.value, mst.value) def test_quantity_decomposition(): lq = 10.*u.mag(u.Jy) assert lq.decompose() == lq assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s] assert lq.si == lq assert lq.si.unit.physical_unit.bases == [u.kg, u.s] assert lq.cgs == lq assert lq.cgs.unit.physical_unit.bases == [u.g, u.s] class TestLogQuantityViews: def setup(self): self.lq = u.Magnitude(np.arange(10.) * u.Jy) self.lq2 = u.Magnitude(np.arange(5.)) def test_value_view(self): lq_value = self.lq.value assert type(lq_value) is np.ndarray lq_value[2] = -1. assert np.all(self.lq.value == lq_value) def test_function_view(self): lq_fv = self.lq._function_view assert type(lq_fv) is u.Quantity assert lq_fv.unit is self.lq.unit.function_unit lq_fv[3] = -2. * lq_fv.unit assert np.all(self.lq.value == lq_fv.value) def test_quantity_view(self): # Cannot view as Quantity, since the unit cannot be represented. with pytest.raises(TypeError): self.lq.view(u.Quantity) # But a dimensionless one is fine. 
q2 = self.lq2.view(u.Quantity) assert q2.unit is u.mag assert np.all(q2.value == self.lq2.value) lq3 = q2.view(u.Magnitude) assert type(lq3.unit) is u.MagUnit assert lq3.unit.physical_unit == u.dimensionless_unscaled assert np.all(lq3 == self.lq2) class TestLogQuantitySlicing: def test_item_get_and_set(self): lq1 = u.Magnitude(np.arange(1., 11.)*u.Jy) assert lq1[9] == u.Magnitude(10.*u.Jy) lq1[2] = 100.*u.Jy assert lq1[2] == u.Magnitude(100.*u.Jy) with pytest.raises(u.UnitsError): lq1[2] = 100.*u.m with pytest.raises(u.UnitsError): lq1[2] = 100.*u.mag with pytest.raises(u.UnitsError): lq1[2] = u.Magnitude(100.*u.m) assert lq1[2] == u.Magnitude(100.*u.Jy) def test_slice_get_and_set(self): lq1 = u.Magnitude(np.arange(1., 10.)*u.Jy) lq1[2:4] = 100.*u.Jy assert np.all(lq1[2:4] == u.Magnitude(100.*u.Jy)) with pytest.raises(u.UnitsError): lq1[2:4] = 100.*u.m with pytest.raises(u.UnitsError): lq1[2:4] = 100.*u.mag with pytest.raises(u.UnitsError): lq1[2:4] = u.Magnitude(100.*u.m) assert np.all(lq1[2] == u.Magnitude(100.*u.Jy)) class TestLogQuantityArithmetic: def test_multiplication_division(self): """Check that multiplication/division with other quantities is only possible when the physical unit is dimensionless, and that this turns the result into a normal quantity.""" lq = u.Magnitude(np.arange(1., 11.)*u.Jy) with pytest.raises(u.UnitsError): lq * (1.*u.m) with pytest.raises(u.UnitsError): (1.*u.m) * lq with pytest.raises(u.UnitsError): lq / lq for unit in (u.m, u.mag, u.dex): with pytest.raises(u.UnitsError): lq / unit lq2 = u.Magnitude(np.arange(1, 11.)) with pytest.raises(u.UnitsError): lq2 * lq with pytest.raises(u.UnitsError): lq2 / lq with pytest.raises(u.UnitsError): lq / lq2 # but dimensionless_unscaled can be cancelled r = lq2 / u.Magnitude(2.) assert r.unit == u.dimensionless_unscaled assert np.all(r.value == lq2.value/2.) # with dimensionless, normal units OK, but return normal quantities tf = lq2 * u.m tr = u.m * lq2 for t in (tf, tr): assert not isinstance(t, type(lq2)) assert t.unit == lq2.unit.function_unit * u.m with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(lq2.unit.physical_unit) t = tf / (50.*u.cm) # now we essentially have the same quantity but with a prefactor of 2 assert t.unit.is_equivalent(lq2.unit.function_unit) assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view*2) @pytest.mark.parametrize('power', (2, 0.5, 1, 0)) def test_raise_to_power(self, power): """Check that raising LogQuantities to some power is only possible when the physical unit is dimensionless, and that conversion is turned off when the resulting logarithmic unit (say, mag**2) is incompatible.""" lq = u.Magnitude(np.arange(1., 4.)*u.Jy) if power == 0: assert np.all(lq ** power == 1.) elif power == 1: assert np.all(lq ** power == lq) else: with pytest.raises(u.UnitsError): lq ** power # with dimensionless, it works, but falls back to normal quantity # (except for power=1) lq2 = u.Magnitude(np.arange(10.)) t = lq2**power if power == 0: assert t.unit is u.dimensionless_unscaled assert np.all(t.value == 1.) 
elif power == 1: assert np.all(t == lq2) else: assert not isinstance(t, type(lq2)) assert t.unit == lq2.unit.function_unit ** power with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(u.dimensionless_unscaled) def test_error_on_lq_as_power(self): lq = u.Magnitude(np.arange(1., 4.)*u.Jy) with pytest.raises(TypeError): lq ** lq @pytest.mark.parametrize('other', pu_sample) def test_addition_subtraction_to_normal_units_fails(self, other): lq = u.Magnitude(np.arange(1., 10.)*u.Jy) q = 1.23 * other with pytest.raises(u.UnitsError): lq + q with pytest.raises(u.UnitsError): lq - q with pytest.raises(u.UnitsError): q - lq @pytest.mark.parametrize( 'other', (1.23 * u.mag, 2.34 * u.mag(), u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m), 5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag))) def test_addition_subtraction(self, other): """Check that addition/subtraction with quantities with magnitude or MagUnit units works, and that it changes the physical units appropriately.""" lq = u.Magnitude(np.arange(1., 10.)*u.Jy) other_physical = other.to(getattr(other.unit, 'physical_unit', u.dimensionless_unscaled), equivalencies=u.logarithmic()) lq_sf = lq + other assert_allclose(lq_sf.physical, lq.physical * other_physical) lq_sr = other + lq assert_allclose(lq_sr.physical, lq.physical * other_physical) lq_df = lq - other assert_allclose(lq_df.physical, lq.physical / other_physical) lq_dr = other - lq assert_allclose(lq_dr.physical, other_physical / lq.physical) @pytest.mark.parametrize('other', pu_sample) def test_inplace_addition_subtraction_unit_checks(self, other): lu1 = u.mag(u.Jy) lq1 = u.Magnitude(np.arange(1., 10.), lu1) with pytest.raises(u.UnitsError): lq1 += other assert np.all(lq1.value == np.arange(1., 10.)) assert lq1.unit == lu1 with pytest.raises(u.UnitsError): lq1 -= other assert np.all(lq1.value == np.arange(1., 10.)) assert lq1.unit == lu1 @pytest.mark.parametrize( 'other', (1.23 * u.mag, 2.34 * u.mag(), u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m), 5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag))) def test_inplace_addition_subtraction(self, other): """Check that inplace addition/subtraction with quantities with magnitude or MagUnit units works, and that it changes the physical units appropriately.""" lq = u.Magnitude(np.arange(1., 10.)*u.Jy) other_physical = other.to(getattr(other.unit, 'physical_unit', u.dimensionless_unscaled), equivalencies=u.logarithmic()) lq_sf = lq.copy() lq_sf += other assert_allclose(lq_sf.physical, lq.physical * other_physical) lq_df = lq.copy() lq_df -= other assert_allclose(lq_df.physical, lq.physical / other_physical) def test_complicated_addition_subtraction(self): """For fun, a more complicated example of addition and subtraction.""" dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2)) DMmag = u.mag(dm0) m_st = 10. * u.STmag dm = 5. * DMmag M_st = m_st - dm assert M_st.unit.is_equivalent(u.erg/u.s/u.AA) assert np.abs(M_st.physical / (m_st.physical*4.*np.pi*(100.*u.pc)**2) - 1.) 
< 1.e-15 class TestLogQuantityComparisons: def test_comparison_to_non_quantities_fails(self): lq = u.Magnitude(np.arange(1., 10.)*u.Jy) with pytest.raises(TypeError): lq > 'a' assert not (lq == 'a') assert lq != 'a' def test_comparison(self): lq1 = u.Magnitude(np.arange(1., 4.)*u.Jy) lq2 = u.Magnitude(2.*u.Jy) assert np.all((lq1 > lq2) == np.array([True, False, False])) assert np.all((lq1 == lq2) == np.array([False, True, False])) lq3 = u.Dex(2.*u.Jy) assert np.all((lq1 > lq3) == np.array([True, False, False])) assert np.all((lq1 == lq3) == np.array([False, True, False])) lq4 = u.Magnitude(2.*u.m) assert not (lq1 == lq4) assert lq1 != lq4 with pytest.raises(u.UnitsError): lq1 < lq4 q5 = 1.5 * u.Jy assert np.all((lq1 > q5) == np.array([True, False, False])) assert np.all((q5 < lq1) == np.array([True, False, False])) with pytest.raises(u.UnitsError): lq1 >= 2.*u.m with pytest.raises(u.UnitsError): lq1 <= lq1.value * u.mag # For physically dimensionless, we can compare with the function unit. lq6 = u.Magnitude(np.arange(1., 4.)) fv6 = lq6.value * u.mag assert np.all(lq6 == fv6) # but not some arbitrary unit, of course. with pytest.raises(u.UnitsError): lq6 < 2.*u.m class TestLogQuantityMethods: def setup(self): self.mJy = np.arange(1., 5.).reshape(2, 2) * u.mag(u.Jy) self.m1 = np.arange(1., 5.5, 0.5).reshape(3, 3) * u.mag() self.mags = (self.mJy, self.m1) @pytest.mark.parametrize('method', ('mean', 'min', 'max', 'round', 'trace', 'std', 'var', 'ptp', 'diff', 'ediff1d')) def test_always_ok(self, method): for mag in self.mags: res = getattr(mag, method)() assert np.all(res.value == getattr(mag._function_view, method)().value) if method in ('std', 'ptp', 'diff', 'ediff1d'): assert res.unit == u.mag() elif method == 'var': assert res.unit == u.mag**2 else: assert res.unit == mag.unit def test_clip(self): for mag in self.mags: assert np.all(mag.clip(2. * mag.unit, 4. * mag.unit).value == mag.value.clip(2., 4.)) @pytest.mark.parametrize('method', ('sum', 'cumsum', 'nansum')) def test_only_ok_if_dimensionless(self, method): res = getattr(self.m1, method)() assert np.all(res.value == getattr(self.m1._function_view, method)().value) assert res.unit == self.m1.unit with pytest.raises(TypeError): getattr(self.mJy, method)() def test_dot(self): assert np.all(self.m1.dot(self.m1).value == self.m1.value.dot(self.m1.value)) @pytest.mark.parametrize('method', ('prod', 'cumprod')) def test_never_ok(self, method): with pytest.raises(TypeError): getattr(self.mJy, method)() with pytest.raises(TypeError): getattr(self.m1, method)()
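# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not part of astropy's test suite): the
# basic Magnitude round-trip that the classes above exercise, in one place.
# The function name is hypothetical; only public astropy API used in the
# tests appears here.
def _demo_magnitude_roundtrip():
    import numpy as np
    from astropy import units as u

    flux = np.array([1., 10., 100.]) * u.Jy
    mag = u.Magnitude(flux)            # function unit mag, physical unit Jy
    assert mag.unit == u.mag(u.Jy)
    # .physical inverts the logarithm, recovering the original fluxes.
    back = mag.physical.to(u.Jy)
    assert np.allclose(back.value, flux.value)
    return mag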
# coding: utf-8 # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Test the Quantity class and related. """ import copy import pickle import decimal from fractions import Fraction import pytest import numpy as np from numpy.testing import (assert_allclose, assert_array_equal, assert_array_almost_equal) from astropy.tests.helper import catch_warnings, raises from astropy.utils import isiterable, minversion from astropy.utils.compat import NUMPY_LT_1_14 from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning from astropy import units as u from astropy.units.quantity import _UNIT_NOT_INITIALISED try: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from distutils.version import LooseVersion MATPLOTLIB_LT_15 = LooseVersion(matplotlib.__version__) < LooseVersion("1.5") HAS_MATPLOTLIB = True except ImportError: HAS_MATPLOTLIB = False """ The Quantity class will represent a number + unit + uncertainty """ class TestQuantityCreation: def test_1(self): # create objects through operations with Unit objects: quantity = 11.42 * u.meter # returns a Quantity object assert isinstance(quantity, u.Quantity) quantity = u.meter * 11.42 # returns a Quantity object assert isinstance(quantity, u.Quantity) quantity = 11.42 / u.meter assert isinstance(quantity, u.Quantity) quantity = u.meter / 11.42 assert isinstance(quantity, u.Quantity) quantity = 11.42 * u.meter / u.second assert isinstance(quantity, u.Quantity) with pytest.raises(TypeError): quantity = 182.234 + u.meter with pytest.raises(TypeError): quantity = 182.234 - u.meter with pytest.raises(TypeError): quantity = 182.234 % u.meter def test_2(self): # create objects using the Quantity constructor: q1 = u.Quantity(11.412, unit=u.meter) q2 = u.Quantity(21.52, "cm") q3 = u.Quantity(11.412) # By default quantities that don't specify a unit are unscaled # dimensionless assert q3.unit == u.Unit(1) with pytest.raises(TypeError): q4 = u.Quantity(object(), unit=u.m) def test_3(self): # with pytest.raises(u.UnitsError): with pytest.raises(ValueError): # Until @mdboom fixes the errors in units q1 = u.Quantity(11.412, unit="testingggg") def test_nan_inf(self): # Not-a-number q = u.Quantity('nan', unit='cm') assert np.isnan(q.value) q = u.Quantity('NaN', unit='cm') assert np.isnan(q.value) q = u.Quantity('-nan', unit='cm') # float() allows this assert np.isnan(q.value) q = u.Quantity('nan cm') assert np.isnan(q.value) assert q.unit == u.cm # Infinity q = u.Quantity('inf', unit='cm') assert np.isinf(q.value) q = u.Quantity('-inf', unit='cm') assert np.isinf(q.value) q = u.Quantity('inf cm') assert np.isinf(q.value) assert q.unit == u.cm q = u.Quantity('Infinity', unit='cm') # float() allows this assert np.isinf(q.value) # make sure these strings don't parse... 
with pytest.raises(TypeError): q = u.Quantity('', unit='cm') with pytest.raises(TypeError): q = u.Quantity('spam', unit='cm') def test_unit_property(self): # test getting and setting 'unit' attribute q1 = u.Quantity(11.4, unit=u.meter) with pytest.raises(AttributeError): q1.unit = u.cm def test_preserve_dtype(self): """Test that if an explicit dtype is given, it is used, while if not, numbers are converted to float (including decimal.Decimal, which numpy converts to an object; closes #1419) """ # If dtype is specified, use it, but if not, convert int, bool to float q1 = u.Quantity(12, unit=u.m / u.s, dtype=int) assert q1.dtype == int q2 = u.Quantity(q1) assert q2.dtype == float assert q2.value == float(q1.value) assert q2.unit == q1.unit # but we should preserve float32 a3 = np.array([1., 2.], dtype=np.float32) q3 = u.Quantity(a3, u.yr) assert q3.dtype == a3.dtype # items stored as objects by numpy should be converted to float # by default q4 = u.Quantity(decimal.Decimal('10.25'), u.m) assert q4.dtype == float q5 = u.Quantity(decimal.Decimal('10.25'), u.m, dtype=object) assert q5.dtype == object def test_copy(self): # By default, a new quantity is constructed, but not if copy=False a = np.arange(10.) q0 = u.Quantity(a, unit=u.m / u.s) assert q0.base is not a q1 = u.Quantity(a, unit=u.m / u.s, copy=False) assert q1.base is a q2 = u.Quantity(q0) assert q2 is not q0 assert q2.base is not q0.base q2 = u.Quantity(q0, copy=False) assert q2 is q0 assert q2.base is q0.base q3 = u.Quantity(q0, q0.unit, copy=False) assert q3 is q0 assert q3.base is q0.base q4 = u.Quantity(q0, u.cm / u.s, copy=False) assert q4 is not q0 assert q4.base is not q0.base def test_subok(self): """Test subok can be used to keep class, or to insist on Quantity""" class MyQuantitySubclass(u.Quantity): pass myq = MyQuantitySubclass(np.arange(10.), u.m) # try both with and without changing the unit assert type(u.Quantity(myq)) is u.Quantity assert type(u.Quantity(myq, subok=True)) is MyQuantitySubclass assert type(u.Quantity(myq, u.km)) is u.Quantity assert type(u.Quantity(myq, u.km, subok=True)) is MyQuantitySubclass def test_order(self): """Test that order is correctly propagated to np.array""" ac = np.array(np.arange(10.), order='C') qcc = u.Quantity(ac, u.m, order='C') assert qcc.flags['C_CONTIGUOUS'] qcf = u.Quantity(ac, u.m, order='F') assert qcf.flags['F_CONTIGUOUS'] qca = u.Quantity(ac, u.m, order='A') assert qca.flags['C_CONTIGUOUS'] # check it works also when passing in a quantity assert u.Quantity(qcc, order='C').flags['C_CONTIGUOUS'] assert u.Quantity(qcc, order='A').flags['C_CONTIGUOUS'] assert u.Quantity(qcc, order='F').flags['F_CONTIGUOUS'] af = np.array(np.arange(10.), order='F') qfc = u.Quantity(af, u.m, order='C') assert qfc.flags['C_CONTIGUOUS'] qff = u.Quantity(ac, u.m, order='F') assert qff.flags['F_CONTIGUOUS'] qfa = u.Quantity(af, u.m, order='A') assert qfa.flags['F_CONTIGUOUS'] assert u.Quantity(qff, order='C').flags['C_CONTIGUOUS'] assert u.Quantity(qff, order='A').flags['F_CONTIGUOUS'] assert u.Quantity(qff, order='F').flags['F_CONTIGUOUS'] def test_ndmin(self): """Test that ndmin is correctly propagated to np.array""" a = np.arange(10.) 
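        # ndmin pads the result with leading axes of length 1, matching
        # np.array: shape (10,) becomes (1, 10) for ndmin=2, and so on.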
q1 = u.Quantity(a, u.m, ndmin=1) assert q1.ndim == 1 and q1.shape == (10,) q2 = u.Quantity(a, u.m, ndmin=2) assert q2.ndim == 2 and q2.shape == (1, 10) # check it works also when passing in a quantity q3 = u.Quantity(q1, u.m, ndmin=3) assert q3.ndim == 3 and q3.shape == (1, 1, 10) def test_non_quantity_with_unit(self): """Test that unit attributes in objects get recognized.""" class MyQuantityLookalike(np.ndarray): pass a = np.arange(3.) mylookalike = a.copy().view(MyQuantityLookalike) mylookalike.unit = 'm' q1 = u.Quantity(mylookalike) assert isinstance(q1, u.Quantity) assert q1.unit is u.m assert np.all(q1.value == a) q2 = u.Quantity(mylookalike, u.mm) assert q2.unit is u.mm assert np.all(q2.value == 1000.*a) q3 = u.Quantity(mylookalike, copy=False) assert np.all(q3.value == mylookalike) q3[2] = 0 assert q3[2] == 0. assert mylookalike[2] == 0. mylookalike = a.copy().view(MyQuantityLookalike) mylookalike.unit = u.m q4 = u.Quantity(mylookalike, u.mm, copy=False) q4[2] = 0 assert q4[2] == 0. assert mylookalike[2] == 2. mylookalike.unit = 'nonsense' with pytest.raises(TypeError): u.Quantity(mylookalike) def test_creation_via_view(self): # This works but is no better than 1. * u.m q1 = 1. << u.m assert isinstance(q1, u.Quantity) assert q1.unit == u.m assert q1.value == 1. # With an array, we get an actual view. a2 = np.arange(10.) q2 = a2 << u.m / u.s assert isinstance(q2, u.Quantity) assert q2.unit == u.m / u.s assert np.all(q2.value == a2) a2[9] = 0. assert np.all(q2.value == a2) # But with a unit change we get a copy. q3 = q2 << u.mm / u.s assert isinstance(q3, u.Quantity) assert q3.unit == u.mm / u.s assert np.all(q3.value == a2 * 1000.) a2[8] = 0. assert q3[8].value == 8000. # Without a unit change, we do get a view. q4 = q2 << q2.unit a2[7] = 0. assert np.all(q4.value == a2) with pytest.raises(u.UnitsError): q2 << u.s # But one can do an in-place unit change. a2_copy = a2.copy() q2 <<= u.mm / u.s assert q2.unit == u.mm / u.s # Of course, this changes a2 as well. assert np.all(q2.value == a2) # Sanity check on the values. assert np.all(q2.value == a2_copy * 1000.) a2[8] = -1. # Using quantities, one can also work with strings. q5 = q2 << 'km/hr' assert q5.unit == u.km / u.hr assert np.all(q5 == q2) # Finally, we can use scalar quantities as units. not_quite_a_foot = 30. * u.cm a6 = np.arange(5.) q6 = a6 << not_quite_a_foot assert q6.unit == u.Unit(not_quite_a_foot) assert np.all(q6.to_value(u.cm) == 30. * a6) def test_rshift_warns(self): with pytest.raises(TypeError), \ catch_warnings() as warning_lines: 1 >> u.m assert len(warning_lines) == 1 assert warning_lines[0].category == AstropyWarning assert 'is not implemented' in str(warning_lines[0].message) q = 1. * u.km with pytest.raises(TypeError), \ catch_warnings() as warning_lines: q >> u.m assert len(warning_lines) == 1 assert warning_lines[0].category == AstropyWarning assert 'is not implemented' in str(warning_lines[0].message) with pytest.raises(TypeError), \ catch_warnings() as warning_lines: q >>= u.m assert len(warning_lines) == 1 assert warning_lines[0].category == AstropyWarning assert 'is not implemented' in str(warning_lines[0].message) with pytest.raises(TypeError), \ catch_warnings() as warning_lines: 1. 
>> q assert len(warning_lines) == 1 assert warning_lines[0].category == AstropyWarning assert 'is not implemented' in str(warning_lines[0].message) class TestQuantityOperations: q1 = u.Quantity(11.42, u.meter) q2 = u.Quantity(8.0, u.centimeter) def test_addition(self): # Take units from left object, q1 new_quantity = self.q1 + self.q2 assert new_quantity.value == 11.5 assert new_quantity.unit == u.meter # Take units from left object, q2 new_quantity = self.q2 + self.q1 assert new_quantity.value == 1150.0 assert new_quantity.unit == u.centimeter new_q = u.Quantity(1500.1, u.m) + u.Quantity(13.5, u.km) assert new_q.unit == u.m assert new_q.value == 15000.1 def test_subtraction(self): # Take units from left object, q1 new_quantity = self.q1 - self.q2 assert new_quantity.value == 11.34 assert new_quantity.unit == u.meter # Take units from left object, q2 new_quantity = self.q2 - self.q1 assert new_quantity.value == -1134.0 assert new_quantity.unit == u.centimeter def test_multiplication(self): # Take units from left object, q1 new_quantity = self.q1 * self.q2 assert new_quantity.value == 91.36 assert new_quantity.unit == (u.meter * u.centimeter) # Take units from left object, q2 new_quantity = self.q2 * self.q1 assert new_quantity.value == 91.36 assert new_quantity.unit == (u.centimeter * u.meter) # Multiply with a number new_quantity = 15. * self.q1 assert new_quantity.value == 171.3 assert new_quantity.unit == u.meter # Multiply with a number new_quantity = self.q1 * 15. assert new_quantity.value == 171.3 assert new_quantity.unit == u.meter def test_division(self): # Take units from left object, q1 new_quantity = self.q1 / self.q2 assert_array_almost_equal(new_quantity.value, 1.4275, decimal=5) assert new_quantity.unit == (u.meter / u.centimeter) # Take units from left object, q2 new_quantity = self.q2 / self.q1 assert_array_almost_equal(new_quantity.value, 0.70052539404553416, decimal=16) assert new_quantity.unit == (u.centimeter / u.meter) q1 = u.Quantity(11.4, unit=u.meter) q2 = u.Quantity(10.0, unit=u.second) new_quantity = q1 / q2 assert_array_almost_equal(new_quantity.value, 1.14, decimal=10) assert new_quantity.unit == (u.meter / u.second) # divide with a number new_quantity = self.q1 / 10. assert new_quantity.value == 1.142 assert new_quantity.unit == u.meter # divide with a number new_quantity = 11.42 / self.q1 assert new_quantity.value == 1. assert new_quantity.unit == u.Unit("1/m") def test_commutativity(self): """Regression test for issue #587.""" new_q = u.Quantity(11.42, 'm*s') assert self.q1 * u.s == u.s * self.q1 == new_q assert self.q1 / u.s == u.Quantity(11.42, 'm/s') assert u.s / self.q1 == u.Quantity(1 / 11.42, 's/m') def test_power(self): # raise quantity to a power new_quantity = self.q1 ** 2 assert_array_almost_equal(new_quantity.value, 130.4164, decimal=5) assert new_quantity.unit == u.Unit("m^2") new_quantity = self.q1 ** 3 assert_array_almost_equal(new_quantity.value, 1489.355288, decimal=7) assert new_quantity.unit == u.Unit("m^3") def test_matrix_multiplication(self): a = np.eye(3) q = a * u.m result1 = q @ a assert np.all(result1 == q) result2 = a @ q assert np.all(result2 == q) result3 = q @ q assert np.all(result3 == a * u.m ** 2) # less trivial case. 
q2 = np.array([[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], [[0., 1., 0.], [0., 0., 1.], [1., 0., 0.]], [[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]]]) / u.s result4 = q @ q2 assert np.all(result4 == np.matmul(a, q2.value) * q.unit * q2.unit) def test_unary(self): # Test the minus unary operator new_quantity = -self.q1 assert new_quantity.value == -self.q1.value assert new_quantity.unit == self.q1.unit new_quantity = -(-self.q1) assert new_quantity.value == self.q1.value assert new_quantity.unit == self.q1.unit # Test the plus unary operator new_quantity = +self.q1 assert new_quantity.value == self.q1.value assert new_quantity.unit == self.q1.unit def test_abs(self): q = 1. * u.m / u.s new_quantity = abs(q) assert new_quantity.value == q.value assert new_quantity.unit == q.unit q = -1. * u.m / u.s new_quantity = abs(q) assert new_quantity.value == -q.value assert new_quantity.unit == q.unit def test_incompatible_units(self): """ When trying to add or subtract units that aren't compatible, throw an error """ q1 = u.Quantity(11.412, unit=u.meter) q2 = u.Quantity(21.52, unit=u.second) with pytest.raises(u.UnitsError): new_q = q1 + q2 def test_non_number_type(self): q1 = u.Quantity(11.412, unit=u.meter) type_err_msg = ("Unsupported operand type(s) for ufunc add: " "'Quantity' and 'dict'") with pytest.raises(TypeError) as exc: q1 + {'a': 1} assert exc.value.args[0] == type_err_msg with pytest.raises(TypeError): q1 + u.meter def test_dimensionless_operations(self): # test conversion to dimensionless dq = 3. * u.m / u.km dq1 = dq + 1. * u.mm / u.km assert dq1.value == 3.001 assert dq1.unit == dq.unit dq2 = dq + 1. assert dq2.value == 1.003 assert dq2.unit == u.dimensionless_unscaled # this test will check that operations with dimensionless Quantities # don't work with pytest.raises(u.UnitsError): self.q1 + u.Quantity(0.1, unit=u.Unit("")) with pytest.raises(u.UnitsError): self.q1 - u.Quantity(0.1, unit=u.Unit("")) # and test that scaling of integers works q = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int) q2 = q + np.array([4, 5, 6]) assert q2.unit == u.dimensionless_unscaled assert_allclose(q2.value, np.array([4.001, 5.002, 6.003])) # but not if doing it inplace with pytest.raises(TypeError): q += np.array([1, 2, 3]) # except if it is actually possible q = np.array([1, 2, 3]) * u.km / u.m q += np.array([4, 5, 6]) assert q.unit == u.dimensionless_unscaled assert np.all(q.value == np.array([1004, 2005, 3006])) def test_complicated_operation(self): """ Perform a more complicated test """ from astropy.units import imperial # Multiple units distance = u.Quantity(15., u.meter) time = u.Quantity(11., u.second) velocity = (distance / time).to(imperial.mile / u.hour) assert_array_almost_equal( velocity.value, 3.05037, decimal=5) G = u.Quantity(6.673E-11, u.m ** 3 / u.kg / u.s ** 2) new_q = ((1. / (4. * np.pi * G)).to(u.pc ** -3 / u.s ** -2 * u.kg)) # Area side1 = u.Quantity(11., u.centimeter) side2 = u.Quantity(7., u.centimeter) area = side1 * side2 assert_array_almost_equal(area.value, 77., decimal=15) assert area.unit == u.cm * u.cm def test_comparison(self): # equality/ non-equality is straightforward for quantity objects assert (1 / (u.cm * u.cm)) == 1 * u.cm ** -2 assert 1 * u.m == 100 * u.cm assert 1 * u.m != 1 * u.cm # when one is a unit, Quantity does not know what to do, # but unit is fine with it, so it still works unit = u.cm**3 q = 1. * unit assert q.__eq__(unit) is NotImplemented assert unit.__eq__(q) is True assert q == unit q = 1000. 
* u.mm**3 assert q == unit # mismatched types should never work assert not 1. * u.cm == 1. assert 1. * u.cm != 1. # comparison with zero should raise a deprecation warning for quantity in (1. * u.cm, 1. * u.dimensionless_unscaled): with catch_warnings(AstropyDeprecationWarning) as warning_lines: bool(quantity) assert warning_lines[0].category == AstropyDeprecationWarning assert (str(warning_lines[0].message) == 'The truth value of ' 'a Quantity is ambiguous. In the future this will ' 'raise a ValueError.') def test_numeric_converters(self): # float, int, long, and __index__ should only work for single # quantities, of appropriate type, and only if they are dimensionless. # for index, this should be unscaled as well # (Check on __index__ is also a regression test for #1557) # quantities with units should never convert, or be usable as an index q1 = u.Quantity(1, u.m) converter_err_msg = ("only dimensionless scalar quantities " "can be converted to Python scalars") index_err_msg = ("only integer dimensionless scalar quantities " "can be converted to a Python index") with pytest.raises(TypeError) as exc: float(q1) assert exc.value.args[0] == converter_err_msg with pytest.raises(TypeError) as exc: int(q1) assert exc.value.args[0] == converter_err_msg # We used to test `q1 * ['a', 'b', 'c'] here, but that that worked # at all was a really odd confluence of bugs. Since it doesn't work # in numpy >=1.10 any more, just go directly for `__index__` (which # makes the test more similar to the `int`, `long`, etc., tests). with pytest.raises(TypeError) as exc: q1.__index__() assert exc.value.args[0] == index_err_msg # dimensionless but scaled is OK, however q2 = u.Quantity(1.23, u.m / u.km) assert float(q2) == float(q2.to_value(u.dimensionless_unscaled)) assert int(q2) == int(q2.to_value(u.dimensionless_unscaled)) with pytest.raises(TypeError) as exc: q2.__index__() assert exc.value.args[0] == index_err_msg # dimensionless unscaled is OK, though for index needs to be int q3 = u.Quantity(1.23, u.dimensionless_unscaled) assert float(q3) == 1.23 assert int(q3) == 1 with pytest.raises(TypeError) as exc: q3.__index__() assert exc.value.args[0] == index_err_msg # integer dimensionless unscaled is good for all q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int) assert float(q4) == 2. assert int(q4) == 2 assert q4.__index__() == 2 # but arrays are not OK q5 = u.Quantity([1, 2], u.m) with pytest.raises(TypeError) as exc: float(q5) assert exc.value.args[0] == converter_err_msg with pytest.raises(TypeError) as exc: int(q5) assert exc.value.args[0] == converter_err_msg with pytest.raises(TypeError) as exc: q5.__index__() assert exc.value.args[0] == index_err_msg # See https://github.com/numpy/numpy/issues/5074 # It seems unlikely this will be resolved, so xfail'ing it. 
@pytest.mark.xfail(reason="list multiplication only works for numpy <=1.10") def test_numeric_converter_to_index_in_practice(self): """Test that use of __index__ actually works.""" q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int) assert q4 * ['a', 'b', 'c'] == ['a', 'b', 'c', 'a', 'b', 'c'] def test_array_converters(self): # Scalar quantity q = u.Quantity(1.23, u.m) assert np.all(np.array(q) == np.array([1.23])) # Array quantity q = u.Quantity([1., 2., 3.], u.m) assert np.all(np.array(q) == np.array([1., 2., 3.])) def test_quantity_conversion(): q1 = u.Quantity(0.1, unit=u.meter) value = q1.value assert value == 0.1 value_in_km = q1.to_value(u.kilometer) assert value_in_km == 0.0001 new_quantity = q1.to(u.kilometer) assert new_quantity.value == 0.0001 with pytest.raises(u.UnitsError): q1.to(u.zettastokes) with pytest.raises(u.UnitsError): q1.to_value(u.zettastokes) def test_quantity_value_views(): q1 = u.Quantity([1., 2.], unit=u.meter) # views if the unit is the same. v1 = q1.value v1[0] = 0. assert np.all(q1 == [0., 2.] * u.meter) v2 = q1.to_value() v2[1] = 3. assert np.all(q1 == [0., 3.] * u.meter) v3 = q1.to_value('m') v3[0] = 1. assert np.all(q1 == [1., 3.] * u.meter) v4 = q1.to_value('cm') v4[0] = 0. # copy if different unit. assert np.all(q1 == [1., 3.] * u.meter) def test_quantity_conversion_with_equiv(): q1 = u.Quantity(0.1, unit=u.meter) v2 = q1.to_value(u.Hz, equivalencies=u.spectral()) assert_allclose(v2, 2997924580.0) q2 = q1.to(u.Hz, equivalencies=u.spectral()) assert_allclose(q2.value, v2) q1 = u.Quantity(0.4, unit=u.arcsecond) v2 = q1.to_value(u.au, equivalencies=u.parallax()) q2 = q1.to(u.au, equivalencies=u.parallax()) v3 = q2.to_value(u.arcminute, equivalencies=u.parallax()) q3 = q2.to(u.arcminute, equivalencies=u.parallax()) assert_allclose(v2, 515662.015) assert_allclose(q2.value, v2) assert q2.unit == u.au assert_allclose(v3, 0.0066666667) assert_allclose(q3.value, v3) assert q3.unit == u.arcminute def test_quantity_conversion_equivalency_passed_on(): class MySpectral(u.Quantity): _equivalencies = u.spectral() def __quantity_view__(self, obj, unit): return obj.view(MySpectral) def __quantity_instance__(self, *args, **kwargs): return MySpectral(*args, **kwargs) q1 = MySpectral([1000, 2000], unit=u.Hz) q2 = q1.to(u.nm) assert q2.unit == u.nm q3 = q2.to(u.Hz) assert q3.unit == u.Hz assert_allclose(q3.value, q1.value) q4 = MySpectral([1000, 2000], unit=u.nm) q5 = q4.to(u.Hz).to(u.nm) assert q5.unit == u.nm assert_allclose(q4.value, q5.value) # Regression test for issue #2315, divide-by-zero error when examining 0*unit def test_self_equivalency(): assert u.deg.is_equivalent(0*u.radian) assert u.deg.is_equivalent(1*u.radian) def test_si(): q1 = 10. * u.m * u.s ** 2 / (200. * u.ms) ** 2 # 250 meters assert q1.si.value == 250 assert q1.si.unit == u.m q = 10. * u.m # 10 meters assert q.si.value == 10 assert q.si.unit == u.m q = 10. / u.m # 10 1 / meters assert q.si.value == 10 assert q.si.unit == (1 / u.m) def test_cgs(): q1 = 10. * u.cm * u.s ** 2 / (200. * u.ms) ** 2 # 250 centimeters assert q1.cgs.value == 250 assert q1.cgs.unit == u.cm q = 10. * u.m # 10 centimeters assert q.cgs.value == 1000 assert q.cgs.unit == u.cm q = 10. / u.cm # 10 1 / centimeters assert q.cgs.value == 10 assert q.cgs.unit == (1 / u.cm) q = 10. 
* u.Pa # 10 pascals assert q.cgs.value == 100 assert q.cgs.unit == u.barye class TestQuantityComparison: def test_quantity_equality(self): assert u.Quantity(1000, unit='m') == u.Quantity(1, unit='km') assert not (u.Quantity(1, unit='m') == u.Quantity(1, unit='km')) # for ==, !=, return False, True if units do not match assert (u.Quantity(1100, unit=u.m) != u.Quantity(1, unit=u.s)) is True assert (u.Quantity(1100, unit=u.m) == u.Quantity(1, unit=u.s)) is False def test_quantity_comparison(self): assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.kilometer) assert u.Quantity(900, unit=u.meter) < u.Quantity(1, unit=u.kilometer) with pytest.raises(u.UnitsError): assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.second) with pytest.raises(u.UnitsError): assert u.Quantity(1100, unit=u.meter) < u.Quantity(1, unit=u.second) assert u.Quantity(1100, unit=u.meter) >= u.Quantity(1, unit=u.kilometer) assert u.Quantity(1000, unit=u.meter) >= u.Quantity(1, unit=u.kilometer) assert u.Quantity(900, unit=u.meter) <= u.Quantity(1, unit=u.kilometer) assert u.Quantity(1000, unit=u.meter) <= u.Quantity(1, unit=u.kilometer) with pytest.raises(u.UnitsError): assert u.Quantity( 1100, unit=u.meter) >= u.Quantity(1, unit=u.second) with pytest.raises(u.UnitsError): assert u.Quantity(1100, unit=u.meter) <= u.Quantity(1, unit=u.second) assert u.Quantity(1200, unit=u.meter) != u.Quantity(1, unit=u.kilometer) class TestQuantityDisplay: scalarintq = u.Quantity(1, unit='m', dtype=int) scalarfloatq = u.Quantity(1.3, unit='m') arrq = u.Quantity([1, 2.3, 8.9], unit='m') scalar_complex_q = u.Quantity(complex(1.0, 2.0)) scalar_big_complex_q = u.Quantity(complex(1.0, 2.0e27) * 1e25) scalar_big_neg_complex_q = u.Quantity(complex(-1.0, -2.0e27) * 1e36) arr_complex_q = u.Quantity(np.arange(3) * (complex(-1.0, -2.0e27) * 1e36)) big_arr_complex_q = u.Quantity(np.arange(125) * (complex(-1.0, -2.0e27) * 1e36)) def test_dimensionless_quantity_repr(self): q2 = u.Quantity(1., unit='m-1') q3 = u.Quantity(1, unit='m-1', dtype=int) if NUMPY_LT_1_14: assert repr(self.scalarintq * q2) == "<Quantity 1.0>" assert repr(self.arrq * q2) == "<Quantity [ 1. , 2.3, 8.9]>" else: assert repr(self.scalarintq * q2) == "<Quantity 1.>" assert repr(self.arrq * q2) == "<Quantity [1. , 2.3, 8.9]>" assert repr(self.scalarintq * q3) == "<Quantity 1>" def test_dimensionless_quantity_str(self): q2 = u.Quantity(1., unit='m-1') q3 = u.Quantity(1, unit='m-1', dtype=int) assert str(self.scalarintq * q2) == "1.0" assert str(self.scalarintq * q3) == "1" if NUMPY_LT_1_14: assert str(self.arrq * q2) == "[ 1. 2.3 8.9]" else: assert str(self.arrq * q2) == "[1. 2.3 8.9]" def test_dimensionless_quantity_format(self): q1 = u.Quantity(3.14) assert format(q1, '.2f') == '3.14' def test_scalar_quantity_str(self): assert str(self.scalarintq) == "1 m" assert str(self.scalarfloatq) == "1.3 m" def test_scalar_quantity_repr(self): assert repr(self.scalarintq) == "<Quantity 1 m>" assert repr(self.scalarfloatq) == "<Quantity 1.3 m>" def test_array_quantity_str(self): if NUMPY_LT_1_14: assert str(self.arrq) == "[ 1. 2.3 8.9] m" else: assert str(self.arrq) == "[1. 2.3 8.9] m" def test_array_quantity_repr(self): if NUMPY_LT_1_14: assert repr(self.arrq) == "<Quantity [ 1. , 2.3, 8.9] m>" else: assert repr(self.arrq) == "<Quantity [1. 
, 2.3, 8.9] m>" def test_scalar_quantity_format(self): assert format(self.scalarintq, '02d') == "01 m" assert format(self.scalarfloatq, '.1f') == "1.3 m" assert format(self.scalarfloatq, '.0f') == "1 m" def test_uninitialized_unit_format(self): bad_quantity = np.arange(10.).view(u.Quantity) assert str(bad_quantity).endswith(_UNIT_NOT_INITIALISED) assert repr(bad_quantity).endswith(_UNIT_NOT_INITIALISED + '>') def test_to_string(self): qscalar = u.Quantity(1.5e14, 'm/s') # __str__ is the default `format` assert str(qscalar) == qscalar.to_string() res = 'Quantity as KMS: 150000000000.0 km / s' assert "Quantity as KMS: {0}".format(qscalar.to_string(unit=u.km / u.s)) == res res = r'$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$' assert qscalar.to_string(format="latex") == res res = r'$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$' assert qscalar.to_string(format="latex", subfmt="inline") == res res = r'$\displaystyle 1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$' assert qscalar.to_string(format="latex", subfmt="display") == res def test_repr_latex(self): from astropy.units.quantity import conf q2scalar = u.Quantity(1.5e14, 'm/s') assert self.scalarintq._repr_latex_() == r'$1 \; \mathrm{m}$' assert self.scalarfloatq._repr_latex_() == r'$1.3 \; \mathrm{m}$' assert (q2scalar._repr_latex_() == r'$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$') assert self.arrq._repr_latex_() == r'$[1,~2.3,~8.9] \; \mathrm{m}$' # Complex quantities assert self.scalar_complex_q._repr_latex_() == r'$(1+2i) \; \mathrm{}$' assert (self.scalar_big_complex_q._repr_latex_() == r'$(1 \times 10^{25}+2 \times 10^{52}i) \; \mathrm{}$') assert (self.scalar_big_neg_complex_q._repr_latex_() == r'$(-1 \times 10^{36}-2 \times 10^{63}i) \; \mathrm{}$') assert (self.arr_complex_q._repr_latex_() == (r'$[(0-0i),~(-1 \times 10^{36}-2 \times 10^{63}i),' r'~(-2 \times 10^{36}-4 \times 10^{63}i)] \; \mathrm{}$')) assert r'\dots' in self.big_arr_complex_q._repr_latex_() qmed = np.arange(100)*u.m qbig = np.arange(1000)*u.m qvbig = np.arange(10000)*1e9*u.m pops = np.get_printoptions() oldlat = conf.latex_array_threshold try: # check precision behavior q = u.Quantity(987654321.123456789, 'm/s') qa = np.array([7.89123, 123456789.987654321, 0]) * u.cm np.set_printoptions(precision=8) assert q._repr_latex_() == r'$9.8765432 \times 10^{8} \; \mathrm{\frac{m}{s}}$' assert qa._repr_latex_() == r'$[7.89123,~1.2345679 \times 10^{8},~0] \; \mathrm{cm}$' np.set_printoptions(precision=2) assert q._repr_latex_() == r'$9.9 \times 10^{8} \; \mathrm{\frac{m}{s}}$' assert qa._repr_latex_() == r'$[7.9,~1.2 \times 10^{8},~0] \; \mathrm{cm}$' # check thresholding behavior conf.latex_array_threshold = 100 # should be default lsmed = qmed._repr_latex_() assert r'\dots' not in lsmed lsbig = qbig._repr_latex_() assert r'\dots' in lsbig lsvbig = qvbig._repr_latex_() assert r'\dots' in lsvbig conf.latex_array_threshold = 1001 lsmed = qmed._repr_latex_() assert r'\dots' not in lsmed lsbig = qbig._repr_latex_() assert r'\dots' not in lsbig lsvbig = qvbig._repr_latex_() assert r'\dots' in lsvbig conf.latex_array_threshold = -1 # means use the numpy threshold np.set_printoptions(threshold=99) lsmed = qmed._repr_latex_() assert r'\dots' in lsmed lsbig = qbig._repr_latex_() assert r'\dots' in lsbig lsvbig = qvbig._repr_latex_() assert r'\dots' in lsvbig finally: # prevent side-effects from influencing other tests np.set_printoptions(**pops) conf.latex_array_threshold = oldlat qinfnan = [np.inf, -np.inf, np.nan] * u.m assert qinfnan._repr_latex_() == r'$[\infty,~-\infty,~{\rm NaN}] 
\; \mathrm{m}$' def test_decompose(): q1 = 5 * u.N assert q1.decompose() == (5 * u.kg * u.m * u.s ** -2) def test_decompose_regression(): """ Regression test for bug #1163 If decompose was called multiple times on a Quantity with an array and a scale != 1, the result changed every time. This is because the value was being referenced not copied, then modified, which changed the original value. """ q = np.array([1, 2, 3]) * u.m / (2. * u.km) assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015])) assert np.all(q == np.array([1, 2, 3]) * u.m / (2. * u.km)) assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015])) def test_arrays(): """ Test using quantites with array values """ qsec = u.Quantity(np.arange(10), u.second) assert isinstance(qsec.value, np.ndarray) assert not qsec.isscalar # len and indexing should work for arrays assert len(qsec) == len(qsec.value) qsecsub25 = qsec[2:5] assert qsecsub25.unit == qsec.unit assert isinstance(qsecsub25, u.Quantity) assert len(qsecsub25) == 3 # make sure isscalar, len, and indexing behave correcly for non-arrays. qsecnotarray = u.Quantity(10., u.second) assert qsecnotarray.isscalar with pytest.raises(TypeError): len(qsecnotarray) with pytest.raises(TypeError): qsecnotarray[0] qseclen0array = u.Quantity(np.array(10), u.second, dtype=int) # 0d numpy array should act basically like a scalar assert qseclen0array.isscalar with pytest.raises(TypeError): len(qseclen0array) with pytest.raises(TypeError): qseclen0array[0] assert isinstance(qseclen0array.value, int) a = np.array([(1., 2., 3.), (4., 5., 6.), (7., 8., 9.)], dtype=[('x', float), ('y', float), ('z', float)]) qkpc = u.Quantity(a, u.kpc) assert not qkpc.isscalar qkpc0 = qkpc[0] assert qkpc0.value == a[0] assert qkpc0.unit == qkpc.unit assert isinstance(qkpc0, u.Quantity) assert qkpc0.isscalar qkpcx = qkpc['x'] assert np.all(qkpcx.value == a['x']) assert qkpcx.unit == qkpc.unit assert isinstance(qkpcx, u.Quantity) assert not qkpcx.isscalar qkpcx1 = qkpc['x'][1] assert qkpcx1.unit == qkpc.unit assert isinstance(qkpcx1, u.Quantity) assert qkpcx1.isscalar qkpc1x = qkpc[1]['x'] assert qkpc1x.isscalar assert qkpc1x == qkpcx1 # can also create from lists, will auto-convert to arrays qsec = u.Quantity(list(range(10)), u.second) assert isinstance(qsec.value, np.ndarray) # quantity math should work with arrays assert_array_equal((qsec * 2).value, (np.arange(10) * 2)) assert_array_equal((qsec / 2).value, (np.arange(10) / 2)) # quantity addition/subtraction should *not* work with arrays b/c unit # ambiguous with pytest.raises(u.UnitsError): assert_array_equal((qsec + 2).value, (np.arange(10) + 2)) with pytest.raises(u.UnitsError): assert_array_equal((qsec - 2).value, (np.arange(10) + 2)) # should create by unit multiplication, too qsec2 = np.arange(10) * u.second qsec3 = u.second * np.arange(10) assert np.all(qsec == qsec2) assert np.all(qsec2 == qsec3) # make sure numerical-converters fail when arrays are present with pytest.raises(TypeError): float(qsec) with pytest.raises(TypeError): int(qsec) def test_array_indexing_slicing(): q = np.array([1., 2., 3.]) * u.m assert q[0] == 1. 
* u.m assert np.all(q[0:2] == u.Quantity([1., 2.], u.m)) def test_array_setslice(): q = np.array([1., 2., 3.]) * u.m q[1:2] = np.array([400.]) * u.cm assert np.all(q == np.array([1., 4., 3.]) * u.m) def test_inverse_quantity(): """ Regression test from issue #679 """ q = u.Quantity(4., u.meter / u.second) qot = q / 2 toq = 2 / q npqot = q / np.array(2) assert npqot.value == 2.0 assert npqot.unit == (u.meter / u.second) assert qot.value == 2.0 assert qot.unit == (u.meter / u.second) assert toq.value == 0.5 assert toq.unit == (u.second / u.meter) def test_quantity_mutability(): q = u.Quantity(9.8, u.meter / u.second / u.second) with pytest.raises(AttributeError): q.value = 3 with pytest.raises(AttributeError): q.unit = u.kg def test_quantity_initialized_with_quantity(): q1 = u.Quantity(60, u.second) q2 = u.Quantity(q1, u.minute) assert q2.value == 1 q3 = u.Quantity([q1, q2], u.second) assert q3[0].value == 60 assert q3[1].value == 60 q4 = u.Quantity([q2, q1]) assert q4.unit == q2.unit assert q4[0].value == 1 assert q4[1].value == 1 def test_quantity_string_unit(): q1 = 1. * u.m / 's' assert q1.value == 1 assert q1.unit == (u.m / u.s) q2 = q1 * "m" assert q2.unit == ((u.m * u.m) / u.s) @raises(ValueError) def test_quantity_invalid_unit_string(): "foo" * u.m def test_implicit_conversion(): q = u.Quantity(1.0, u.meter) # Manually turn this on to simulate what might happen in a subclass q._include_easy_conversion_members = True assert_allclose(q.centimeter, 100) assert_allclose(q.cm, 100) assert_allclose(q.parsec, 3.240779289469756e-17) def test_implicit_conversion_autocomplete(): q = u.Quantity(1.0, u.meter) # Manually turn this on to simulate what might happen in a subclass q._include_easy_conversion_members = True q.foo = 42 attrs = dir(q) assert 'centimeter' in attrs assert 'cm' in attrs assert 'parsec' in attrs assert 'foo' in attrs assert 'to' in attrs assert 'value' in attrs # Something from the base class, object assert '__setattr__' in attrs with pytest.raises(AttributeError): q.l def test_quantity_iterability(): """Regressiont est for issue #878. Scalar quantities should not be iterable and should raise a type error on iteration. """ q1 = [15.0, 17.0] * u.m assert isiterable(q1) q2 = next(iter(q1)) assert q2 == 15.0 * u.m assert not isiterable(q2) pytest.raises(TypeError, iter, q2) def test_copy(): q1 = u.Quantity(np.array([[1., 2., 3.], [4., 5., 6.]]), unit=u.m) q2 = q1.copy() assert np.all(q1.value == q2.value) assert q1.unit == q2.unit assert q1.dtype == q2.dtype assert q1.value is not q2.value q3 = q1.copy(order='F') assert q3.flags['F_CONTIGUOUS'] assert np.all(q1.value == q3.value) assert q1.unit == q3.unit assert q1.dtype == q3.dtype assert q1.value is not q3.value q4 = q1.copy(order='C') assert q4.flags['C_CONTIGUOUS'] assert np.all(q1.value == q4.value) assert q1.unit == q4.unit assert q1.dtype == q4.dtype assert q1.value is not q4.value def test_deepcopy(): q1 = u.Quantity(np.array([1., 2., 3.]), unit=u.m) q2 = copy.deepcopy(q1) assert isinstance(q2, u.Quantity) assert np.all(q1.value == q2.value) assert q1.unit == q2.unit assert q1.dtype == q2.dtype assert q1.value is not q2.value def test_equality_numpy_scalar(): """ A regression test to ensure that numpy scalars are correctly compared (which originally failed due to the lack of ``__array_priority__``). """ assert 10 != 10. 
* u.m assert np.int64(10) != 10 * u.m assert 10 * u.m != np.int64(10) def test_quantity_pickelability(): """ Testing pickleability of quantity """ q1 = np.arange(10) * u.m q2 = pickle.loads(pickle.dumps(q1)) assert np.all(q1.value == q2.value) assert q1.unit.is_equivalent(q2.unit) assert q1.unit == q2.unit def test_quantity_initialisation_from_string(): q = u.Quantity('1') assert q.unit == u.dimensionless_unscaled assert q.value == 1. q = u.Quantity('1.5 m/s') assert q.unit == u.m/u.s assert q.value == 1.5 assert u.Unit(q) == u.Unit('1.5 m/s') q = u.Quantity('.5 m') assert q == u.Quantity(0.5, u.m) q = u.Quantity('-1e1km') assert q == u.Quantity(-10, u.km) q = u.Quantity('-1e+1km') assert q == u.Quantity(-10, u.km) q = u.Quantity('+.5km') assert q == u.Quantity(.5, u.km) q = u.Quantity('+5e-1km') assert q == u.Quantity(.5, u.km) q = u.Quantity('5', u.m) assert q == u.Quantity(5., u.m) q = u.Quantity('5 km', u.m) assert q.value == 5000. assert q.unit == u.m q = u.Quantity('5Em') assert q == u.Quantity(5., u.Em) with pytest.raises(TypeError): u.Quantity('') with pytest.raises(TypeError): u.Quantity('m') with pytest.raises(TypeError): u.Quantity('1.2.3 deg') with pytest.raises(TypeError): u.Quantity('1+deg') with pytest.raises(TypeError): u.Quantity('1-2deg') with pytest.raises(TypeError): u.Quantity('1.2e-13.3m') with pytest.raises(TypeError): u.Quantity(['5']) with pytest.raises(TypeError): u.Quantity(np.array(['5'])) with pytest.raises(ValueError): u.Quantity('5E') with pytest.raises(ValueError): u.Quantity('5 foo') def test_unsupported(): q1 = np.arange(10) * u.m with pytest.raises(TypeError): q2 = np.bitwise_and(q1, q1) def test_unit_identity(): q = 1.0 * u.hour assert q.unit is u.hour def test_quantity_to_view(): q1 = np.array([1000, 2000]) * u.m q2 = q1.to(u.km) assert q1.value[0] == 1000 assert q2.value[0] == 1 @raises(ValueError) def test_quantity_tuple_power(): (5.0 * u.m) ** (1, 2) def test_quantity_fraction_power(): q = (25.0 * u.m**2) ** Fraction(1, 2) assert q.value == 5. assert q.unit == u.m # Regression check to ensure we didn't create an object type by raising # the value of the quantity to a Fraction. [#3922] assert q.dtype.kind == 'f' def test_inherit_docstrings(): assert u.Quantity.argmax.__doc__ == np.ndarray.argmax.__doc__ def test_quantity_from_table(): """ Checks that units from tables are respected when converted to a Quantity. This also generically checks the use of *anything* with a `unit` attribute passed into Quantity """ from... table import Table t = Table(data=[np.arange(5), np.arange(5)], names=['a', 'b']) t['a'].unit = u.kpc qa = u.Quantity(t['a']) assert qa.unit == u.kpc assert_array_equal(qa.value, t['a']) qb = u.Quantity(t['b']) assert qb.unit == u.dimensionless_unscaled assert_array_equal(qb.value, t['b']) # This does *not* auto-convert, because it's not necessarily obvious that's # desired. Instead we revert to standard `Quantity` behavior qap = u.Quantity(t['a'], u.pc) assert qap.unit == u.pc assert_array_equal(qap.value, t['a'] * 1000) qbp = u.Quantity(t['b'], u.pc) assert qbp.unit == u.pc assert_array_equal(qbp.value, t['b']) def test_assign_slice_with_quantity_like(): # Regression tests for gh-5961 from astropy.table import Table, Column # first check directly that we can use a Column to assign to a slice. c = Column(np.arange(10.), unit=u.mm) q = u.Quantity(c) q[:2] = c[:2] # next check that we do not fail the original problem. 
t = Table() t['x'] = np.arange(10) * u.mm t['y'] = np.ones(10) * u.mm assert type(t['x']) is Column xy = np.vstack([t['x'], t['y']]).T * u.mm ii = [0, 2, 4] assert xy[ii, 0].unit == t['x'][ii].unit # should not raise anything xy[ii, 0] = t['x'][ii] def test_insert(): """ Test Quantity.insert method. This does not test the full capabilities of the underlying np.insert, but hits the key functionality for Quantity. """ q = [1, 2] * u.m # Insert a compatible float with different units q2 = q.insert(0, 1 * u.km) assert np.all(q2.value == [1000, 1, 2]) assert q2.unit is u.m assert q2.dtype.kind == 'f' if minversion(np, '1.8.0'): q2 = q.insert(1, [1, 2] * u.km) assert np.all(q2.value == [1, 1000, 2000, 2]) assert q2.unit is u.m # Cannot convert 1.5 * u.s to m with pytest.raises(u.UnitsError): q.insert(1, 1.5 * u.s) # Tests with multi-dim quantity q = [[1, 2], [3, 4]] * u.m q2 = q.insert(1, [10, 20] * u.m, axis=0) assert np.all(q2.value == [[1, 2], [10, 20], [3, 4]]) q2 = q.insert(1, [10, 20] * u.m, axis=1) assert np.all(q2.value == [[1, 10, 2], [3, 20, 4]]) q2 = q.insert(1, 10 * u.m, axis=1) assert np.all(q2.value == [[1, 10, 2], [3, 10, 4]]) def test_repr_array_of_quantity(): """ Test print/repr of object arrays of Quantity objects with different units. Regression test for the issue first reported in https://github.com/astropy/astropy/issues/3777 """ a = np.array([1 * u.m, 2 * u.s], dtype=object) if NUMPY_LT_1_14: assert repr(a) == 'array([<Quantity 1.0 m>, <Quantity 2.0 s>], dtype=object)' assert str(a) == '[<Quantity 1.0 m> <Quantity 2.0 s>]' else: assert repr(a) == 'array([<Quantity 1. m>, <Quantity 2. s>], dtype=object)' assert str(a) == '[<Quantity 1. m> <Quantity 2. s>]' class TestSpecificTypeQuantity: def setup(self): class Length(u.SpecificTypeQuantity): _equivalent_unit = u.m class Length2(Length): _default_unit = u.m class Length3(Length): _unit = u.m self.Length = Length self.Length2 = Length2 self.Length3 = Length3 def test_creation(self): l = self.Length(np.arange(10.)*u.km) assert type(l) is self.Length with pytest.raises(u.UnitTypeError): self.Length(np.arange(10.) * u.hour) with pytest.raises(u.UnitTypeError): self.Length(np.arange(10.)) l2 = self.Length2(np.arange(5.)) assert type(l2) is self.Length2 assert l2._default_unit is self.Length2._default_unit with pytest.raises(u.UnitTypeError): self.Length3(np.arange(10.)) def test_view(self): l = (np.arange(5.) * u.km).view(self.Length) assert type(l) is self.Length with pytest.raises(u.UnitTypeError): (np.arange(5.) * u.s).view(self.Length) v = np.arange(5.).view(self.Length) assert type(v) is self.Length assert v._unit is None l3 = np.ones((2, 2)).view(self.Length3) assert type(l3) is self.Length3 assert l3.unit is self.Length3._unit def test_operation_precedence_and_fallback(self): l = self.Length(np.arange(5.)*u.cm) sum1 = l + 1.*u.m assert type(sum1) is self.Length sum2 = 1.*u.km + l assert type(sum2) is self.Length sum3 = l + l assert type(sum3) is self.Length res1 = l * (1.*u.m) assert type(res1) is u.Quantity res2 = l * l assert type(res2) is u.Quantity @pytest.mark.skipif('not HAS_MATPLOTLIB') @pytest.mark.xfail('MATPLOTLIB_LT_15') class TestQuantityMatplotlib: """Test if passing matplotlib quantities works. TODO: create PNG output and check against reference image once `astropy.wcsaxes` is merged, which provides the machinery for this. 
See https://github.com/astropy/astropy/issues/1881 See https://github.com/astropy/astropy/pull/2139 """ def test_plot(self): data = u.Quantity([4, 5, 6], 's') plt.plot(data) def test_scatter(self): x = u.Quantity([4, 5, 6], 'second') y = [1, 3, 4] * u.m plt.scatter(x, y) def test_unit_class_override(): class MyQuantity(u.Quantity): pass my_unit = u.Unit("my_deg", u.deg) my_unit._quantity_class = MyQuantity q1 = u.Quantity(1., my_unit) assert type(q1) is u.Quantity q2 = u.Quantity(1., my_unit, subok=True) assert type(q2) is MyQuantity class QuantityMimic: def __init__(self, value, unit): self.value = value self.unit = unit def __array__(self): return np.array(self.value) class QuantityMimic2(QuantityMimic): def to(self, unit): return u.Quantity(self.value, self.unit).to(unit) def to_value(self, unit): return u.Quantity(self.value, self.unit).to_value(unit) class TestQuantityMimics: """Test Quantity Mimics that are not ndarray subclasses.""" @pytest.mark.parametrize('Mimic', (QuantityMimic, QuantityMimic2)) def test_mimic_input(self, Mimic): value = np.arange(10.) mimic = Mimic(value, u.m) q = u.Quantity(mimic) assert q.unit == u.m assert np.all(q.value == value) q2 = u.Quantity(mimic, u.cm) assert q2.unit == u.cm assert np.all(q2.value == 100 * value) @pytest.mark.parametrize('Mimic', (QuantityMimic, QuantityMimic2)) def test_mimic_setting(self, Mimic): mimic = Mimic([1., 2.], u.m) q = u.Quantity(np.arange(10.), u.cm) q[8:] = mimic assert np.all(q[:8].value == np.arange(8.)) assert np.all(q[8:].value == [100., 200.])
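

# A minimal usage sketch (an editorial addition, not part of the original
# suite; the test name is ours): as TestQuantityMimics above exercises, any
# object exposing ``value``, ``unit`` and ``__array__`` can initialize a
# Quantity, with unit conversion applied on the way in.
def test_mimic_conversion_sketch():
    mimic = QuantityMimic([3., 4.], u.km)
    q = u.Quantity(mimic, u.m)  # the mimic's km are converted to m on creation
    assert q.unit == u.m
    assert np.all(q.value == [3000., 4000.])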
import numpy as np
import pytest

from astropy import units as u


class TestQuantityLinAlgFuncs:
    """
    Test linear algebra functions
    """

    @pytest.mark.xfail
    def test_outer(self):
        q1 = np.array([1, 2, 3]) * u.m
        q2 = np.array([1, 2]) / u.s
        o = np.outer(q1, q2)
        assert np.all(o == np.array([[1, 2], [2, 4], [3, 6]]) * u.m / u.s)

    @pytest.mark.xfail
    def test_inner(self):
        q1 = np.array([1, 2, 3]) * u.m
        q2 = np.array([4, 5, 6]) / u.s
        o = np.inner(q1, q2)
        assert o == 32 * u.m / u.s

    @pytest.mark.xfail
    def test_dot(self):
        q1 = np.array([1., 2., 3.]) * u.m
        q2 = np.array([4., 5., 6.]) / u.s
        o = np.dot(q1, q2)
        assert o == 32. * u.m / u.s

    @pytest.mark.xfail
    def test_matmul(self):
        q1 = np.eye(3) * u.m
        q2 = np.array([4., 5., 6.]) / u.s
        o = np.matmul(q1, q2)
        # matmul of (m) with (1 / s) should carry unit m / s, i.e., q2 * u.m
        assert np.all(o == q2 * u.m)
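

# A hedged sketch (not from the original file; the name is ours): the
# functions above are currently expected failures, but the intended unit
# algebra can be emulated by operating on the bare values and re-attaching
# the composite unit by hand.
def test_outer_by_hand_sketch():
    q1 = np.array([1, 2, 3]) * u.m
    q2 = np.array([1, 2]) / u.s
    # outer product of the values, with the product unit attached manually
    o = np.outer(q1.value, q2.value) * (q1.unit * q2.unit)
    assert np.all(o == np.array([[1, 2], [2, 4], [3, 6]]) * u.m / u.s)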
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Regression tests for the physical_type support in the units package """ from astropy import units as u from astropy.units import physical from astropy.constants import hbar from astropy.tests.helper import raises def test_simple(): assert u.m.physical_type == 'length' def test_power(): assert (u.cm ** 3).physical_type == 'volume' def test_speed(): assert (u.km / u.h).physical_type == 'speed' def test_unknown(): assert (u.m * u.s).physical_type == 'unknown' def test_dimensionless(): assert (u.m / u.m).physical_type == 'dimensionless' def test_angular_momentum(): assert hbar.unit.physical_type == 'angular momentum' def test_flam(): flam = u.erg / (u.cm**2 * u.s * u.AA) assert flam.physical_type == 'spectral flux density wav' def test_photlam(): photlam = u.photon / (u.cm ** 2 * u.s * u.AA) assert photlam.physical_type == 'photon flux density wav' def test_photnu(): photnu = u.photon / (u.cm ** 2 * u.s * u.Hz) assert photnu.physical_type == 'photon flux density' @raises(ValueError) def test_redundant_physical_type(): physical.def_physical_type(u.m, 'utter craziness') def test_data_quantity(): assert u.byte.physical_type == 'data quantity' assert u.bit.physical_type == 'data quantity'
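

# A hedged extra check (ours, not in the original file): physical types are
# looked up via the decomposed unit, so an equivalent composite of base
# units maps onto the same type as the named unit (here, u.J -> 'energy').
def test_composed_energy_sketch():
    assert (u.kg * u.m ** 2 / u.s ** 2).physical_type == 'energy'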
# The purpose of these tests are to ensure that calling ufuncs with quantities # returns quantities with the right units, or raises exceptions. import warnings from collections import namedtuple import pytest import numpy as np from numpy.testing import assert_allclose from astropy import units as u from astropy.units import quantity_helper as qh from astropy._erfa import ufunc as erfa_ufunc from astropy.tests.helper import raises try: import scipy # pylint: disable=W0611 except ImportError: HAS_SCIPY = False else: HAS_SCIPY = True testcase = namedtuple('testcase', ['f', 'q_in', 'q_out']) testexc = namedtuple('testexc', ['f', 'q_in', 'exc', 'msg']) testwarn = namedtuple('testwarn', ['f', 'q_in', 'wfilter']) @pytest.mark.skip def test_testcase(tc): results = tc.f(*tc.q_in) # careful of the following line, would break on a function returning # a single tuple (as opposed to tuple of return values) results = (results, ) if type(results) != tuple else results for result, expected in zip(results, tc.q_out): assert result.unit == expected.unit assert_allclose(result.value, expected.value, atol=1.E-15) @pytest.mark.skip def test_testexc(te): with pytest.raises(te.exc) as exc: te.f(*te.q_in) if te.msg is not None: assert te.msg in exc.value.args[0] @pytest.mark.skip def test_testwarn(tw): with warnings.catch_warnings(): warnings.filterwarnings(tw.wfilter) tw.f(*tw.q_in) class TestUfuncHelpers: # Note that this test should work even if scipy is present, since # the scipy.special ufuncs are only loaded on demand. # The test passes independently of whether erfa is already loaded # (which will be the case for a full test, since coordinates uses it). def test_coverage(self): """Test that we cover all ufunc's""" all_np_ufuncs = set([ufunc for ufunc in np.core.umath.__dict__.values() if isinstance(ufunc, np.ufunc)]) all_q_ufuncs = (qh.UNSUPPORTED_UFUNCS | set(qh.UFUNC_HELPERS.keys())) # Check that every numpy ufunc is covered. assert all_np_ufuncs - all_q_ufuncs == set() # Check that all ufuncs we cover come from numpy or erfa. # (Since coverage for erfa is incomplete, we do not check # this the other way). all_erfa_ufuncs = set([ufunc for ufunc in erfa_ufunc.__dict__.values() if isinstance(ufunc, np.ufunc)]) assert (all_q_ufuncs - all_np_ufuncs - all_erfa_ufuncs == set()) def test_scipy_registered(self): # Should be registered as existing even if scipy is not available. assert 'scipy.special' in qh.UFUNC_HELPERS.modules def test_removal_addition(self): assert np.add in qh.UFUNC_HELPERS assert np.add not in qh.UNSUPPORTED_UFUNCS qh.UFUNC_HELPERS[np.add] = None assert np.add not in qh.UFUNC_HELPERS assert np.add in qh.UNSUPPORTED_UFUNCS qh.UFUNC_HELPERS[np.add] = qh.UFUNC_HELPERS[np.subtract] assert np.add in qh.UFUNC_HELPERS assert np.add not in qh.UNSUPPORTED_UFUNCS class TestQuantityTrigonometricFuncs: """ Test trigonometric functions """ @pytest.mark.parametrize('tc', ( testcase( f=np.sin, q_in=(30. * u.degree, ), q_out=(0.5*u.dimensionless_unscaled, ) ), testcase( f=np.sin, q_in=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ), q_out=(np.array([0., 1. / np.sqrt(2.), 1.]) * u.one, ) ), testcase( f=np.arcsin, q_in=(np.sin(30. * u.degree), ), q_out=(np.radians(30.) * u.radian, ) ), testcase( f=np.arcsin, q_in=(np.sin(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian), ), q_out=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ) ), testcase( f=np.cos, q_in=(np.pi / 3. 
                  * u.radian, ),
            q_out=(0.5 * u.dimensionless_unscaled, )
        ),
        testcase(
            f=np.cos,
            q_in=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
            q_out=(np.array([1., 1. / np.sqrt(2.), 0.]) * u.one, )
        ),
        testcase(
            f=np.arccos,
            q_in=(np.cos(np.pi / 3. * u.radian), ),
            q_out=(np.pi / 3. * u.radian, )
        ),
        testcase(
            f=np.arccos,
            q_in=(np.cos(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian), ),
            q_out=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
        ),
        testcase(
            f=np.tan,
            q_in=(np.pi / 3. * u.radian, ),
            q_out=(np.sqrt(3.) * u.dimensionless_unscaled, )
        ),
        testcase(
            f=np.tan,
            q_in=(np.array([0., 45., 135., 180.]) * u.degree, ),
            q_out=(np.array([0., 1., -1., 0.]) * u.dimensionless_unscaled, )
        ),
        testcase(
            f=np.arctan,
            q_in=(np.tan(np.pi / 3. * u.radian), ),
            q_out=(np.pi / 3. * u.radian, )
        ),
        testcase(
            f=np.arctan,
            q_in=(np.tan(np.array([10., 30., 70., 80.]) * u.degree), ),
            q_out=(np.radians(np.array([10., 30., 70., 80.]) * u.degree), )
        ),
        testcase(
            f=np.arctan2,
            q_in=(np.array([10., 30., 70., 80.]) * u.m, 2.0 * u.km),
            q_out=(np.arctan2(np.array([10., 30., 70., 80.]), 2000.) *
                   u.radian, )
        ),
        testcase(
            f=np.arctan2,
            q_in=((np.array([10., 80.]) * u.m / (2.0 * u.km)).to(u.one), 1.),
            q_out=(np.arctan2(np.array([10., 80.]) / 2000., 1.) * u.radian, )
        ),
        testcase(
            f=np.deg2rad,
            q_in=(180. * u.degree, ),
            q_out=(np.pi * u.radian, )
        ),
        testcase(
            f=np.radians,
            q_in=(180. * u.degree, ),
            q_out=(np.pi * u.radian, )
        ),
        testcase(
            f=np.deg2rad,
            q_in=(3. * u.radian, ),
            q_out=(3. * u.radian, )
        ),
        testcase(
            f=np.radians,
            q_in=(3. * u.radian, ),
            q_out=(3. * u.radian, )
        ),
        testcase(
            f=np.rad2deg,
            q_in=(60. * u.degree, ),
            q_out=(60. * u.degree, )
        ),
        testcase(
            f=np.degrees,
            q_in=(60. * u.degree, ),
            q_out=(60. * u.degree, )
        ),
        testcase(
            f=np.rad2deg,
            q_in=(np.pi * u.radian, ),
            q_out=(180. * u.degree, )
        ),
        testcase(
            f=np.degrees,
            q_in=(np.pi * u.radian, ),
            q_out=(180. * u.degree, )
        )
    ))
    def test_testcases(self, tc):
        return test_testcase(tc)

    @pytest.mark.parametrize('te', (
        testexc(
            f=np.deg2rad,
            q_in=(3. * u.m, ),
            exc=TypeError,
            msg=None
        ),
        testexc(
            f=np.radians,
            q_in=(3. * u.m, ),
            exc=TypeError,
            msg=None
        ),
        testexc(
            f=np.rad2deg,
            q_in=(3. * u.m, ),
            exc=TypeError,
            msg=None
        ),
        testexc(
            f=np.degrees,
            q_in=(3. * u.m, ),
            exc=TypeError,
            msg=None
        ),
        testexc(
            f=np.sin,
            q_in=(3. * u.m, ),
            exc=TypeError,
            msg="Can only apply 'sin' function to quantities with angle units"
        ),
        testexc(
            f=np.arcsin,
            q_in=(3. * u.m, ),
            exc=TypeError,
            msg="Can only apply 'arcsin' function to dimensionless quantities"
        ),
        testexc(
            f=np.cos,
            q_in=(3. * u.s, ),
            exc=TypeError,
            msg="Can only apply 'cos' function to quantities with angle units"
        ),
        testexc(
            f=np.arccos,
            q_in=(3. * u.s, ),
            exc=TypeError,
            msg="Can only apply 'arccos' function to dimensionless quantities"
        ),
        testexc(
            f=np.tan,
            q_in=(np.array([1, 2, 3]) * u.N, ),
            exc=TypeError,
            msg="Can only apply 'tan' function to quantities with angle units"
        ),
        testexc(
            f=np.arctan,
            q_in=(np.array([1, 2, 3]) * u.N, ),
            exc=TypeError,
            msg="Can only apply 'arctan' function to dimensionless quantities"
        ),
        testexc(
            f=np.arctan2,
            q_in=(np.array([1, 2, 3]) * u.N, 1. * u.s),
            exc=u.UnitsError,
            msg="compatible dimensions"
        ),
        testexc(
            f=np.arctan2,
            q_in=(np.array([1, 2, 3]) * u.N, 1.),
            exc=u.UnitsError,
            msg="dimensionless quantities when other arg"
        )
    ))
    def test_testexcs(self, te):
        return test_testexc(te)

    @pytest.mark.parametrize('tw', (
        testwarn(
            f=np.arcsin,
            q_in=(27.
* u.pc / (15 * u.kpc), ), wfilter='error' ), )) def test_testwarns(self, tw): return test_testwarn(tw) class TestQuantityMathFuncs: """ Test other mathematical functions """ def test_multiply_scalar(self): assert np.multiply(4. * u.m, 2. / u.s) == 8. * u.m / u.s assert np.multiply(4. * u.m, 2.) == 8. * u.m assert np.multiply(4., 2. / u.s) == 8. / u.s def test_multiply_array(self): assert np.all(np.multiply(np.arange(3.) * u.m, 2. / u.s) == np.arange(0, 6., 2.) * u.m / u.s) @pytest.mark.skipif(not isinstance(getattr(np, 'matmul', None), np.ufunc), reason="np.matmul is not yet a gufunc") def test_matmul(self): q = np.arange(3.) * u.m r = np.matmul(q, q) assert r == 5. * u.m ** 2 # less trivial case. q1 = np.eye(3) * u.m q2 = np.array([[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], [[0., 1., 0.], [0., 0., 1.], [1., 0., 0.]], [[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]]]) / u.s r2 = np.matmul(q1, q2) assert np.all(r2 == np.matmul(q1.value, q2.value) * q1.unit * q2.unit) @pytest.mark.parametrize('function', (np.divide, np.true_divide)) def test_divide_scalar(self, function): assert function(4. * u.m, 2. * u.s) == function(4., 2.) * u.m / u.s assert function(4. * u.m, 2.) == function(4., 2.) * u.m assert function(4., 2. * u.s) == function(4., 2.) / u.s @pytest.mark.parametrize('function', (np.divide, np.true_divide)) def test_divide_array(self, function): assert np.all(function(np.arange(3.) * u.m, 2. * u.s) == function(np.arange(3.), 2.) * u.m / u.s) def test_floor_divide_remainder_and_divmod(self): inch = u.Unit(0.0254 * u.m) dividend = np.array([1., 2., 3.]) * u.m divisor = np.array([3., 4., 5.]) * inch quotient = dividend // divisor remainder = dividend % divisor assert_allclose(quotient.value, [13., 19., 23.]) assert quotient.unit == u.dimensionless_unscaled assert_allclose(remainder.value, [0.0094, 0.0696, 0.079]) assert remainder.unit == dividend.unit quotient2 = np.floor_divide(dividend, divisor) remainder2 = np.remainder(dividend, divisor) assert np.all(quotient2 == quotient) assert np.all(remainder2 == remainder) quotient3, remainder3 = divmod(dividend, divisor) assert np.all(quotient3 == quotient) assert np.all(remainder3 == remainder) with pytest.raises(TypeError): divmod(dividend, u.km) with pytest.raises(TypeError): dividend // u.km with pytest.raises(TypeError): dividend % u.km quotient4, remainder4 = np.divmod(dividend, divisor) assert np.all(quotient4 == quotient) assert np.all(remainder4 == remainder) with pytest.raises(TypeError): np.divmod(dividend, u.km) def test_sqrt_scalar(self): assert np.sqrt(4. * u.m) == 2. * u.m ** 0.5 def test_sqrt_array(self): assert np.all(np.sqrt(np.array([1., 4., 9.]) * u.m) == np.array([1., 2., 3.]) * u.m ** 0.5) def test_square_scalar(self): assert np.square(4. * u.m) == 16. * u.m ** 2 def test_square_array(self): assert np.all(np.square(np.array([1., 2., 3.]) * u.m) == np.array([1., 4., 9.]) * u.m ** 2) def test_reciprocal_scalar(self): assert np.reciprocal(4. * u.m) == 0.25 / u.m def test_reciprocal_array(self): assert np.all(np.reciprocal(np.array([1., 2., 4.]) * u.m) == np.array([1., 0.5, 0.25]) / u.m) def test_heaviside_scalar(self): assert np.heaviside(0. * u.m, 0.5) == 0.5 * u.dimensionless_unscaled assert np.heaviside(0. * u.s, 25 * u.percent) == 0.25 * u.dimensionless_unscaled assert np.heaviside(2. * u.J, 0.25) == 1. 
* u.dimensionless_unscaled def test_heaviside_array(self): values = np.array([-1., 0., 0., +1.]) halfway = np.array([0.75, 0.25, 0.75, 0.25]) * u.dimensionless_unscaled assert np.all(np.heaviside(values * u.m, halfway * u.dimensionless_unscaled) == [0, 0.25, 0.75, +1.] * u.dimensionless_unscaled) @pytest.mark.parametrize('function', (np.cbrt, )) def test_cbrt_scalar(self, function): assert function(8. * u.m**3) == 2. * u.m @pytest.mark.parametrize('function', (np.cbrt, )) def test_cbrt_array(self, function): # Calculate cbrt on both sides since on Windows the cube root of 64 # does not exactly equal 4. See 4388. values = np.array([1., 8., 64.]) assert np.all(function(values * u.m**3) == function(values) * u.m) def test_power_scalar(self): assert np.power(4. * u.m, 2.) == 16. * u.m ** 2 assert np.power(4., 200. * u.cm / u.m) == \ u.Quantity(16., u.dimensionless_unscaled) # regression check on #1696 assert np.power(4. * u.m, 0.) == 1. * u.dimensionless_unscaled def test_power_array(self): assert np.all(np.power(np.array([1., 2., 3.]) * u.m, 3.) == np.array([1., 8., 27.]) * u.m ** 3) # regression check on #1696 assert np.all(np.power(np.arange(4.) * u.m, 0.) == 1. * u.dimensionless_unscaled) # float_power only introduced in numpy 1.12 @pytest.mark.skipif("not hasattr(np, 'float_power')") def test_float_power_array(self): assert np.all(np.float_power(np.array([1., 2., 3.]) * u.m, 3.) == np.array([1., 8., 27.]) * u.m ** 3) # regression check on #1696 assert np.all(np.float_power(np.arange(4.) * u.m, 0.) == 1. * u.dimensionless_unscaled) @raises(ValueError) def test_power_array_array(self): np.power(4. * u.m, [2., 4.]) @raises(ValueError) def test_power_array_array2(self): np.power([2., 4.] * u.m, [2., 4.]) def test_power_array_array3(self): # Identical unit fractions are converted automatically to dimensionless # and should be allowed as base for np.power: #4764 q = [2., 4.] * u.m / u.m powers = [2., 4.] res = np.power(q, powers) assert np.all(res.value == q.value ** powers) assert res.unit == u.dimensionless_unscaled # The same holds for unit fractions that are scaled dimensionless. q2 = [2., 4.] * u.m / u.cm # Test also against different types of exponent for cls in (list, tuple, np.array, np.ma.array, u.Quantity): res2 = np.power(q2, cls(powers)) assert np.all(res2.value == q2.to_value(1) ** powers) assert res2.unit == u.dimensionless_unscaled # Though for single powers, we keep the composite unit. res3 = q2 ** 2 assert np.all(res3.value == q2.value ** 2) assert res3.unit == q2.unit ** 2 assert np.all(res3 == q2 ** [2, 2]) def test_power_invalid(self): with pytest.raises(TypeError) as exc: np.power(3., 4. * u.m) assert "raise something to a dimensionless" in exc.value.args[0] def test_copysign_scalar(self): assert np.copysign(3 * u.m, 1.) == 3. * u.m assert np.copysign(3 * u.m, 1. * u.s) == 3. * u.m assert np.copysign(3 * u.m, -1.) == -3. * u.m assert np.copysign(3 * u.m, -1. * u.s) == -3. * u.m def test_copysign_array(self): assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1.) == -np.array([1., 2., 3.]) * u.s) assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1. * u.m) == -np.array([1., 2., 3.]) * u.s) assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, np.array([-2., 2., -4.]) * u.m) == np.array([-1., 2., -3.]) * u.s) q = np.copysign(np.array([1., 2., 3.]), -3 * u.m) assert np.all(q == np.array([-1., -2., -3.])) assert not isinstance(q, u.Quantity) def test_ldexp_scalar(self): assert np.ldexp(4. * u.m, 2) == 16. 
* u.m def test_ldexp_array(self): assert np.all(np.ldexp(np.array([1., 2., 3.]) * u.m, [3, 2, 1]) == np.array([8., 8., 6.]) * u.m) def test_ldexp_invalid(self): with pytest.raises(TypeError): np.ldexp(3. * u.m, 4.) with pytest.raises(TypeError): np.ldexp(3., u.Quantity(4, u.m, dtype=int)) @pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)) def test_exp_scalar(self, function): q = function(3. * u.m / (6. * u.m)) assert q.unit == u.dimensionless_unscaled assert q.value == function(0.5) @pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)) def test_exp_array(self, function): q = function(np.array([2., 3., 6.]) * u.m / (6. * u.m)) assert q.unit == u.dimensionless_unscaled assert np.all(q.value == function(np.array([1. / 3., 1. / 2., 1.]))) # should also work on quantities that can be made dimensionless q2 = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm)) assert q2.unit == u.dimensionless_unscaled assert_allclose(q2.value, function(np.array([100. / 3., 100. / 2., 100.]))) @pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)) def test_exp_invalid_units(self, function): # Can't use exp() with non-dimensionless quantities with pytest.raises(TypeError) as exc: function(3. * u.m / u.s) assert exc.value.args[0] == ("Can only apply '{0}' function to " "dimensionless quantities" .format(function.__name__)) def test_modf_scalar(self): q = np.modf(9. * u.m / (600. * u.cm)) assert q == (0.5 * u.dimensionless_unscaled, 1. * u.dimensionless_unscaled) def test_modf_array(self): v = np.arange(10.) * u.m / (500. * u.cm) q = np.modf(v) n = np.modf(v.to_value(u.dimensionless_unscaled)) assert q[0].unit == u.dimensionless_unscaled assert q[1].unit == u.dimensionless_unscaled assert all(q[0].value == n[0]) assert all(q[1].value == n[1]) def test_frexp_scalar(self): q = np.frexp(3. * u.m / (6. * u.m)) assert q == (np.array(0.5), np.array(0.0)) def test_frexp_array(self): q = np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.m)) assert all((_q0, _q1) == np.frexp(_d) for _q0, _q1, _d in zip(q[0], q[1], [1. / 3., 1. / 2., 1.])) def test_frexp_invalid_units(self): # Can't use prod() with non-dimensionless quantities with pytest.raises(TypeError) as exc: np.frexp(3. * u.m / u.s) assert exc.value.args[0] == ("Can only apply 'frexp' function to " "unscaled dimensionless quantities") # also does not work on quantities that can be made dimensionless with pytest.raises(TypeError) as exc: np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.cm)) assert exc.value.args[0] == ("Can only apply 'frexp' function to " "unscaled dimensionless quantities") @pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2)) def test_dimensionless_twoarg_array(self, function): q = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm), 1.) assert q.unit == u.dimensionless_unscaled assert_allclose(q.value, function(np.array([100. / 3., 100. / 2., 100.]), 1.)) @pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2)) def test_dimensionless_twoarg_invalid_units(self, function): with pytest.raises(TypeError) as exc: function(1. * u.km / u.s, 3. 
* u.m / u.s) assert exc.value.args[0] == ("Can only apply '{0}' function to " "dimensionless quantities" .format(function.__name__)) class TestInvariantUfuncs: @pytest.mark.parametrize(('ufunc'), [np.absolute, np.fabs, np.conj, np.conjugate, np.negative, np.spacing, np.rint, np.floor, np.ceil, np.positive]) def test_invariant_scalar(self, ufunc): q_i = 4.7 * u.m q_o = ufunc(q_i) assert isinstance(q_o, u.Quantity) assert q_o.unit == q_i.unit assert q_o.value == ufunc(q_i.value) @pytest.mark.parametrize(('ufunc'), [np.absolute, np.conjugate, np.negative, np.rint, np.floor, np.ceil]) def test_invariant_array(self, ufunc): q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s q_o = ufunc(q_i) assert isinstance(q_o, u.Quantity) assert q_o.unit == q_i.unit assert np.all(q_o.value == ufunc(q_i.value)) @pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot, np.maximum, np.minimum, np.nextafter, np.remainder, np.mod, np.fmod]) def test_invariant_twoarg_scalar(self, ufunc): q_i1 = 4.7 * u.m q_i2 = 9.4 * u.km q_o = ufunc(q_i1, q_i2) assert isinstance(q_o, u.Quantity) assert q_o.unit == q_i1.unit assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit))) @pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot, np.maximum, np.minimum, np.nextafter, np.remainder, np.mod, np.fmod]) def test_invariant_twoarg_array(self, ufunc): q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s q_i2 = np.array([10., -5., 1.e6]) * u.g / u.us q_o = ufunc(q_i1, q_i2) assert isinstance(q_o, u.Quantity) assert q_o.unit == q_i1.unit assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit))) @pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot, np.maximum, np.minimum, np.nextafter, np.remainder, np.mod, np.fmod]) def test_invariant_twoarg_one_arbitrary(self, ufunc): q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s arbitrary_unit_value = np.array([0.]) q_o = ufunc(q_i1, arbitrary_unit_value) assert isinstance(q_o, u.Quantity) assert q_o.unit == q_i1.unit assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary_unit_value)) @pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot, np.maximum, np.minimum, np.nextafter, np.remainder, np.mod, np.fmod]) def test_invariant_twoarg_invalid_units(self, ufunc): q_i1 = 4.7 * u.m q_i2 = 9.4 * u.s with pytest.raises(u.UnitsError) as exc: ufunc(q_i1, q_i2) assert "compatible dimensions" in exc.value.args[0] class TestComparisonUfuncs: @pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal]) def test_comparison_valid_units(self, ufunc): q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s q_i2 = np.array([10., -5., 1.e6]) * u.g / u.Ms q_o = ufunc(q_i1, q_i2) assert not isinstance(q_o, u.Quantity) assert q_o.dtype == bool assert np.all(q_o == ufunc(q_i1.value, q_i2.to_value(q_i1.unit))) q_o2 = ufunc(q_i1 / q_i2, 2.) 
assert not isinstance(q_o2, u.Quantity) assert q_o2.dtype == bool assert np.all(q_o2 == ufunc((q_i1 / q_i2) .to_value(u.dimensionless_unscaled), 2.)) # comparison with 0., inf, nan is OK even for dimensional quantities for arbitrary_unit_value in (0., np.inf, np.nan): ufunc(q_i1, arbitrary_unit_value) ufunc(q_i1, arbitrary_unit_value*np.ones(len(q_i1))) # and just for completeness ufunc(q_i1, np.array([0., np.inf, np.nan])) @pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal]) def test_comparison_invalid_units(self, ufunc): q_i1 = 4.7 * u.m q_i2 = 9.4 * u.s with pytest.raises(u.UnitsError) as exc: ufunc(q_i1, q_i2) assert "compatible dimensions" in exc.value.args[0] class TestInplaceUfuncs: @pytest.mark.parametrize(('value'), [1., np.arange(10.)]) def test_one_argument_ufunc_inplace(self, value): # without scaling s = value * u.rad check = s np.sin(s, out=s) assert check is s assert check.unit == u.dimensionless_unscaled # with scaling s2 = (value * u.rad).to(u.deg) check2 = s2 np.sin(s2, out=s2) assert check2 is s2 assert check2.unit == u.dimensionless_unscaled assert_allclose(s.value, s2.value) @pytest.mark.parametrize(('value'), [1., np.arange(10.)]) def test_one_argument_ufunc_inplace_2(self, value): """Check inplace works with non-quantity input and quantity output""" s = value * u.m check = s np.absolute(value, out=s) assert check is s assert np.all(check.value == np.absolute(value)) assert check.unit is u.dimensionless_unscaled np.sqrt(value, out=s) assert check is s assert np.all(check.value == np.sqrt(value)) assert check.unit is u.dimensionless_unscaled np.exp(value, out=s) assert check is s assert np.all(check.value == np.exp(value)) assert check.unit is u.dimensionless_unscaled np.arcsin(value/10., out=s) assert check is s assert np.all(check.value == np.arcsin(value/10.)) assert check.unit is u.radian @pytest.mark.parametrize(('value'), [1., np.arange(10.)]) def test_one_argument_two_output_ufunc_inplace(self, value): v = 100. * value * u.cm / u.m v_copy = v.copy() tmp = v.copy() check = v np.modf(v, tmp, v) assert check is v assert check.unit == u.dimensionless_unscaled v2 = v_copy.to(u.dimensionless_unscaled) check2 = v2 np.modf(v2, tmp, v2) assert check2 is v2 assert check2.unit == u.dimensionless_unscaled # can also replace in last position if no scaling is needed v3 = v_copy.to(u.dimensionless_unscaled) check3 = v3 np.modf(v3, v3, tmp) assert check3 is v3 assert check3.unit == u.dimensionless_unscaled # And now, with numpy >= 1.13, one can also replace input with # first output when scaling v4 = v_copy.copy() check4 = v4 np.modf(v4, v4, tmp) assert check4 is v4 assert check4.unit == u.dimensionless_unscaled @pytest.mark.parametrize(('value'), [1., np.arange(10.)]) def test_two_argument_ufunc_inplace_1(self, value): s = value * u.cycle check = s s /= 2. assert check is s assert np.all(check.value == value / 2.) s /= u.s assert check is s assert check.unit == u.cycle / u.s s *= 2. * u.s assert check is s assert np.all(check == value * u.cycle) @pytest.mark.parametrize(('value'), [1., np.arange(10.)]) def test_two_argument_ufunc_inplace_2(self, value): s = value * u.cycle check = s np.arctan2(s, s, out=s) assert check is s assert check.unit == u.radian with pytest.raises(u.UnitsError): s += 1. * u.m assert check is s assert check.unit == u.radian np.arctan2(1. * u.deg, s, out=s) assert check is s assert check.unit == u.radian np.add(1. * u.deg, s, out=s) assert check is s assert check.unit == u.deg np.multiply(2. 
/ u.s, s, out=s) assert check is s assert check.unit == u.deg / u.s def test_two_argument_ufunc_inplace_3(self): s = np.array([1., 2., 3.]) * u.dimensionless_unscaled np.add(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s) assert np.all(s.value == np.array([3., 6., 9.])) assert s.unit is u.dimensionless_unscaled np.arctan2(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s) assert_allclose(s.value, np.arctan2(1., 2.)) assert s.unit is u.radian @pytest.mark.parametrize(('value'), [1., np.arange(10.)]) def test_two_argument_two_output_ufunc_inplace(self, value): v = value * u.m divisor = 70.*u.cm v1 = v.copy() tmp = v.copy() check = np.divmod(v1, divisor, out=(tmp, v1)) assert check[0] is tmp and check[1] is v1 assert tmp.unit == u.dimensionless_unscaled assert v1.unit == v.unit v2 = v.copy() check2 = np.divmod(v2, divisor, out=(v2, tmp)) assert check2[0] is v2 and check2[1] is tmp assert v2.unit == u.dimensionless_unscaled assert tmp.unit == v.unit v3a = v.copy() v3b = v.copy() check3 = np.divmod(v3a, divisor, out=(v3a, v3b)) assert check3[0] is v3a and check3[1] is v3b assert v3a.unit == u.dimensionless_unscaled assert v3b.unit == v.unit def test_ufunc_inplace_non_contiguous_data(self): # ensure inplace works also for non-contiguous data (closes #1834) s = np.arange(10.) * u.m s_copy = s.copy() s2 = s[::2] s2 += 1. * u.cm assert np.all(s[::2] > s_copy[::2]) assert np.all(s[1::2] == s_copy[1::2]) def test_ufunc_inplace_non_standard_dtype(self): """Check that inplace operations check properly for casting. First two tests that check that float32 is kept close #3976. """ a1 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32) a1 *= np.float32(10) assert a1.unit is u.m assert a1.dtype == np.float32 a2 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32) a2 += (20.*u.km) assert a2.unit is u.m assert a2.dtype == np.float32 # For integer, in-place only works if no conversion is done. a3 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32) a3 += u.Quantity(10, u.m, dtype=np.int64) assert a3.unit is u.m assert a3.dtype == np.int32 a4 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32) with pytest.raises(TypeError): a4 += u.Quantity(10, u.mm, dtype=np.int64) class TestUfuncAt: """Test that 'at' method for ufuncs (calculates in-place at given indices) For Quantities, since calculations are in-place, it makes sense only if the result is still a quantity, and if the unit does not have to change """ def test_one_argument_ufunc_at(self): q = np.arange(10.) * u.m i = np.array([1, 2]) qv = q.value.copy() np.negative.at(q, i) np.negative.at(qv, i) assert np.all(q.value == qv) assert q.unit is u.m # cannot change from quantity to bool array with pytest.raises(TypeError): np.isfinite.at(q, i) # for selective in-place, cannot change the unit with pytest.raises(u.UnitsError): np.square.at(q, i) # except if the unit does not change (i.e., dimensionless) d = np.arange(10.) * u.dimensionless_unscaled dv = d.value.copy() np.square.at(d, i) np.square.at(dv, i) assert np.all(d.value == dv) assert d.unit is u.dimensionless_unscaled d = np.arange(10.) * u.dimensionless_unscaled dv = d.value.copy() np.log.at(d, i) np.log.at(dv, i) assert np.all(d.value == dv) assert d.unit is u.dimensionless_unscaled # also for sine it doesn't work, even if given an angle a = np.arange(10.) 
* u.radian with pytest.raises(u.UnitsError): np.sin.at(a, i) # except, for consistency, if we have made radian equivalent to # dimensionless (though hopefully it will never be needed) av = a.value.copy() with u.add_enabled_equivalencies(u.dimensionless_angles()): np.sin.at(a, i) np.sin.at(av, i) assert_allclose(a.value, av) # but we won't do double conversion ad = np.arange(10.) * u.degree with pytest.raises(u.UnitsError): np.sin.at(ad, i) def test_two_argument_ufunc_at(self): s = np.arange(10.) * u.m i = np.array([1, 2]) check = s.value.copy() np.add.at(s, i, 1.*u.km) np.add.at(check, i, 1000.) assert np.all(s.value == check) assert s.unit is u.m with pytest.raises(u.UnitsError): np.add.at(s, i, 1.*u.s) # also raise UnitsError if unit would have to be changed with pytest.raises(u.UnitsError): np.multiply.at(s, i, 1*u.s) # but be fine if it does not s = np.arange(10.) * u.m check = s.value.copy() np.multiply.at(s, i, 2.*u.dimensionless_unscaled) np.multiply.at(check, i, 2) assert np.all(s.value == check) s = np.arange(10.) * u.m np.multiply.at(s, i, 2.) assert np.all(s.value == check) # of course cannot change class of data either with pytest.raises(TypeError): np.greater.at(s, i, 1.*u.km) class TestUfuncReduceReduceatAccumulate: """Test 'reduce', 'reduceat' and 'accumulate' methods for ufuncs For Quantities, it makes sense only if the unit does not have to change """ def test_one_argument_ufunc_reduce_accumulate(self): # one argument cannot be used s = np.arange(10.) * u.radian i = np.array([0, 5, 1, 6]) with pytest.raises(ValueError): np.sin.reduce(s) with pytest.raises(ValueError): np.sin.accumulate(s) with pytest.raises(ValueError): np.sin.reduceat(s, i) def test_two_argument_ufunc_reduce_accumulate(self): s = np.arange(10.) * u.m i = np.array([0, 5, 1, 6]) check = s.value.copy() s_add_reduce = np.add.reduce(s) check_add_reduce = np.add.reduce(check) assert s_add_reduce.value == check_add_reduce assert s_add_reduce.unit is u.m s_add_accumulate = np.add.accumulate(s) check_add_accumulate = np.add.accumulate(check) assert np.all(s_add_accumulate.value == check_add_accumulate) assert s_add_accumulate.unit is u.m s_add_reduceat = np.add.reduceat(s, i) check_add_reduceat = np.add.reduceat(check, i) assert np.all(s_add_reduceat.value == check_add_reduceat) assert s_add_reduceat.unit is u.m # reduce(at) or accumulate on comparisons makes no sense, # as intermediate result is not even a Quantity with pytest.raises(TypeError): np.greater.reduce(s) with pytest.raises(TypeError): np.greater.accumulate(s) with pytest.raises(TypeError): np.greater.reduceat(s, i) # raise UnitsError if unit would have to be changed with pytest.raises(u.UnitsError): np.multiply.reduce(s) with pytest.raises(u.UnitsError): np.multiply.accumulate(s) with pytest.raises(u.UnitsError): np.multiply.reduceat(s, i) # but be fine if it does not s = np.arange(10.) 
* u.dimensionless_unscaled check = s.value.copy() s_multiply_reduce = np.multiply.reduce(s) check_multiply_reduce = np.multiply.reduce(check) assert s_multiply_reduce.value == check_multiply_reduce assert s_multiply_reduce.unit is u.dimensionless_unscaled s_multiply_accumulate = np.multiply.accumulate(s) check_multiply_accumulate = np.multiply.accumulate(check) assert np.all(s_multiply_accumulate.value == check_multiply_accumulate) assert s_multiply_accumulate.unit is u.dimensionless_unscaled s_multiply_reduceat = np.multiply.reduceat(s, i) check_multiply_reduceat = np.multiply.reduceat(check, i) assert np.all(s_multiply_reduceat.value == check_multiply_reduceat) assert s_multiply_reduceat.unit is u.dimensionless_unscaled class TestUfuncOuter: """Test 'outer' methods for ufuncs Just a few spot checks, since it uses the same code as the regular ufunc call """ def test_one_argument_ufunc_outer(self): # one argument cannot be used s = np.arange(10.) * u.radian with pytest.raises(ValueError): np.sin.outer(s) def test_two_argument_ufunc_outer(self): s1 = np.arange(10.) * u.m s2 = np.arange(2.) * u.s check1 = s1.value check2 = s2.value s12_multiply_outer = np.multiply.outer(s1, s2) check12_multiply_outer = np.multiply.outer(check1, check2) assert np.all(s12_multiply_outer.value == check12_multiply_outer) assert s12_multiply_outer.unit == s1.unit * s2.unit # raise UnitsError if appropriate with pytest.raises(u.UnitsError): np.add.outer(s1, s2) # but be fine if it does not s3 = np.arange(2.) * s1.unit check3 = s3.value s13_add_outer = np.add.outer(s1, s3) check13_add_outer = np.add.outer(check1, check3) assert np.all(s13_add_outer.value == check13_add_outer) assert s13_add_outer.unit is s1.unit s13_greater_outer = np.greater.outer(s1, s3) check13_greater_outer = np.greater.outer(check1, check3) assert type(s13_greater_outer) is np.ndarray assert np.all(s13_greater_outer == check13_greater_outer) if HAS_SCIPY: from scipy import special as sps def test_scipy_registration(): """Check that scipy gets loaded upon first use.""" assert sps.erf not in qh.UFUNC_HELPERS sps.erf(1. * u.percent) assert sps.erf in qh.UFUNC_HELPERS class TestScipySpecialUfuncs: erf_like_ufuncs = ( sps.erf, sps.gamma, sps.loggamma, sps.gammasgn, sps.psi, sps.rgamma, sps.erfc, sps.erfcx, sps.erfi, sps.wofz, sps.dawsn, sps.entr, sps.exprel, sps.expm1, sps.log1p, sps.exp2, sps.exp10) @pytest.mark.parametrize('function', erf_like_ufuncs) def test_erf_scalar(self, function): TestQuantityMathFuncs.test_exp_scalar(None, function) @pytest.mark.parametrize('function', erf_like_ufuncs) def test_erf_array(self, function): TestQuantityMathFuncs.test_exp_array(None, function) @pytest.mark.parametrize('function', erf_like_ufuncs) def test_erf_invalid_units(self, function): TestQuantityMathFuncs.test_exp_invalid_units(None, function) @pytest.mark.parametrize('function', (sps.cbrt, )) def test_cbrt_scalar(self, function): TestQuantityMathFuncs.test_cbrt_scalar(None, function) @pytest.mark.parametrize('function', (sps.cbrt, )) def test_cbrt_array(self, function): TestQuantityMathFuncs.test_cbrt_array(None, function) @pytest.mark.parametrize('function', (sps.radian, )) def test_radian(self, function): q1 = function(180. * u.degree, 0. * u.arcmin, 0. * u.arcsec) assert_allclose(q1.value, np.pi) assert q1.unit == u.radian q2 = function(0. * u.degree, 30. * u.arcmin, 0. * u.arcsec) assert_allclose(q2.value, (30. * u.arcmin).to(u.radian).value) assert q2.unit == u.radian q3 = function(0. * u.degree, 0. * u.arcmin, 30. 
* u.arcsec) assert_allclose(q3.value, (30. * u.arcsec).to(u.radian).value) # the following doesn't make much sense in terms of the name of the # routine, but we check it gives the correct result. q4 = function(3. * u.radian, 0. * u.arcmin, 0. * u.arcsec) assert_allclose(q4.value, 3.) assert q4.unit == u.radian with pytest.raises(TypeError): function(3. * u.m, 2. * u.s, 1. * u.kg) jv_like_ufuncs = ( sps.jv, sps.jn, sps.jve, sps.yn, sps.yv, sps.yve, sps.kn, sps.kv, sps.kve, sps.iv, sps.ive, sps.hankel1, sps.hankel1e, sps.hankel2, sps.hankel2e) @pytest.mark.parametrize('function', jv_like_ufuncs) def test_jv_scalar(self, function): q = function(2. * u.m / (2. * u.m), 3. * u.m / (6. * u.m)) assert q.unit == u.dimensionless_unscaled assert q.value == function(1.0, 0.5) @pytest.mark.parametrize('function', jv_like_ufuncs) def test_jv_array(self, function): q = function(np.ones(3) * u.m / (1. * u.m), np.array([2., 3., 6.]) * u.m / (6. * u.m)) assert q.unit == u.dimensionless_unscaled assert np.all(q.value == function( np.ones(3), np.array([1. / 3., 1. / 2., 1.])) ) # should also work on quantities that can be made dimensionless q2 = function(np.ones(3) * u.m / (1. * u.m), np.array([2., 3., 6.]) * u.m / (6. * u.cm)) assert q2.unit == u.dimensionless_unscaled assert_allclose(q2.value, function(np.ones(3), np.array([100. / 3., 100. / 2., 100.]))) @pytest.mark.parametrize('function', jv_like_ufuncs) def test_jv_invalid_units(self, function): # Can't use jv() with non-dimensionless quantities with pytest.raises(TypeError) as exc: function(1. * u.kg, 3. * u.m / u.s) assert exc.value.args[0] == ("Can only apply '{0}' function to " "dimensionless quantities" .format(function.__name__))
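

# A hedged sketch (not part of the original module; the name is ours): the
# registry exercised in TestUfuncHelpers.test_removal_addition can be driven
# the same way for any ufunc -- assigning None marks a ufunc as unsupported,
# and assigning a helper function (re-)enables it.
def test_helper_registry_sketch():
    saved = qh.UFUNC_HELPERS[np.hypot]
    try:
        qh.UFUNC_HELPERS[np.hypot] = None  # mark np.hypot as unsupported
        assert np.hypot in qh.UNSUPPORTED_UFUNCS
    finally:
        qh.UFUNC_HELPERS[np.hypot] = saved  # restore the original helper
    assert np.hypot in qh.UFUNC_HELPERS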
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest from astropy import units as u # list of pairs (target unit/physical type, input unit) x_inputs = [(u.arcsec, u.deg), ('angle', u.deg), (u.kpc/u.Myr, u.km/u.s), ('speed', u.km/u.s), ([u.arcsec, u.km], u.deg), ([u.arcsec, u.km], u.km), # multiple allowed (['angle', 'length'], u.deg), (['angle', 'length'], u.km)] y_inputs = [(u.arcsec, u.deg), ('angle', u.deg), (u.kpc/u.Myr, u.km/u.s), ('speed', u.km/u.s)] @pytest.fixture(scope="module", params=list(range(len(x_inputs)))) def x_input(request): return x_inputs[request.param] @pytest.fixture(scope="module", params=list(range(len(y_inputs)))) def y_input(request): return y_inputs[request.param] # ---- Tests that use the fixtures defined above ---- def test_args(x_input, y_input): x_target, x_unit = x_input y_target, y_unit = y_input @u.quantity_input(x=x_target, y=y_target) def myfunc_args(x, y): return x, y x, y = myfunc_args(1*x_unit, 1*y_unit) assert isinstance(x, u.Quantity) assert isinstance(y, u.Quantity) assert x.unit == x_unit assert y.unit == y_unit def test_args_nonquantity(x_input): x_target, x_unit = x_input @u.quantity_input(x=x_target) def myfunc_args(x, y): return x, y x, y = myfunc_args(1*x_unit, 100) assert isinstance(x, u.Quantity) assert isinstance(y, int) assert x.unit == x_unit def test_wrong_unit(x_input, y_input): x_target, x_unit = x_input y_target, y_unit = y_input @u.quantity_input(x=x_target, y=y_target) def myfunc_args(x, y): return x, y with pytest.raises(u.UnitsError) as e: x, y = myfunc_args(1*x_unit, 100*u.Joule) # has to be an unspecified unit str_to = str(y_target) assert str(e.value) == "Argument 'y' to function 'myfunc_args' must be in units convertible to '{0}'.".format(str_to) def test_not_quantity(x_input, y_input): x_target, x_unit = x_input y_target, y_unit = y_input @u.quantity_input(x=x_target, y=y_target) def myfunc_args(x, y): return x, y with pytest.raises(TypeError) as e: x, y = myfunc_args(1*x_unit, 100) assert str(e.value) == "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. You may want to pass in an astropy Quantity instead." 
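

# A hedged interlude (ours, not from the original file): a physical-type
# string target such as 'angle', as used in the fixtures above, accepts any
# unit carrying that physical type.
def test_physical_type_target_sketch():
    @u.quantity_input(x='angle')
    def myfunc_sketch(x):
        return x

    assert myfunc_sketch(1 * u.rad).unit == u.rad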
def test_kwargs(x_input, y_input): x_target, x_unit = x_input y_target, y_unit = y_input @u.quantity_input(x=x_target, y=y_target) def myfunc_args(x, my_arg, y=1*y_unit): return x, my_arg, y x, my_arg, y = myfunc_args(1*x_unit, 100, y=100*y_unit) assert isinstance(x, u.Quantity) assert isinstance(my_arg, int) assert isinstance(y, u.Quantity) assert y.unit == y_unit def test_unused_kwargs(x_input, y_input): x_target, x_unit = x_input y_target, y_unit = y_input @u.quantity_input(x=x_target, y=y_target) def myfunc_args(x, my_arg1, y=y_unit, my_arg2=1000): return x, my_arg1, y, my_arg2 x, my_arg1, y, my_arg2 = myfunc_args(1*x_unit, 100, y=100*y_unit, my_arg2=10) assert isinstance(x, u.Quantity) assert isinstance(my_arg1, int) assert isinstance(y, u.Quantity) assert isinstance(my_arg2, int) assert y.unit == y_unit assert my_arg2 == 10 def test_kwarg_wrong_unit(x_input, y_input): x_target, x_unit = x_input y_target, y_unit = y_input @u.quantity_input(x=x_target, y=y_target) def myfunc_args(x, y=10*y_unit): return x, y with pytest.raises(u.UnitsError) as e: x, y = myfunc_args(1*x_unit, y=100*u.Joule) str_to = str(y_target) assert str(e.value) == "Argument 'y' to function 'myfunc_args' must be in units convertible to '{0}'.".format(str_to) def test_kwarg_not_quantity(x_input, y_input): x_target, x_unit = x_input y_target, y_unit = y_input @u.quantity_input(x=x_target, y=y_target) def myfunc_args(x, y=10*y_unit): return x, y with pytest.raises(TypeError) as e: x, y = myfunc_args(1*x_unit, y=100) assert str(e.value) == "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. You may want to pass in an astropy Quantity instead." def test_kwarg_default(x_input, y_input): x_target, x_unit = x_input y_target, y_unit = y_input @u.quantity_input(x=x_target, y=y_target) def myfunc_args(x, y=10*y_unit): return x, y x, y = myfunc_args(1*x_unit) assert isinstance(x, u.Quantity) assert isinstance(y, u.Quantity) assert x.unit == x_unit assert y.unit == y_unit def test_kwargs_input(x_input, y_input): x_target, x_unit = x_input y_target, y_unit = y_input @u.quantity_input(x=x_target, y=y_target) def myfunc_args(x=1*x_unit, y=1*y_unit): return x, y kwargs = {'x': 10*x_unit, 'y': 10*y_unit} x, y = myfunc_args(**kwargs) assert isinstance(x, u.Quantity) assert isinstance(y, u.Quantity) assert x.unit == x_unit assert y.unit == y_unit def test_kwargs_extra(x_input): x_target, x_unit = x_input @u.quantity_input(x=x_target) def myfunc_args(x, **kwargs): return x x = myfunc_args(1*x_unit) assert isinstance(x, u.Quantity) assert x.unit == x_unit # ---- Tests that don't used the fixtures ---- @pytest.mark.parametrize("x_unit,y_unit", [ (u.arcsec, u.eV), ('angle', 'energy')]) def test_arg_equivalencies(x_unit, y_unit): @u.quantity_input(x=x_unit, y=y_unit, equivalencies=u.mass_energy()) def myfunc_args(x, y): return x, y+(10*u.J) # Add an energy to check equiv is working x, y = myfunc_args(1*u.arcsec, 100*u.gram) assert isinstance(x, u.Quantity) assert isinstance(y, u.Quantity) assert x.unit == u.arcsec assert y.unit == u.gram @pytest.mark.parametrize("x_unit,energy_unit", [ (u.arcsec, u.eV), ('angle', 'energy')]) def test_kwarg_equivalencies(x_unit, energy_unit): @u.quantity_input(x=x_unit, energy=energy_unit, equivalencies=u.mass_energy()) def myfunc_args(x, energy=10*u.eV): return x, energy+(10*u.J) # Add an energy to check equiv is working x, energy = myfunc_args(1*u.arcsec, 100*u.gram) assert isinstance(x, u.Quantity) assert isinstance(energy, u.Quantity) assert x.unit == u.arcsec assert energy.unit == u.gram 
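

# A hedged aside (ours, not from the original file): the mass_energy()
# equivalency used above is what lets a mass pass a check against an energy
# target; the underlying conversion is just E = m * c**2.
def test_mass_energy_value_sketch():
    E = (1 * u.g).to(u.J, equivalencies=u.mass_energy())
    # with c = 299792458 m/s exactly, 1 g corresponds to ~8.98755e13 J
    assert abs(E.value / 8.987551787e13 - 1) < 1e-6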
def test_no_equivalent(): class test_unit: pass class test_quantity: unit = test_unit() @u.quantity_input(x=u.arcsec) def myfunc_args(x): return x with pytest.raises(TypeError) as e: x, y = myfunc_args(test_quantity()) assert str(e.value) == "Argument 'x' to function 'myfunc_args' has a 'unit' attribute without an 'is_equivalent' method. You may want to pass in an astropy Quantity instead." def test_kwarg_invalid_physical_type(): @u.quantity_input(x='angle', y='africanswallow') def myfunc_args(x, y=10*u.deg): return x, y with pytest.raises(ValueError) as e: x, y = myfunc_args(1*u.arcsec, y=100*u.deg) assert str(e.value) == "Invalid unit or physical type 'africanswallow'." def test_default_value_check(): x_target = u.deg x_unit = u.arcsec with pytest.raises(TypeError): @u.quantity_input(x=x_target) def myfunc_args(x=1.): return x x = myfunc_args() x = myfunc_args(1*x_unit) assert isinstance(x, u.Quantity) assert x.unit == x_unit def test_args_None(): x_target = u.deg x_unit = u.arcsec y_target = u.km y_unit = u.kpc @u.quantity_input(x=[x_target, None], y=[None, y_target]) def myfunc_args(x, y): return x, y x, y = myfunc_args(1*x_unit, None) assert isinstance(x, u.Quantity) assert x.unit == x_unit assert y is None x, y = myfunc_args(None, 1*y_unit) assert isinstance(y, u.Quantity) assert y.unit == y_unit assert x is None def test_args_None_kwarg(): x_target = u.deg x_unit = u.arcsec y_target = u.km @u.quantity_input(x=x_target, y=y_target) def myfunc_args(x, y=None): return x, y x, y = myfunc_args(1*x_unit) assert isinstance(x, u.Quantity) assert x.unit == x_unit assert y is None x, y = myfunc_args(1*x_unit, None) assert isinstance(x, u.Quantity) assert x.unit == x_unit assert y is None with pytest.raises(TypeError): x, y = myfunc_args(None, None)
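

# A hedged sketch (ours, not part of the original file): quantity_input can
# also read function annotations, so no decorator keywords are needed.
def test_annotation_sketch():
    @u.quantity_input
    def myfunc_annot(x: u.deg):
        return x.to(u.arcsec)

    assert myfunc_annot(1 * u.deg).unit == u.arcsec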
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np

from astropy.units import (dimensionless_unscaled, photometric,
                           Unit, CompositeUnit, UnitsError, UnitTypeError,
                           UnitConversionError)

from .core import FunctionUnitBase, FunctionQuantity
from .units import dex, dB, mag


__all__ = ['LogUnit', 'MagUnit', 'DexUnit', 'DecibelUnit',
           'LogQuantity', 'Magnitude', 'Decibel', 'Dex',
           'STmag', 'ABmag', 'M_bol', 'm_bol']


class LogUnit(FunctionUnitBase):
    """Logarithmic unit containing a physical one

    Usually, logarithmic units are instantiated via specific subclasses
    such as `MagUnit`, `DecibelUnit`, and `DexUnit`.

    Parameters
    ----------
    physical_unit : `~astropy.units.Unit` or `string`
        Unit that is encapsulated within the logarithmic function unit.
        If not given, dimensionless.

    function_unit : `~astropy.units.Unit` or `string`
        By default, the same as the logarithmic unit set by the subclass.
    """
    # the four essential overrides of FunctionUnitBase
    @property
    def _default_function_unit(self):
        return dex

    @property
    def _quantity_class(self):
        return LogQuantity

    def from_physical(self, x):
        """Transformation from value in physical to value in logarithmic
        units.  Used in equivalency."""
        return dex.to(self._function_unit, np.log10(x))

    def to_physical(self, x):
        """Transformation from value in logarithmic to value in physical
        units.  Used in equivalency."""
        return 10 ** self._function_unit.to(dex, x)
    # ^^^^ the four essential overrides of FunctionUnitBase

    # add addition and subtraction, which imply multiplication/division of
    # the underlying physical units
    def _add_and_adjust_physical_unit(self, other, sign_self, sign_other):
        """Add/subtract LogUnit to/from another unit, and adjust physical unit.

        self and other are multiplied by sign_self and sign_other, resp.

        We wish to do: ±lu_1 + ±lu_2  -> lu_f           (lu=logarithmic unit)
                   and pu_1^(±1) * pu_2^(±1) -> pu_f    (pu=physical unit)

        Raises
        ------
        UnitsError
            If function units are not equivalent.
        """
        # First, insist on compatible logarithmic type. Here, plain u.mag,
        # u.dex, and u.dB are OK, i.e., other does not have to be LogUnit
        # (this will indirectly test whether other is a unit at all).
        try:
            getattr(other, 'function_unit', other)._to(self._function_unit)
        except AttributeError:
            # if other is not a unit (i.e., does not have _to).
            return NotImplemented
        except UnitsError:
            raise UnitsError("Can only add/subtract logarithmic units of "
                             "compatible type.")

        other_physical_unit = getattr(other, 'physical_unit',
                                      dimensionless_unscaled)
        physical_unit = CompositeUnit(
            1, [self._physical_unit, other_physical_unit],
            [sign_self, sign_other])

        return self._copy(physical_unit)

    def __neg__(self):
        return self._copy(self.physical_unit**(-1))

    def __add__(self, other):
        # Only know how to add to a logarithmic unit with compatible type,
        # be it a plain one (u.mag, etc.,) or another LogUnit
        return self._add_and_adjust_physical_unit(other, +1, +1)

    def __radd__(self, other):
        return self._add_and_adjust_physical_unit(other, +1, +1)

    def __sub__(self, other):
        return self._add_and_adjust_physical_unit(other, +1, -1)

    def __rsub__(self, other):
        # here, in normal usage other cannot be LogUnit; only equivalent one
        # would be u.mag, u.dB, u.dex.  But might as well use common routine.
        return self._add_and_adjust_physical_unit(other, -1, +1)


class MagUnit(LogUnit):
    """Logarithmic physical units expressed in magnitudes

    Parameters
    ----------
    physical_unit : `~astropy.units.Unit` or `string`
        Unit that is encapsulated within the magnitude function unit.
        If not given, dimensionless.

    function_unit : `~astropy.units.Unit` or `string`
        By default, this is ``mag``, but this allows one to use an equivalent
        unit such as ``2 mag``.
    """
    @property
    def _default_function_unit(self):
        return mag

    @property
    def _quantity_class(self):
        return Magnitude


class DexUnit(LogUnit):
    """Logarithmic physical units expressed in dex

    Parameters
    ----------
    physical_unit : `~astropy.units.Unit` or `string`
        Unit that is encapsulated within the dex function unit.
        If not given, dimensionless.

    function_unit : `~astropy.units.Unit` or `string`
        By default, this is ``dex``, but this allows one to use an equivalent
        unit such as ``0.5 dex``.
    """

    @property
    def _default_function_unit(self):
        return dex

    @property
    def _quantity_class(self):
        return Dex


class DecibelUnit(LogUnit):
    """Logarithmic physical units expressed in dB

    Parameters
    ----------
    physical_unit : `~astropy.units.Unit` or `string`
        Unit that is encapsulated within the decibel function unit.
        If not given, dimensionless.

    function_unit : `~astropy.units.Unit` or `string`
        By default, this is ``dB``, but this allows one to use an equivalent
        unit such as ``2 dB``.
    """

    @property
    def _default_function_unit(self):
        return dB

    @property
    def _quantity_class(self):
        return Decibel


class LogQuantity(FunctionQuantity):
    """A representation of a (scaled) logarithm of a number with a unit

    Parameters
    ----------
    value : number, `~astropy.units.Quantity`,
        `~astropy.units.function.logarithmic.LogQuantity`, or sequence of
        convertible items.
        The numerical value of the logarithmic quantity.  If a number or
        a `~astropy.units.Quantity` with a logarithmic unit, it will be
        converted to ``unit`` and the physical unit will be inferred from
        ``unit``.  If a `~astropy.units.Quantity` with just a physical unit,
        it will be converted to the logarithmic unit, after, if necessary,
        converting it to the physical unit inferred from ``unit``.

    unit : string, `~astropy.units.UnitBase` or
        `~astropy.units.function.FunctionUnitBase` instance, optional
        For an `~astropy.units.function.FunctionUnitBase` instance, the
        physical unit will be taken from it; for other input, it will be
        inferred from ``value``.  By default, ``unit`` is set by the subclass.

    dtype : `~numpy.dtype`, optional
        The ``dtype`` of the resulting Numpy array or scalar that will
        hold the value.  If not provided, it is determined automatically
        from the input value.

    copy : bool, optional
        If `True` (default), then the value is copied.  Otherwise, a copy
        will only be made if ``__array__`` returns a copy, if value is a
        nested sequence, or if a copy is needed to satisfy an explicitly
        given ``dtype``.  (The `False` option is intended mostly for
        internal use, to speed up initialization where a copy is known to
        have been made.  Use with care.)

    Examples
    --------
    Typically, use is made of an `~astropy.units.function.FunctionQuantity`
    subclass, as in::

        >>> import astropy.units as u
        >>> u.Magnitude(-2.5)
        <Magnitude -2.5 mag>
        >>> u.Magnitude(10.*u.count/u.second)
        <Magnitude -2.5 mag(ct / s)>
        >>> u.Decibel(1.*u.W, u.DecibelUnit(u.mW))  # doctest: +FLOAT_CMP
        <Decibel 30. dB(mW)>

    """
    # only override of FunctionQuantity
    _unit_class = LogUnit

    # additions that work just for logarithmic units
    def __add__(self, other):
        # Add function units, thus multiplying physical units. If no unit is
        # given, assume dimensionless_unscaled; this will give the appropriate
        # exception in LogUnit.__add__.
        new_unit = self.unit + getattr(other, 'unit', dimensionless_unscaled)
        # Add actual logarithmic values, rescaling, e.g., dB -> dex.
result = self._function_view + getattr(other, '_function_view', other) return self._new_view(result, new_unit) def __radd__(self, other): return self.__add__(other) def __iadd__(self, other): new_unit = self.unit + getattr(other, 'unit', dimensionless_unscaled) # Do calculation in-place using _function_view of array. function_view = self._function_view function_view += getattr(other, '_function_view', other) self._set_unit(new_unit) return self def __sub__(self, other): # Subtract function units, thus dividing physical units. new_unit = self.unit - getattr(other, 'unit', dimensionless_unscaled) # Subtract actual logarithmic values, rescaling, e.g., dB -> dex. result = self._function_view - getattr(other, '_function_view', other) return self._new_view(result, new_unit) def __rsub__(self, other): new_unit = self.unit.__rsub__( getattr(other, 'unit', dimensionless_unscaled)) result = self._function_view.__rsub__( getattr(other, '_function_view', other)) # Ensure the result is in the right function unit scale # (with rsub, this does not have to be one's own). result = result.to(new_unit.function_unit) return self._new_view(result, new_unit) def __isub__(self, other): new_unit = self.unit - getattr(other, 'unit', dimensionless_unscaled) # Do calculation in-place using _function_view of array. function_view = self._function_view function_view -= getattr(other, '_function_view', other) self._set_unit(new_unit) return self def __pow__(self, other): # We check if this power is OK by applying it first to the unit. try: other = float(other) except TypeError: return NotImplemented new_unit = self.unit ** other new_value = self.view(np.ndarray) ** other return self._new_view(new_value, new_unit) def __ilshift__(self, other): try: other = Unit(other) except UnitTypeError: return NotImplemented if not isinstance(other, self._unit_class): return NotImplemented try: factor = self.unit.physical_unit._to(other.physical_unit) except UnitConversionError: # Maybe via equivalencies? Now we do make a temporary copy. try: value = self._to_value(other) except UnitConversionError: return NotImplemented self.view(np.ndarray)[...] = value else: self.view(np.ndarray)[...] += self.unit.from_physical(factor) self._set_unit(other) return self # Could add __mul__ and __div__ and try interpreting other as a power, # but this seems just too error-prone. # Methods that do not work for function units generally but are OK for # logarithmic units as they imply differences and independence of # physical unit.
def var(self, axis=None, dtype=None, out=None, ddof=0): return self._wrap_function(np.var, axis, dtype, out=out, ddof=ddof, unit=self.unit.function_unit**2) def std(self, axis=None, dtype=None, out=None, ddof=0): return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof, unit=self.unit._copy(dimensionless_unscaled)) def ptp(self, axis=None, out=None): return self._wrap_function(np.ptp, axis, out=out, unit=self.unit._copy(dimensionless_unscaled)) def diff(self, n=1, axis=-1): return self._wrap_function(np.diff, n, axis, unit=self.unit._copy(dimensionless_unscaled)) def ediff1d(self, to_end=None, to_begin=None): return self._wrap_function(np.ediff1d, to_end, to_begin, unit=self.unit._copy(dimensionless_unscaled)) _supported_functions = (FunctionQuantity._supported_functions | set(getattr(np, function) for function in ('var', 'std', 'ptp', 'diff', 'ediff1d'))) class Dex(LogQuantity): _unit_class = DexUnit class Decibel(LogQuantity): _unit_class = DecibelUnit class Magnitude(LogQuantity): _unit_class = MagUnit dex._function_unit_class = DexUnit dB._function_unit_class = DecibelUnit mag._function_unit_class = MagUnit STmag = MagUnit(photometric.STflux) STmag.__doc__ = "ST magnitude: STmag=-21.1 corresponds to 1 erg/s/cm2/A" ABmag = MagUnit(photometric.ABflux) ABmag.__doc__ = "AB magnitude: ABmag=-48.6 corresponds to 1 erg/s/cm2/Hz" M_bol = MagUnit(photometric.Bol) M_bol.__doc__ = ("Absolute bolometric magnitude: M_bol=0 corresponds to " "L_bol0={0}".format(photometric.Bol.si)) m_bol = MagUnit(photometric.bol) m_bol.__doc__ = ("Apparent bolometric magnitude: m_bol=0 corresponds to " "f_bol0={0}".format(photometric.bol.si))
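# Illustrative usage sketch (not part of this module; values follow the
# LogQuantity doctests above): adding logarithmic quantities multiplies the
# underlying physical units, so mag(ct/s) plus mag(s) gives mag(ct).
#
# >>> import astropy.units as u
# >>> m1 = u.Magnitude(10. * u.ct / u.s)
# >>> m1
# <Magnitude -2.5 mag(ct / s)>
# >>> m2 = u.Magnitude(1. * u.s)    # 0 mag(s)
# >>> m1 + m2                       # physical units multiply: ct/s * s -> ct
# <Magnitude -2.5 mag(ct)>
# >>> (m1 + m2).physical            # doctest: +FLOAT_CMP
# <Quantity 10. ct>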
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """Function Units and Quantities.""" from abc import ABCMeta, abstractmethod import numpy as np from astropy.units import (Unit, UnitBase, UnitsError, UnitTypeError, UnitConversionError, dimensionless_unscaled, Quantity) __all__ = ['FunctionUnitBase', 'FunctionQuantity'] SUPPORTED_UFUNCS = set(getattr(np.core.umath, ufunc) for ufunc in ( 'isfinite', 'isinf', 'isnan', 'sign', 'signbit', 'rint', 'floor', 'ceil', 'trunc', '_ones_like', 'ones_like', 'positive') if hasattr(np.core.umath, ufunc)) # TODO: the following could work if helper changed relative to Quantity: # - spacing should return dimensionless, not same unit # - negative should negate unit too, # - add, subtract, comparisons can work if units added/subtracted SUPPORTED_FUNCTIONS = set(getattr(np, function) for function in ('clip', 'trace', 'mean', 'min', 'max', 'round')) # subclassing UnitBase or CompositeUnit was found to be problematic, requiring # a large number of overrides. Hence, define new class. class FunctionUnitBase(metaclass=ABCMeta): """Abstract base class for function units. Function units are functions containing a physical unit, such as dB(mW). Most of the arithmetic operations on function units are defined in this base class. While instantiation is defined, this class should not be used directly. Rather, subclasses should be used that override the abstract properties `_default_function_unit` and `_quantity_class`, and the abstract methods `from_physical`, and `to_physical`. Parameters ---------- physical_unit : `~astropy.units.Unit` or `string` Unit that is encapsulated within the function unit. If not given, dimensionless. function_unit : `~astropy.units.Unit` or `string` By default, the same as the function unit set by the subclass. """ # ↓↓↓ the following four need to be set by subclasses # Make this a property so we can ensure subclasses define it. @property @abstractmethod def _default_function_unit(self): """Default function unit corresponding to the function. This property should be overridden by subclasses, with, e.g., `~astropy.unit.MagUnit` returning `~astropy.unit.mag`. """ # This has to be a property because the function quantity will not be # known at unit definition time, as it gets defined after. @property @abstractmethod def _quantity_class(self): """Function quantity class corresponding to this function unit. This property should be overridden by subclasses, with, e.g., `~astropy.unit.MagUnit` returning `~astropy.unit.Magnitude`. """ @abstractmethod def from_physical(self, x): """Transformation from value in physical to value in function units. This method should be overridden by subclasses. It is used to provide automatic transformations using an equivalency. """ @abstractmethod def to_physical(self, x): """Transformation from value in function to value in physical units. This method should be overridden by subclasses. It is used to provide automatic transformations using an equivalency. """ # ↑↑↑ the above four need to be set by subclasses # have priority over arrays, regular units, and regular quantities __array_priority__ = 30000 def __init__(self, physical_unit=None, function_unit=None): if physical_unit is None: self._physical_unit = dimensionless_unscaled else: self._physical_unit = Unit(physical_unit) if (not isinstance(self._physical_unit, UnitBase) or self._physical_unit.is_equivalent( self._default_function_unit)): raise UnitConversionError("Unit {0} is not a physical unit." 
.format(self._physical_unit)) if function_unit is None: self._function_unit = self._default_function_unit else: # any function unit should be equivalent to subclass default function_unit = Unit(getattr(function_unit, 'function_unit', function_unit)) if function_unit.is_equivalent(self._default_function_unit): self._function_unit = function_unit else: raise UnitConversionError( "Cannot initialize '{0}' instance with function unit '{1}'" ", as it is not equivalent to default function unit '{2}'." .format(self.__class__.__name__, function_unit, self._default_function_unit)) def _copy(self, physical_unit=None): """Copy oneself, possibly with a different physical unit.""" if physical_unit is None: physical_unit = self.physical_unit return self.__class__(physical_unit, self.function_unit) @property def physical_unit(self): return self._physical_unit @property def function_unit(self): return self._function_unit @property def equivalencies(self): """List of equivalencies between function and physical units. Uses the `from_physical` and `to_physical` methods. """ return [(self, self.physical_unit, self.to_physical, self.from_physical)] # ↓↓↓ properties/methods required to behave like a unit def decompose(self, bases=set()): """Copy the current unit with the physical unit decomposed. For details, see `~astropy.units.UnitBase.decompose`. """ return self._copy(self.physical_unit.decompose(bases)) @property def si(self): """Copy the current function unit with the physical unit in SI.""" return self._copy(self.physical_unit.si) @property def cgs(self): """Copy the current function unit with the physical unit in CGS.""" return self._copy(self.physical_unit.cgs) def _get_physical_type_id(self): """Get physical type corresponding to physical unit.""" return self.physical_unit._get_physical_type_id() @property def physical_type(self): """Return the physical type of the physical unit (e.g., 'length').""" return self.physical_unit.physical_type def is_equivalent(self, other, equivalencies=[]): """ Returns `True` if this unit is equivalent to ``other``. Parameters ---------- other : unit object or string or tuple The unit to convert to. If a tuple of units is specified, this method returns true if the unit matches any of those in the tuple. equivalencies : list of equivalence pairs, optional A list of equivalence pairs to try if the units are not directly convertible. See :ref:`unit_equivalencies`. This list is in addition to the built-in equivalencies between the function unit and the physical one, as well as possible global defaults set by, e.g., `~astropy.units.set_enabled_equivalencies`. Use `None` to turn off any global equivalencies. Returns ------- bool """ if isinstance(other, tuple): return any(self.is_equivalent(u, equivalencies=equivalencies) for u in other) other_physical_unit = getattr(other, 'physical_unit', ( dimensionless_unscaled if self.function_unit.is_equivalent(other) else other)) return self.physical_unit.is_equivalent(other_physical_unit, equivalencies) def to(self, other, value=1., equivalencies=[]): """ Return the converted values in the specified unit. Parameters ---------- other : `~astropy.units.Unit` object, `~astropy.units.function.FunctionUnitBase` object or string The unit to convert to. value : scalar int or float, or sequence convertible to array, optional Value(s) in the current unit to be converted to the specified unit. If not provided, defaults to 1.0. 
equivalencies : list of equivalence pairs, optional A list of equivalence pairs to try if the units are not directly convertible. See :ref:`unit_equivalencies`. This list is meant to treat only equivalencies between different physical units; the built-in equivalency between the function unit and the physical one is automatically taken into account. Returns ------- values : scalar or array Converted value(s). Input value sequences are returned as numpy arrays. Raises ------ UnitsError If units are inconsistent. """ # conversion to one's own physical unit should be fastest if other is self.physical_unit: return self.to_physical(value) other_function_unit = getattr(other, 'function_unit', other) if self.function_unit.is_equivalent(other_function_unit): # when other is an equivalent function unit: # first convert physical units to other's physical units other_physical_unit = getattr(other, 'physical_unit', dimensionless_unscaled) if self.physical_unit != other_physical_unit: value_other_physical = self.physical_unit.to( other_physical_unit, self.to_physical(value), equivalencies) # make function unit again, in own system value = self.from_physical(value_other_physical) # convert possible difference in function unit (e.g., dex->dB) return self.function_unit.to(other_function_unit, value) else: try: # when other is not a function unit return self.physical_unit.to(other, self.to_physical(value), equivalencies) except UnitConversionError as e: if self.function_unit == Unit('mag'): # One can get to raw magnitudes via math that strips the dimensions off. # Include extra information in the exception to remind users of this. msg = "Did you perhaps subtract magnitudes so the unit got lost?" e.args += (msg,) raise e else: raise def is_unity(self): return False def __eq__(self, other): return (self.physical_unit == getattr(other, 'physical_unit', dimensionless_unscaled) and self.function_unit == getattr(other, 'function_unit', other)) def __ne__(self, other): return not self.__eq__(other) def __rlshift__(self, other): """Unit conversion operator ``<<``""" try: return self._quantity_class(other, self, copy=False, subok=True) except Exception: return NotImplemented def __mul__(self, other): if isinstance(other, (str, UnitBase, FunctionUnitBase)): if self.physical_unit == dimensionless_unscaled: # If dimensionless, drop back to normal unit and retry. return self.function_unit * other else: raise UnitsError("Cannot multiply a function unit " "with a physical dimension with any unit.") else: # Anything not like a unit, try initialising as a function quantity. try: return self._quantity_class(other, unit=self) except Exception: return NotImplemented def __rmul__(self, other): return self.__mul__(other) def __div__(self, other): if isinstance(other, (str, UnitBase, FunctionUnitBase)): if self.physical_unit == dimensionless_unscaled: # If dimensionless, drop back to normal unit and retry. return self.function_unit / other else: raise UnitsError("Cannot divide a function unit " "with a physical dimension by any unit.") else: # Anything not like a unit, try initialising as a function quantity. try: return self._quantity_class(1./other, unit=self) except Exception: return NotImplemented def __rdiv__(self, other): if isinstance(other, (str, UnitBase, FunctionUnitBase)): if self.physical_unit == dimensionless_unscaled: # If dimensionless, drop back to normal unit and retry.
return other / self.function_unit else: raise UnitsError("Cannot divide a function unit " "with a physical dimension into any unit.") else: # Don't know what to do with anything not like a unit. return NotImplemented __truediv__ = __div__ __rtruediv__ = __rdiv__ def __pow__(self, power): if power == 0: return dimensionless_unscaled elif power == 1: return self._copy() if self.physical_unit == dimensionless_unscaled: return self.function_unit ** power raise UnitsError("Cannot raise a function unit " "with a physical dimension to any power but 0 or 1.") def __pos__(self): return self._copy() def to_string(self, format='generic'): """ Output the unit in the given format as a string. The physical unit is appended, within parentheses, to the function unit, as in "dB(mW)", with both units set using the given format. Parameters ---------- format : `astropy.units.format.Base` instance or str The name of a format or a formatter object. If not provided, defaults to the generic format. """ if format not in ('generic', 'unscaled', 'latex'): raise ValueError("Function units cannot be written in {0} format. " "Only 'generic', 'unscaled' and 'latex' are " "supported.".format(format)) self_str = self.function_unit.to_string(format) pu_str = self.physical_unit.to_string(format) if pu_str == '': pu_str = '1' if format == 'latex': self_str += r'$\mathrm{{\left( {0} \right)}}$'.format( pu_str[1:-1]) # need to strip leading and trailing "$" else: self_str += '({0})'.format(pu_str) return self_str def __str__(self): """Return string representation for unit.""" self_str = str(self.function_unit) pu_str = str(self.physical_unit) if pu_str: self_str += '({0})'.format(pu_str) return self_str def __repr__(self): # By default, try to give a representation using `Unit(<string>)`, # with string such that parsing it would give the correct FunctionUnit. if callable(self.function_unit): return 'Unit("{0}")'.format(self.to_string()) else: return '{0}("{1}"{2})'.format( self.__class__.__name__, self.physical_unit, "" if self.function_unit is self._default_function_unit else ', unit="{0}"'.format(self.function_unit)) def _repr_latex_(self): """ Generate latex representation of unit name. This is used by the IPython notebook to print a unit with a nice layout. Returns ------- Latex string """ return self.to_string('latex') def __hash__(self): return hash((self.function_unit, self.physical_unit)) class FunctionQuantity(Quantity): """A representation of a (scaled) function of a number with a unit. Function quantities are quantities whose units are functions containing a physical unit, such as dB(mW). Most of the arithmetic operations on function quantities are defined in this base class. While instantiation is also defined here, this class should not be instantiated directly. Rather, subclasses should be made which have ``_unit_class`` pointing back to the corresponding function unit class. Parameters ---------- value : number, sequence of convertible items, `~astropy.units.Quantity`, or `~astropy.units.function.FunctionQuantity` The numerical value of the function quantity. If a number or a `~astropy.units.Quantity` with a function unit, it will be converted to ``unit`` and the physical unit will be inferred from ``unit``. If a `~astropy.units.Quantity` with just a physical unit, it will be converted to the function unit, after, if necessary, converting it to the physical unit inferred from ``unit``.
unit : string, `~astropy.units.UnitBase` or `~astropy.units.function.FunctionUnitBase` instance, optional For an `~astropy.units.function.FunctionUnitBase` instance, the physical unit will be taken from it; for other input, it will be inferred from ``value``. By default, ``unit`` is set by the subclass. dtype : `~numpy.dtype`, optional The dtype of the resulting Numpy array or scalar that will hold the value. If not provided, it is determined from the input, except that any input that cannot represent float (integer and bool) is converted to float. copy : bool, optional If `True` (default), then the value is copied. Otherwise, a copy will only be made if ``__array__`` returns a copy, if value is a nested sequence, or if a copy is needed to satisfy an explicitly given ``dtype``. (The `False` option is intended mostly for internal use, to speed up initialization where a copy is known to have been made. Use with care.) order : {'C', 'F', 'A'}, optional Specify the order of the array. As in `~numpy.array`. Ignored if the input does not need to be converted and ``copy=False``. subok : bool, optional If `False` (default), the returned array will be forced to be of the class used. Otherwise, subclasses will be passed through. ndmin : int, optional Specifies the minimum number of dimensions that the resulting array should have. Ones will be pre-pended to the shape as needed to meet this requirement. This parameter is ignored if the input is a `~astropy.units.Quantity` and ``copy=False``. Raises ------ TypeError If the value provided is not a Python numeric type. TypeError If the unit provided is not a `~astropy.units.function.FunctionUnitBase` or `~astropy.units.Unit` object, or a parseable string unit. """ _unit_class = None """Default `~astropy.units.function.FunctionUnitBase` subclass. This should be overridden by subclasses. """ # Ensure priority over ndarray, regular Unit & Quantity, and FunctionUnit. __array_priority__ = 40000 # Define functions that work on FunctionQuantity. _supported_ufuncs = SUPPORTED_UFUNCS _supported_functions = SUPPORTED_FUNCTIONS def __new__(cls, value, unit=None, dtype=None, copy=True, order=None, subok=False, ndmin=0): if unit is not None: # Convert possible string input to a (function) unit. unit = Unit(unit) if not isinstance(unit, FunctionUnitBase): # By default, use value's physical unit. value_unit = getattr(value, 'unit', None) if value_unit is None: # if iterable, see if first item has a unit # (mixed lists fail in super call below). try: value_unit = getattr(value[0], 'unit') except Exception: pass physical_unit = getattr(value_unit, 'physical_unit', value_unit) unit = cls._unit_class(physical_unit, function_unit=unit) # initialise! return super().__new__(cls, value, unit, dtype=dtype, copy=copy, order=order, subok=subok, ndmin=ndmin) # ↓↓↓ properties not found in Quantity @property def physical(self): """The physical quantity corresponding to the function one.""" return self.to(self.unit.physical_unit) @property def _function_view(self): """View as Quantity with function unit, dropping the physical unit. Use `~astropy.units.quantity.Quantity.value` for just the value.
""" return self._new_view(unit=self.unit.function_unit) # ↓↓↓ methods overridden to change the behavior @property def si(self): """Return a copy with the physical unit in SI units.""" return self.__class__(self.physical.si) @property def cgs(self): """Return a copy with the physical unit in CGS units.""" return self.__class__(self.physical.cgs) def decompose(self, bases=[]): """Generate a new `FunctionQuantity` with the physical unit decomposed. For details, see `~astropy.units.Quantity.decompose`. """ return self.__class__(self.physical.decompose(bases)) # ↓↓↓ methods overridden to add additional behavior def __quantity_subclass__(self, unit): if isinstance(unit, FunctionUnitBase): return self.__class__, True else: return super().__quantity_subclass__(unit)[0], False def _set_unit(self, unit): if not isinstance(unit, self._unit_class): # Have to take care of, e.g., (10*u.mag).view(u.Magnitude) try: # "or 'nonsense'" ensures `None` breaks, just in case. unit = self._unit_class(function_unit=unit or 'nonsense') except Exception: raise UnitTypeError( "{0} instances require {1} function units" .format(type(self).__name__, self._unit_class.__name__) + ", so cannot set it to '{0}'.".format(unit)) self._unit = unit def __array_ufunc__(self, function, method, *inputs, **kwargs): # TODO: it would be more logical to have this in Quantity already, # instead of in UFUNC_HELPERS, where it cannot be overridden. # And really it should just return NotImplemented, since possibly # another argument might know what to do. if function not in self._supported_ufuncs: raise UnitTypeError( "Cannot use ufunc '{0}' with function quantities" .format(function.__name__)) return super().__array_ufunc__(function, method, *inputs, **kwargs) # ↓↓↓ methods overridden to change behavior def __mul__(self, other): if self.unit.physical_unit == dimensionless_unscaled: return self._function_view * other raise UnitTypeError("Cannot multiply function quantities which " "are not dimensionless with anything.") def __truediv__(self, other): if self.unit.physical_unit == dimensionless_unscaled: return self._function_view / other raise UnitTypeError("Cannot divide function quantities which " "are not dimensionless by anything.") def __rtruediv__(self, other): if self.unit.physical_unit == dimensionless_unscaled: return self._function_view.__rdiv__(other) raise UnitTypeError("Cannot divide function quantities which " "are not dimensionless into anything.") def _comparison(self, other, comparison_func): """Do a comparison between self and other, raising UnitsError when other cannot be converted to self because it has different physical unit, and returning NotImplemented when there are other errors.""" try: # will raise a UnitsError if physical units not equivalent other_in_own_unit = self._to_own_unit(other, check_precision=False) except UnitsError as exc: if self.unit.physical_unit != dimensionless_unscaled: raise exc try: other_in_own_unit = self._function_view._to_own_unit( other, check_precision=False) except Exception: raise exc except Exception: return NotImplemented return comparison_func(other_in_own_unit) def __eq__(self, other): try: return self._comparison(other, self.value.__eq__) except UnitsError: return False def __ne__(self, other): try: return self._comparison(other, self.value.__ne__) except UnitsError: return True def __gt__(self, other): return self._comparison(other, self.value.__gt__) def __ge__(self, other): return self._comparison(other, self.value.__ge__) def __lt__(self, other): return self._comparison(other, 
self.value.__lt__) def __le__(self, other): return self._comparison(other, self.value.__le__) def __lshift__(self, other): """Unit conversion operator ``<<``""" try: other = Unit(other, parse_strict='silent') except UnitTypeError: return NotImplemented return self.__class__(self, other, copy=False, subok=True) # Ensure Quantity methods are used only if they make sense. def _wrap_function(self, function, *args, **kwargs): if function in self._supported_functions: return super()._wrap_function(function, *args, **kwargs) # For dimensionless, we can convert to regular quantities. if all(arg.unit.physical_unit == dimensionless_unscaled for arg in (self,) + args if (hasattr(arg, 'unit') and hasattr(arg.unit, 'physical_unit'))): args = tuple(getattr(arg, '_function_view', arg) for arg in args) return self._function_view._wrap_function(function, *args, **kwargs) raise TypeError("Cannot use method that uses function '{0}' with " "function quantities that are not dimensionless." .format(function.__name__)) # Override functions that are supported but do not use _wrap_function # in Quantity. def max(self, axis=None, out=None, keepdims=False): return self._wrap_function(np.max, axis, out=out, keepdims=keepdims) def min(self, axis=None, out=None, keepdims=False): return self._wrap_function(np.min, axis, out=out, keepdims=keepdims) def sum(self, axis=None, dtype=None, out=None, keepdims=False): return self._wrap_function(np.sum, axis, dtype, out=out, keepdims=keepdims) def cumsum(self, axis=None, dtype=None, out=None): return self._wrap_function(np.cumsum, axis, dtype, out=out)
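# Illustrative usage sketch (not part of this module): the equivalency pair
# between a function unit and its physical unit is what makes conversions
# like the following work, e.g. 30 dB(mW) = 10**3 mW = 1 W.
#
# >>> from astropy import units as u
# >>> q = 30. * u.dB(u.mW)          # FunctionUnitBase.__rmul__ -> Decibel
# >>> q.physical                    # doctest: +FLOAT_CMP
# <Quantity 1000. mW>
# >>> q.to(u.W)                     # doctest: +FLOAT_CMP
# <Quantity 1. W>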
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module has been deprecated and moved to astropy.units.photometric. The names remain here for backwards compatibility. """ from warnings import warn from astropy.units.photometric import AB, ST from astropy.utils import deprecated _ns = globals() @deprecated(since='3.1', alternative='astropy.units.photometric', message='The magnitude_zero_points module has been deprecated; its ' 'units have moved to astropy.units.photometric and are ' 'enabled by default. magnitude_zero_points is retained as ' 'an alias to the new units.') def enable(): """ Enable magnitude zero point units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`. This may be used with the ``with`` statement to enable these units only temporarily. """ # While it may seem like the below can be removed, in fact it needs to # remain as long as this function is around so that enable acts as a # context manager. # Local import to avoid cyclical import from astropy.units.core import add_enabled_units # Local import to avoid polluting namespace import inspect return add_enabled_units(inspect.getmodule(enable))
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import warnings import numpy as np from astropy import units as u from astropy.utils.decorators import format_doc from astropy.utils.exceptions import AstropyDeprecationWarning from astropy.coordinates.angles import Angle from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product, matrix_transpose from astropy.coordinates import representation as r from astropy.coordinates.baseframe import (BaseCoordinateFrame, frame_transform_graph, RepresentationMapping, base_doc) from astropy.coordinates.attributes import (Attribute, CoordinateAttribute, QuantityAttribute, DifferentialAttribute) from astropy.coordinates.transformations import AffineTransform from astropy.coordinates.errors import ConvertError from .icrs import ICRS __all__ = ['Galactocentric'] # Measured by minimizing the difference between a plane of coordinates along # l=0, b=[-90,90] and the Galactocentric x-z plane # This is not used directly, but accessed via `get_roll0`. We define it here to # prevent having to create new Angle objects every time `get_roll0` is called. _ROLL0 = Angle(58.5986320306*u.degree) doc_components = """ x : `~astropy.units.Quantity`, optional Cartesian, Galactocentric :math:`x` position component. y : `~astropy.units.Quantity`, optional Cartesian, Galactocentric :math:`y` position component. z : `~astropy.units.Quantity`, optional Cartesian, Galactocentric :math:`z` position component. v_x : `~astropy.units.Quantity`, optional Cartesian, Galactocentric :math:`v_x` velocity component. v_y : `~astropy.units.Quantity`, optional Cartesian, Galactocentric :math:`v_y` velocity component. v_z : `~astropy.units.Quantity`, optional Cartesian, Galactocentric :math:`v_z` velocity component. """ doc_footer = """ Other parameters ---------------- galcen_coord : `ICRS`, optional, must be keyword The ICRS coordinates of the Galactic center. galcen_distance : `~astropy.units.Quantity`, optional, must be keyword The distance from the sun to the Galactic center. galcen_v_sun : `~astropy.coordinates.representation.CartesianDifferential`, optional, must be keyword The velocity of the sun *in the Galactocentric frame* as Cartesian velocity components. z_sun : `~astropy.units.Quantity`, optional, must be keyword The distance from the sun to the Galactic midplane. roll : `Angle`, optional, must be keyword The angle to rotate about the final x-axis, relative to the orientation for Galactic. For example, if this roll angle is 0, the final x-z plane will align with the Galactic coordinates x-z plane. Unless you really know what this means, you probably should not change this! Examples -------- To transform to the Galactocentric frame with the default frame attributes, pass the uninstantiated class name to the ``transform_to()`` method of a coordinate frame or `~astropy.coordinates.SkyCoord` object:: >>> import astropy.units as u >>> import astropy.coordinates as coord >>> c = coord.ICRS(ra=[158.3122, 24.5] * u.degree, ... dec=[-17.3, 81.52] * u.degree, ... 
distance=[11.5, 24.12] * u.kpc) >>> c.transform_to(coord.Galactocentric) # doctest: +FLOAT_CMP <Galactocentric Coordinate (galcen_coord=<ICRS Coordinate: (ra, dec) in deg ( 266.4051, -28.936175)>, galcen_distance=8.3 kpc, galcen_v_sun=( 11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg): (x, y, z) in kpc [( -9.6083819 , -9.40062188, 6.52056066), (-21.28302307, 18.76334013, 7.84693855)]> To specify a custom set of parameters, you have to include extra keyword arguments when initializing the Galactocentric frame object:: >>> c.transform_to(coord.Galactocentric(galcen_distance=8.1*u.kpc)) # doctest: +FLOAT_CMP <Galactocentric Coordinate (galcen_coord=<ICRS Coordinate: (ra, dec) in deg ( 266.4051, -28.936175)>, galcen_distance=8.1 kpc, galcen_v_sun=( 11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg): (x, y, z) in kpc [( -9.40785924, -9.40062188, 6.52066574), (-21.08239383, 18.76334013, 7.84798135)]> Similarly, transforming from the Galactocentric frame to another coordinate frame:: >>> c = coord.Galactocentric(x=[-8.3, 4.5] * u.kpc, ... y=[0., 81.52] * u.kpc, ... z=[0.027, 24.12] * u.kpc) >>> c.transform_to(coord.ICRS) # doctest: +FLOAT_CMP <ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc) [( 86.22349059, 28.83894138, 4.39157788e-05), ( 289.66802652, 49.88763881, 8.59640735e+01)]> Or, with custom specification of the Galactic center:: >>> c = coord.Galactocentric(x=[-8.0, 4.5] * u.kpc, ... y=[0., 81.52] * u.kpc, ... z=[21.0, 24120.0] * u.pc, ... z_sun=21 * u.pc, galcen_distance=8. * u.kpc) >>> c.transform_to(coord.ICRS) # doctest: +FLOAT_CMP <ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc) [( 86.2585249 , 28.85773187, 2.75625475e-05), ( 289.77285255, 50.06290457, 8.59216010e+01)]> """ @format_doc(base_doc, components=doc_components, footer=doc_footer) class Galactocentric(BaseCoordinateFrame): r""" A coordinate or frame in the Galactocentric system. This frame requires specifying the Sun-Galactic center distance, and optionally the height of the Sun above the Galactic midplane. The position of the Sun is assumed to be on the x axis of the final, right-handed system. That is, the x axis points from the position of the Sun projected to the Galactic midplane to the Galactic center -- roughly towards :math:`(l,b) = (0^\circ,0^\circ)`. For the default transformation (:math:`{\rm roll}=0^\circ`), the y axis points roughly towards Galactic longitude :math:`l=90^\circ`, and the z axis points roughly towards the North Galactic Pole (:math:`b=90^\circ`). The default position of the Galactic Center in ICRS coordinates is taken from Reid et al. 2004, http://adsabs.harvard.edu/abs/2004ApJ...616..872R. .. math:: {\rm RA} = 17:45:37.224~{\rm hr}\\ {\rm Dec} = -28:56:10.23~{\rm deg} The default distance to the Galactic Center is 8.3 kpc, e.g., Gillessen et al. (2009), https://ui.adsabs.harvard.edu/#abs/2009ApJ...692.1075G/abstract The default height of the Sun above the Galactic midplane is taken to be 27 pc, as measured by Chen et al. (2001), https://ui.adsabs.harvard.edu/#abs/2001ApJ...553..184C/abstract The default solar motion relative to the Galactic center is taken from a combination of Schönrich et al. (2010) [for the peculiar velocity] and Bovy (2015) [for the circular velocity at the solar radius], https://ui.adsabs.harvard.edu/#abs/2010MNRAS.403.1829S/abstract https://ui.adsabs.harvard.edu/#abs/2015ApJS..216...29B/abstract For a more detailed look at the math behind this transformation, see the document :ref:`coordinates-galactocentric`. 
The frame attributes are listed under **Other Parameters**. """ default_representation = r.CartesianRepresentation default_differential = r.CartesianDifferential # frame attributes galcen_coord = CoordinateAttribute(default=ICRS(ra=266.4051*u.degree, dec=-28.936175*u.degree), frame=ICRS) galcen_distance = QuantityAttribute(default=8.3*u.kpc) galcen_v_sun = DifferentialAttribute( default=r.CartesianDifferential([11.1, 220+12.24, 7.25] * u.km/u.s), allowed_classes=[r.CartesianDifferential]) z_sun = QuantityAttribute(default=27.*u.pc) roll = QuantityAttribute(default=0.*u.deg) def __init__(self, *args, **kwargs): # backwards-compatibility if ('galcen_ra' in kwargs or 'galcen_dec' in kwargs): warnings.warn("The arguments 'galcen_ra' and 'galcen_dec' are " "deprecated in favor of specifying the sky coordinate" " as a CoordinateAttribute using the 'galcen_coord' " "argument", AstropyDeprecationWarning) galcen_kw = dict() galcen_kw['ra'] = kwargs.pop('galcen_ra', self.galcen_coord.ra) galcen_kw['dec'] = kwargs.pop('galcen_dec', self.galcen_coord.dec) kwargs['galcen_coord'] = ICRS(**galcen_kw) super().__init__(*args, **kwargs) @property def galcen_ra(self): warnings.warn("The attribute 'galcen_ra' is deprecated. Use " "'.galcen_coord.ra' instead.", AstropyDeprecationWarning) return self.galcen_coord.ra @property def galcen_dec(self): warnings.warn("The attribute 'galcen_dec' is deprecated. Use " "'.galcen_coord.dec' instead.", AstropyDeprecationWarning) return self.galcen_coord.dec @classmethod def get_roll0(cls): """ The additional roll angle (about the final x axis) necessary to align the final z axis to match the Galactic yz-plane. Setting the ``roll`` frame attribute to -this method's return value removes this rotation, allowing the use of the `Galactocentric` frame in more general contexts. """ # note that the actual value is defined at the module level. We expose # it via a classmethod because this module isn't actually part of the # public API, so it's better for the value to be accessible from # Galactocentric return _ROLL0 # ICRS to/from Galactocentric -----------------------> def get_matrix_vectors(galactocentric_frame, inverse=False): """ Use the ``inverse`` argument to get the inverse transformation, matrix and offsets to go from Galactocentric to ICRS.
""" # shorthand gcf = galactocentric_frame # rotation matrix to align x(ICRS) with the vector to the Galactic center mat1 = rotation_matrix(-gcf.galcen_coord.dec, 'y') mat2 = rotation_matrix(gcf.galcen_coord.ra, 'z') # extra roll away from the Galactic x-z plane mat0 = rotation_matrix(gcf.get_roll0() - gcf.roll, 'x') # construct transformation matrix and use it R = matrix_product(mat0, mat1, mat2) # Now need to translate by Sun-Galactic center distance around x' and # rotate about y' to account for tilt due to Sun's height above the plane translation = r.CartesianRepresentation(gcf.galcen_distance * [1., 0., 0.]) z_d = gcf.z_sun / gcf.galcen_distance H = rotation_matrix(-np.arcsin(z_d), 'y') # compute total matrices A = matrix_product(H, R) # Now we re-align the translation vector to account for the Sun's height # above the midplane offset = -translation.transform(H) if inverse: # the inverse of a rotation matrix is a transpose, which is much faster # and more stable to compute A = matrix_transpose(A) offset = (-offset).transform(A) offset_v = r.CartesianDifferential.from_cartesian( (-gcf.galcen_v_sun).to_cartesian().transform(A)) offset = offset.with_differentials(offset_v) else: offset = offset.with_differentials(gcf.galcen_v_sun) return A, offset def _check_coord_repr_diff_types(c): if isinstance(c.data, r.UnitSphericalRepresentation): raise ConvertError("Transforming to/from a Galactocentric frame " "requires a 3D coordinate, e.g. (angle, angle, " "distance) or (x, y, z).") if ('s' in c.data.differentials and isinstance(c.data.differentials['s'], (r.UnitSphericalDifferential, r.UnitSphericalCosLatDifferential, r.RadialDifferential))): raise ConvertError("Transforming to/from a Galactocentric frame " "requires a 3D velocity, e.g., proper motion " "components and radial velocity.") @frame_transform_graph.transform(AffineTransform, ICRS, Galactocentric) def icrs_to_galactocentric(icrs_coord, galactocentric_frame): _check_coord_repr_diff_types(icrs_coord) return get_matrix_vectors(galactocentric_frame) @frame_transform_graph.transform(AffineTransform, Galactocentric, ICRS) def galactocentric_to_icrs(galactocentric_coord, icrs_frame): _check_coord_repr_diff_types(galactocentric_coord) return get_matrix_vectors(galactocentric_coord, inverse=True)
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy.coordinates.matrix_utilities import (rotation_matrix, matrix_product, matrix_transpose) from astropy.coordinates.baseframe import frame_transform_graph from astropy.coordinates.transformations import StaticMatrixTransform from .galactic import Galactic from .supergalactic import Supergalactic @frame_transform_graph.transform(StaticMatrixTransform, Galactic, Supergalactic) def gal_to_supergal(): mat1 = rotation_matrix(90, 'z') mat2 = rotation_matrix(90 - Supergalactic._nsgp_gal.b.degree, 'y') mat3 = rotation_matrix(Supergalactic._nsgp_gal.l.degree, 'z') return matrix_product(mat1, mat2, mat3) @frame_transform_graph.transform(StaticMatrixTransform, Supergalactic, Galactic) def supergal_to_gal(): return matrix_transpose(gal_to_supergal())
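# Consistency sketch (not part of this module): both functions return
# orthogonal rotation matrices, so composing them should give the identity.
#
# >>> import numpy as np
# >>> np.allclose(matrix_product(gal_to_supergal(), supergal_to_gal()),
# ...             np.eye(3))
# True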
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy.utils.decorators import format_doc from astropy.coordinates.baseframe import frame_transform_graph, base_doc from astropy.coordinates.attributes import TimeAttribute from astropy.coordinates.transformations import DynamicMatrixTransform from astropy.coordinates import earth_orientation as earth from .baseradec import BaseRADecFrame, doc_components from .utils import EQUINOX_J2000 __all__ = ['FK5'] doc_footer = """ Other parameters ---------------- equinox : `~astropy.time.Time` The equinox of this frame. """ @format_doc(base_doc, components=doc_components, footer=doc_footer) class FK5(BaseRADecFrame): """ A coordinate or frame in the FK5 system. Note that this is a barycentric version of FK5 - that is, the origin for this frame is the Solar System Barycenter, *not* the Earth geocenter. The frame attributes are listed under **Other Parameters**. """ equinox = TimeAttribute(default=EQUINOX_J2000) @staticmethod def _precession_matrix(oldequinox, newequinox): """ Compute and return the precession matrix for FK5 based on Capitaine et al. 2003/IAU2006. Used inside some of the transformation functions. Parameters ---------- oldequinox : `~astropy.time.Time` The equinox to precess from. newequinox : `~astropy.time.Time` The equinox to precess to. Returns ------- newcoord : array The precession matrix to transform to the new equinox """ return earth.precession_matrix_Capitaine(oldequinox, newequinox) # This is the "self-transform". Defined at module level because the decorator # needs a reference to the FK5 class @frame_transform_graph.transform(DynamicMatrixTransform, FK5, FK5) def fk5_to_fk5(fk5coord1, fk5frame2): return fk5coord1._precession_matrix(fk5coord1.equinox, fk5frame2.equinox)
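# Illustrative usage sketch (not part of this module): the self-transform
# registered above precesses FK5 coordinates between equinoxes.
#
# >>> import astropy.units as u
# >>> from astropy.time import Time
# >>> c = FK5(ra=10.*u.deg, dec=20.*u.deg, equinox=Time('J2000'))
# >>> c2 = c.transform_to(FK5(equinox=Time('J2025')))  # applies precession
# >>> c2.equinox
# <Time object: scale='utc' format='jyear_str' value=J2025.000>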
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Contains the transformation functions for getting to/from ITRS, GCRS, and CIRS. These are distinct from the ICRS and AltAz functions because they are just rotations without aberration corrections or offsets. """ import numpy as np from astropy.coordinates.baseframe import frame_transform_graph from astropy.coordinates.transformations import FunctionTransformWithFiniteDifference from astropy.coordinates.matrix_utilities import matrix_transpose from astropy import _erfa as erfa from .gcrs import GCRS, PrecessedGeocentric from .cirs import CIRS from .itrs import ITRS from .utils import get_polar_motion, get_jd12 # # first define helper functions def gcrs_to_cirs_mat(time): # celestial-to-intermediate matrix return erfa.c2i06a(*get_jd12(time, 'tt')) def cirs_to_itrs_mat(time): # compute the polar motion p-matrix xp, yp = get_polar_motion(time) sp = erfa.sp00(*get_jd12(time, 'tt')) pmmat = erfa.pom00(xp, yp, sp) # now determine the Earth Rotation Angle for the input obstime # era00 accepts UT1, so we convert if need be era = erfa.era00(*get_jd12(time, 'ut1')) # c2tcio expects a GCRS->CIRS matrix, but we just set that to an I-matrix # because we're already in CIRS return erfa.c2tcio(np.eye(3), era, pmmat) def gcrs_precession_mat(equinox): gamb, phib, psib, epsa = erfa.pfw06(*get_jd12(equinox, 'tt')) return erfa.fw2m(gamb, phib, psib, epsa) # now the actual transforms @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, CIRS) def gcrs_to_cirs(gcrs_coo, cirs_frame): # first get us to a 0 pos/vel GCRS at the target obstime gcrs_coo2 = gcrs_coo.transform_to(GCRS(obstime=cirs_frame.obstime)) # now get the pmatrix pmat = gcrs_to_cirs_mat(cirs_frame.obstime) crepr = gcrs_coo2.cartesian.transform(pmat) return cirs_frame.realize_frame(crepr) @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, GCRS) def cirs_to_gcrs(cirs_coo, gcrs_frame): # compute the pmatrix, and then multiply by its transpose pmat = gcrs_to_cirs_mat(cirs_coo.obstime) newrepr = cirs_coo.cartesian.transform(matrix_transpose(pmat)) gcrs = GCRS(newrepr, obstime=cirs_coo.obstime) # now do any needed offsets (no-op if same obstime and 0 pos/vel) return gcrs.transform_to(gcrs_frame) @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, ITRS) def cirs_to_itrs(cirs_coo, itrs_frame): # first get us to CIRS at the target obstime cirs_coo2 = cirs_coo.transform_to(CIRS(obstime=itrs_frame.obstime)) # now get the pmatrix pmat = cirs_to_itrs_mat(itrs_frame.obstime) crepr = cirs_coo2.cartesian.transform(pmat) return itrs_frame.realize_frame(crepr) @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, CIRS) def itrs_to_cirs(itrs_coo, cirs_frame): # compute the pmatrix, and then multiply by its transpose pmat = cirs_to_itrs_mat(itrs_coo.obstime) newrepr = itrs_coo.cartesian.transform(matrix_transpose(pmat)) cirs = CIRS(newrepr, obstime=itrs_coo.obstime) # now do any needed offsets (no-op if same obstime) return cirs.transform_to(cirs_frame) @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, ITRS) def itrs_to_itrs(from_coo, to_frame): # this self-transform goes through CIRS right now, which implicitly also # goes back to ICRS return from_coo.transform_to(CIRS).transform_to(to_frame) # TODO: implement GCRS<->CIRS if there's call for it. 
# The thing that's awkward is that they both have obstimes, so an extra set # of transformations is necessary. So unless there's a specific need for that, # better to just have it go through the above two steps anyway @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, PrecessedGeocentric) def gcrs_to_precessedgeo(from_coo, to_frame): # first get us to GCRS with the right attributes (might be a no-op) gcrs_coo = from_coo.transform_to(GCRS(obstime=to_frame.obstime, obsgeoloc=to_frame.obsgeoloc, obsgeovel=to_frame.obsgeovel)) # now precess to the requested equinox pmat = gcrs_precession_mat(to_frame.equinox) crepr = gcrs_coo.cartesian.transform(pmat) return to_frame.realize_frame(crepr) @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, PrecessedGeocentric, GCRS) def precessedgeo_to_gcrs(from_coo, to_frame): # first un-precess pmat = gcrs_precession_mat(from_coo.equinox) crepr = from_coo.cartesian.transform(matrix_transpose(pmat)) gcrs_coo = GCRS(crepr, obstime=to_frame.obstime, obsgeoloc=to_frame.obsgeoloc, obsgeovel=to_frame.obsgeovel) # then move to the GCRS that's actually desired return gcrs_coo.transform_to(to_frame)
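# Illustrative usage sketch (not part of this module): chaining the
# registered transforms takes a GCRS coordinate through CIRS into the
# rotating ITRS frame.
#
# >>> import astropy.units as u
# >>> from astropy.time import Time
# >>> t = Time('2010-01-01')
# >>> g = GCRS(ra=10.*u.deg, dec=20.*u.deg, distance=1.*u.au, obstime=t)
# >>> itrs = g.transform_to(ITRS(obstime=t))   # goes GCRS -> CIRS -> ITRS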
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy.utils.decorators import format_doc from astropy.coordinates.attributes import TimeAttribute from .utils import DEFAULT_OBSTIME from astropy.coordinates.baseframe import base_doc from .baseradec import BaseRADecFrame, doc_components __all__ = ['HCRS'] doc_footer = """ Other parameters ---------------- obstime : `~astropy.time.Time` The time at which the observation is taken. Used for determining the position of the Sun. """ @format_doc(base_doc, components=doc_components, footer=doc_footer) class HCRS(BaseRADecFrame): """ A coordinate or frame in a Heliocentric system, with axes aligned to ICRS. The ICRS has an origin at the Barycenter and axes which are fixed with respect to space. This coordinate system is distinct from ICRS mainly in that it is relative to the Sun's center-of-mass rather than the solar system Barycenter. In principle, therefore, this frame should include the effects of aberration (unlike ICRS), but this is not done, since they are very small, of the order of 8 milli-arcseconds. For more background on the ICRS and related coordinate transformations, see the references provided in the :ref:`astropy-coordinates-seealso` section of the documentation. The frame attributes are listed under **Other Parameters**. """ obstime = TimeAttribute(default=DEFAULT_OBSTIME) # Transformations are defined in icrs_circ_transforms.py
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from astropy import units as u from astropy.utils.decorators import format_doc from astropy.coordinates.baseframe import frame_transform_graph, base_doc from astropy.coordinates.attributes import TimeAttribute from astropy.coordinates.transformations import (FunctionTransformWithFiniteDifference, FunctionTransform, DynamicMatrixTransform) from astropy.coordinates.representation import (CartesianRepresentation, UnitSphericalRepresentation) from astropy.coordinates import earth_orientation as earth from .utils import EQUINOX_B1950 from .baseradec import doc_components, BaseRADecFrame __all__ = ['FK4', 'FK4NoETerms'] doc_footer_fk4 = """ Other parameters ---------------- equinox : `~astropy.time.Time` The equinox of this frame. obstime : `~astropy.time.Time` The time this frame was observed. If ``None``, will be the same as ``equinox``. """ @format_doc(base_doc, components=doc_components, footer=doc_footer_fk4) class FK4(BaseRADecFrame): """ A coordinate or frame in the FK4 system. Note that this is a barycentric version of FK4 - that is, the origin for this frame is the Solar System Barycenter, *not* the Earth geocenter. The frame attributes are listed under **Other Parameters**. """ equinox = TimeAttribute(default=EQUINOX_B1950) obstime = TimeAttribute(default=None, secondary_attribute='equinox') # the "self" transform @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4, FK4) def fk4_to_fk4(fk4coord1, fk4frame2): # deceptively complicated: need to transform to No E-terms FK4, precess, and # then come back, because precession is non-trivial with E-terms fnoe_w_eqx1 = fk4coord1.transform_to(FK4NoETerms(equinox=fk4coord1.equinox)) fnoe_w_eqx2 = fnoe_w_eqx1.transform_to(FK4NoETerms(equinox=fk4frame2.equinox)) return fnoe_w_eqx2.transform_to(fk4frame2) @format_doc(base_doc, components=doc_components, footer=doc_footer_fk4) class FK4NoETerms(BaseRADecFrame): """ A coordinate or frame in the FK4 system, but with the E-terms of aberration removed. The frame attributes are listed under **Other Parameters**. """ equinox = TimeAttribute(default=EQUINOX_B1950) obstime = TimeAttribute(default=None, secondary_attribute='equinox') @staticmethod def _precession_matrix(oldequinox, newequinox): """ Compute and return the precession matrix for FK4 using Newcomb's method. Used inside some of the transformation functions. Parameters ---------- oldequinox : `~astropy.time.Time` The equinox to precess from. newequinox : `~astropy.time.Time` The equinox to precess to. Returns ------- newcoord : array The precession matrix to transform to the new equinox """ return earth._precession_matrix_besselian(oldequinox.byear, newequinox.byear) # the "self" transform @frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, FK4NoETerms) def fk4noe_to_fk4noe(fk4necoord1, fk4neframe2): return fk4necoord1._precession_matrix(fk4necoord1.equinox, fk4neframe2.equinox) # FK4-NO-E to/from FK4 -----------------------------> # Unlike other frames, this module includes *two* frame classes for FK4 # coordinates - one including the E-terms of aberration (FK4), and # one not including them (FK4NoETerms). The following functions # implement the transformation between these two.
def fk4_e_terms(equinox): """ Return the e-terms of aberration vector Parameters ---------- equinox : Time object The equinox for which to compute the e-terms """ # Constant of aberration at J2000; from Explanatory Supplement to the # Astronomical Almanac (Seidelmann, 2005). k = 0.0056932 # in degrees (v_earth/c ~ 1e-4 rad ~ 0.0057 deg) k = np.radians(k) # Eccentricity of the Earth's orbit e = earth.eccentricity(equinox.jd) # Mean longitude of perigee of the solar orbit g = earth.mean_lon_of_perigee(equinox.jd) g = np.radians(g) # Obliquity of the ecliptic o = earth.obliquity(equinox.jd, algorithm=1980) o = np.radians(o) return e * k * np.sin(g), \ -e * k * np.cos(g) * np.cos(o), \ -e * k * np.cos(g) * np.sin(o) @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4, FK4NoETerms) def fk4_to_fk4_no_e(fk4coord, fk4noeframe): # Extract cartesian vector rep = fk4coord.cartesian # Find distance (for re-normalization) d_orig = rep.norm() rep /= d_orig # Apply E-terms of aberration. Note that this depends on the equinox (not # the observing time/epoch) of the coordinates. See issue #1496 for a # discussion of this. eterms_a = CartesianRepresentation( u.Quantity(fk4_e_terms(fk4coord.equinox), u.dimensionless_unscaled, copy=False), copy=False) rep = rep - eterms_a + eterms_a.dot(rep) * rep # Find new distance (for re-normalization) d_new = rep.norm() # Renormalize rep *= d_orig / d_new # now re-cast into an appropriate Representation, and precess if need be if isinstance(fk4coord.data, UnitSphericalRepresentation): rep = rep.represent_as(UnitSphericalRepresentation) # if no obstime was given in the new frame, use the old one for consistency newobstime = fk4coord._obstime if fk4noeframe._obstime is None else fk4noeframe._obstime fk4noe = FK4NoETerms(rep, equinox=fk4coord.equinox, obstime=newobstime) if fk4coord.equinox != fk4noeframe.equinox: # precession fk4noe = fk4noe.transform_to(fk4noeframe) return fk4noe @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4NoETerms, FK4) def fk4_no_e_to_fk4(fk4noecoord, fk4frame): # first precess, if necessary if fk4noecoord.equinox != fk4frame.equinox: fk4noe_w_fk4equinox = FK4NoETerms(equinox=fk4frame.equinox, obstime=fk4noecoord.obstime) fk4noecoord = fk4noecoord.transform_to(fk4noe_w_fk4equinox) # Extract cartesian vector rep = fk4noecoord.cartesian # Find distance (for re-normalization) d_orig = rep.norm() rep /= d_orig # Apply E-terms of aberration. Note that this depends on the equinox (not # the observing time/epoch) of the coordinates. See issue #1496 for a # discussion of this. eterms_a = CartesianRepresentation( u.Quantity(fk4_e_terms(fk4noecoord.equinox), u.dimensionless_unscaled, copy=False), copy=False) rep0 = rep.copy() for _ in range(10): rep = (eterms_a + rep0) / (1. + eterms_a.dot(rep)) # Find new distance (for re-normalization) d_new = rep.norm() # Renormalize rep *= d_orig / d_new # now re-cast into an appropriate Representation, and precess if need be if isinstance(fk4noecoord.data, UnitSphericalRepresentation): rep = rep.represent_as(UnitSphericalRepresentation) return fk4frame.realize_frame(rep)
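# Consistency sketch (not part of this module): removing and re-applying the
# E-terms should round-trip to numerical precision, since fk4_no_e_to_fk4
# inverts the subtraction iteratively.
#
# >>> import astropy.units as u
# >>> c = FK4(ra=10.*u.deg, dec=20.*u.deg)
# >>> c2 = c.transform_to(FK4NoETerms()).transform_to(FK4())
# >>> c.separation(c2) < 1e-6 * u.arcsec   # expect agreement to round-off
# True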
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy.utils.decorators import format_doc from astropy.coordinates import representation as r from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping, base_doc __all__ = ['BaseRADecFrame'] doc_components = """ ra : `Angle`, optional, must be keyword The RA for this object (``dec`` must also be given and ``representation`` must be None). dec : `Angle`, optional, must be keyword The Declination for this object (``ra`` must also be given and ``representation`` must be None). distance : `~astropy.units.Quantity`, optional, must be keyword The Distance for this object along the line-of-sight. (``representation`` must be None). pm_ra_cosdec : :class:`~astropy.units.Quantity`, optional, must be keyword The proper motion in Right Ascension (including the ``cos(dec)`` factor) for this object (``pm_dec`` must also be given). pm_dec : :class:`~astropy.units.Quantity`, optional, must be keyword The proper motion in Declination for this object (``pm_ra_cosdec`` must also be given). radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword The radial velocity of this object. """ @format_doc(base_doc, components=doc_components, footer="") class BaseRADecFrame(BaseCoordinateFrame): """ A base class that defines default representation info for frames that represent longitude and latitude as Right Ascension and Declination following typical "equatorial" conventions. """ frame_specific_representation_info = { r.SphericalRepresentation: [ RepresentationMapping('lon', 'ra'), RepresentationMapping('lat', 'dec') ] } default_representation = r.SphericalRepresentation default_differential = r.SphericalCosLatDifferential
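# Illustrative sketch (``MyRADecFrame`` is hypothetical, not part of this
# module): any subclass picks up the ra/dec component names from the
# representation mapping above without further configuration.
#
# >>> import astropy.units as u
# >>> class MyRADecFrame(BaseRADecFrame):
# ...     pass
# >>> MyRADecFrame(ra=10.*u.deg, dec=20.*u.deg).dec
# <Latitude 20. deg>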
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package contains the coordinate frames actually implemented by astropy. Users shouldn't use this module directly, but rather import from the `astropy.coordinates` module. While it is likely to exist for the long-term, the existence of this package and details of its organization should be considered an implementation detail, and is not guaranteed to hold for future versions of astropy. Notes ----- The builtin frame classes are all imported automatically into this package's namespace, so there's no need to access the sub-modules directly. To implement a new frame in Astropy, a developer should add the frame as a new module in this package. Any "self" transformations (i.e., those that transform from one frame to another frame of the same class) should be included in that module. Transformation functions connecting the new frame to other frames should be in a separate module, which should be imported in this package's ``__init__.py`` to ensure the transformations are hooked up when this package is imported. Placing the transformation functions in separate modules avoids circular dependencies, because they need references to the frame classes. """ from .baseradec import BaseRADecFrame from .icrs import ICRS from .fk5 import FK5 from .fk4 import FK4, FK4NoETerms from .galactic import Galactic from .galactocentric import Galactocentric from .lsr import LSR, GalacticLSR from .supergalactic import Supergalactic from .altaz import AltAz from .gcrs import GCRS, PrecessedGeocentric from .cirs import CIRS from .itrs import ITRS from .hcrs import HCRS from .ecliptic import * # there are a lot of these so we don't list them all explicitly from .skyoffset import SkyOffsetFrame # need to import transformations so that they get registered in the graph from . import icrs_fk5_transforms from . import fk4_fk5_transforms from . import galactic_transforms from . import supergalactic_transforms from . import icrs_cirs_transforms from . import cirs_observed_transforms from . import intermediate_rotation_transforms from . import ecliptic_transforms from astropy.coordinates.baseframe import frame_transform_graph # we define an __all__ because otherwise the transformation modules # get included __all__ = ['ICRS', 'FK5', 'FK4', 'FK4NoETerms', 'Galactic', 'Galactocentric', 'Supergalactic', 'AltAz', 'GCRS', 'CIRS', 'ITRS', 'HCRS', 'PrecessedGeocentric', 'GeocentricMeanEcliptic', 'BarycentricMeanEcliptic', 'HeliocentricMeanEcliptic', 'GeocentricTrueEcliptic', 'BarycentricTrueEcliptic', 'HeliocentricTrueEcliptic', 'SkyOffsetFrame', 'GalacticLSR', 'LSR', 'BaseEclipticFrame', 'BaseRADecFrame', 'make_transform_graph_docs'] def make_transform_graph_docs(transform_graph): """ Generates a string that can be used in other docstrings to include a transformation graph, showing the available transforms and coordinate systems. Parameters ---------- transform_graph : `~.coordinates.TransformGraph` Returns ------- docstring : str A string that can be added to the end of a docstring to show the transform graph. """ from textwrap import dedent coosys = [transform_graph.lookup_name(item) for item in transform_graph.get_names()] # currently, all of the priorities are set to 1, so we don't need to show # them in the transform graph.
graphstr = transform_graph.to_dot_graph(addnodes=coosys, priorities=False) docstr = """ The diagram below shows all of the built-in coordinate systems, their aliases (useful for converting other coordinates to them using attribute-style access) and the pre-defined transformations between them. The user is free to override any of these transformations by defining new transformations between these systems, but the pre-defined transformations should be sufficient for typical usage. The color of an edge in the graph (i.e. the transformations between two frames) is set by the type of transformation; the legend box defines the mapping from transform class name to color. .. Wrap the graph in a div with a custom class to allow theming. .. container:: frametransformgraph .. graphviz:: """ docstr = dedent(docstr) + ' ' + graphstr.replace('\n', '\n ') # colors are in dictionary at the bottom of transformations.py from astropy.coordinates.transformations import trans_to_color html_list_items = [] for cls, color in trans_to_color.items(): block = u""" <li style='list-style: none;'> <p style="font-size: 12px;line-height: 24px;font-weight: normal;color: #848484;padding: 0;margin: 0;"> <b>{0}:</b> <span style="font-size: 24px; color: {1};"><b>➝</b></span> </p> </li> """.format(cls.__name__, color) html_list_items.append(block) graph_legend = u""" .. raw:: html <ul> {} </ul> """.format("\n".join(html_list_items)) docstr = docstr + dedent(graph_legend) return docstr _transform_graph_docs = make_transform_graph_docs(frame_transform_graph)
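# --- Editorial usage sketch (not part of the astropy source):
# ``make_transform_graph_docs`` simply returns a reST string, so it can be
# inspected directly, just as this module does when it builds
# ``_transform_graph_docs`` for the package documentation.
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.builtin_frames import make_transform_graph_docs

docs = make_transform_graph_docs(frame_transform_graph)
print(docs[:200])  # the prose intro, followed by the ".. graphviz::" block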
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy import units as u from astropy.utils.decorators import format_doc from astropy.coordinates import representation as r from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping, base_doc from .galactic import Galactic __all__ = ['Supergalactic'] doc_components = """ sgl : `Angle`, optional, must be keyword The supergalactic longitude for this object (``sgb`` must also be given and ``representation`` must be None). sgb : `Angle`, optional, must be keyword The supergalactic latitude for this object (``sgl`` must also be given and ``representation`` must be None). distance : `~astropy.units.Quantity`, optional, must be keyword The Distance for this object along the line-of-sight. pm_sgl_cossgb : :class:`~astropy.units.Quantity`, optional, must be keyword The proper motion in supergalactic longitude (including the ``cos(sgb)`` factor) for this object (``pm_sgb`` must also be given). pm_sgb : :class:`~astropy.units.Quantity`, optional, must be keyword The proper motion in supergalactic latitude for this object (``pm_sgl_cossgb`` must also be given). radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword The radial velocity of this object. """ @format_doc(base_doc, components=doc_components, footer="") class Supergalactic(BaseCoordinateFrame): """ Supergalactic Coordinates (see Lahav et al. 2000, <http://adsabs.harvard.edu/abs/2000MNRAS.312..166L>, and references therein). """ frame_specific_representation_info = { r.SphericalRepresentation: [ RepresentationMapping('lon', 'sgl'), RepresentationMapping('lat', 'sgb') ], r.CartesianRepresentation: [ RepresentationMapping('x', 'sgx'), RepresentationMapping('y', 'sgy'), RepresentationMapping('z', 'sgz') ], r.CartesianDifferential: [ RepresentationMapping('d_x', 'v_x', u.km/u.s), RepresentationMapping('d_y', 'v_y', u.km/u.s), RepresentationMapping('d_z', 'v_z', u.km/u.s) ], } default_representation = r.SphericalRepresentation default_differential = r.SphericalCosLatDifferential # North supergalactic pole in Galactic coordinates. # Needed for transformations to/from Galactic coordinates. _nsgp_gal = Galactic(l=47.37*u.degree, b=+6.32*u.degree)
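# --- Editorial usage sketch (not part of the astropy source): thanks to the
# mappings above, Supergalactic data is accessed as sgl/sgb (or sgx/sgy/sgz
# for Cartesian representations).
import astropy.units as u
from astropy.coordinates import Galactic, Supergalactic

sgc = Supergalactic(sgl=90 * u.deg, sgb=0 * u.deg)
print(sgc.transform_to(Galactic()))  # uses the supergalactic_transforms module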
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy.utils.decorators import format_doc from astropy.coordinates.baseframe import base_doc from .baseradec import BaseRADecFrame, doc_components __all__ = ['ICRS'] @format_doc(base_doc, components=doc_components, footer="") class ICRS(BaseRADecFrame): """ A coordinate or frame in the ICRS system. If you're looking for "J2000" coordinates, and aren't sure if you want to use this or `~astropy.coordinates.FK5`, you probably want to use ICRS. It is better defined as a catalog coordinate, is an inertial system, and is very close (within tens of milliarcseconds) to J2000 equatorial coordinates. For more background on the ICRS and related coordinate transformations, see the references provided in the :ref:`astropy-coordinates-seealso` section of the documentation. """
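# --- Editorial usage sketch (not part of the astropy source): ICRS has no
# frame attributes, so it can be built from angles alone; the difference
# from FK5 at J2000 is only at the tens-of-milliarcseconds level, per the
# docstring above.
import astropy.units as u
from astropy.coordinates import FK5, SkyCoord

c = SkyCoord(ra=10.0 * u.deg, dec=20.0 * u.deg, frame='icrs')
print(c.transform_to(FK5(equinox='J2000')))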
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy import units as u from astropy.utils.decorators import format_doc from astropy.coordinates.attributes import (TimeAttribute, CartesianRepresentationAttribute) from .utils import DEFAULT_OBSTIME, EQUINOX_J2000 from astropy.coordinates.baseframe import base_doc from .baseradec import BaseRADecFrame, doc_components __all__ = ['GCRS', 'PrecessedGeocentric'] doc_footer_gcrs = """ Other parameters ---------------- obstime : `~astropy.time.Time` The time at which the observation is taken. Used for determining the position of the Earth. obsgeoloc : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity` The position of the observer relative to the center-of-mass of the Earth, oriented the same as BCRS/ICRS. Either [0, 0, 0], `~astropy.coordinates.CartesianRepresentation`, or proper input for one, i.e., a `~astropy.units.Quantity` with shape (3, ...) and length units. Defaults to [0, 0, 0], meaning "true" GCRS. obsgeovel : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity` The velocity of the observer relative to the center-of-mass of the Earth, oriented the same as BCRS/ICRS. Either [0, 0, 0], `~astropy.coordinates.CartesianRepresentation`, or proper input for one, i.e., a `~astropy.units.Quantity` with shape (3, ...) and velocity units. Defaults to [0, 0, 0], meaning "true" GCRS. """ @format_doc(base_doc, components=doc_components, footer=doc_footer_gcrs) class GCRS(BaseRADecFrame): """ A coordinate or frame in the Geocentric Celestial Reference System (GCRS). GCRS is distinct from ICRS mainly in that it is relative to the Earth's center-of-mass rather than the solar system barycenter. That means this frame includes the effects of aberration (unlike ICRS). For more background on the GCRS, see the references provided in the :ref:`astropy-coordinates-seealso` section of the documentation. (Of particular note is Section 1.2 of `USNO Circular 179 <http://aa.usno.navy.mil/publications/docs/Circular_179.php>`_) This frame also includes frames that are defined *relative* to the Earth, but that are offset (in both position and velocity) from the Earth. The frame attributes are listed under **Other Parameters**. """ obstime = TimeAttribute(default=DEFAULT_OBSTIME) obsgeoloc = CartesianRepresentationAttribute(default=[0, 0, 0], unit=u.m) obsgeovel = CartesianRepresentationAttribute(default=[0, 0, 0], unit=u.m/u.s) # The "self-transform" is defined in icrs_cirs_transforms.py, because in # the current implementation it goes through ICRS (like CIRS) doc_footer_prec_geo = """ Other parameters ---------------- equinox : `~astropy.time.Time` The (mean) equinox to precess the coordinates to. obstime : `~astropy.time.Time` The time at which the observation is taken. Used for determining the position of the Earth. obsgeoloc : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity` The position of the observer relative to the center-of-mass of the Earth, oriented the same as BCRS/ICRS. Either [0, 0, 0], `~astropy.coordinates.CartesianRepresentation`, or proper input for one, i.e., a `~astropy.units.Quantity` with shape (3, ...) and length units. Defaults to [0, 0, 0], meaning "true" Geocentric. obsgeovel : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity` The velocity of the observer relative to the center-of-mass of the Earth, oriented the same as BCRS/ICRS.
Either [0, 0, 0], `~astropy.coordinates.CartesianRepresentation`, or proper input for one, i.e., a `~astropy.units.Quantity` with shape (3, ...) and velocity units. Defaults to [0, 0, 0], meaning "true" Geocentric. """ @format_doc(base_doc, components=doc_components, footer=doc_footer_prec_geo) class PrecessedGeocentric(BaseRADecFrame): """ A coordinate frame defined in a similar manner to GCRS, but precessed to a requested (mean) equinox. Note that this does *not* end up the same as regular GCRS even for J2000 equinox, because the GCRS orientation is fixed to that of ICRS, which is not quite the same as the dynamical J2000 orientation. The frame attributes are listed under **Other Parameters**. """ equinox = TimeAttribute(default=EQUINOX_J2000) obstime = TimeAttribute(default=DEFAULT_OBSTIME) obsgeoloc = CartesianRepresentationAttribute(default=[0, 0, 0], unit=u.m) obsgeovel = CartesianRepresentationAttribute(default=[0, 0, 0], unit=u.m/u.s)
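# --- Editorial usage sketch (not part of the astropy source): a GCRS frame
# for an observer offset from the geocenter. ``obsgeoloc``/``obsgeovel``
# accept plain Quantities of shape (3,), per the attribute definitions above;
# the numbers below are illustrative only.
import astropy.units as u
from astropy.coordinates import GCRS, SkyCoord
from astropy.time import Time

frame = GCRS(obstime=Time('2015-06-01'),
             obsgeoloc=[6378.1e3, 0, 0] * u.m,
             obsgeovel=[0, 465.1, 0] * u.m / u.s)
print(SkyCoord(10 * u.deg, 20 * u.deg, frame='icrs').transform_to(frame))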
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy import units as u from astropy.utils.decorators import format_doc from astropy.coordinates import representation as r from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping, base_doc from astropy.coordinates.attributes import TimeAttribute, QuantityAttribute from .utils import EQUINOX_J2000, DEFAULT_OBSTIME __all__ = ['GeocentricMeanEcliptic', 'BarycentricMeanEcliptic', 'HeliocentricMeanEcliptic', 'BaseEclipticFrame', 'GeocentricTrueEcliptic', 'BarycentricTrueEcliptic', 'HeliocentricTrueEcliptic', 'HeliocentricEclipticIAU76', 'CustomBarycentricEcliptic'] doc_components_ecl = """ lon : `Angle`, optional, must be keyword The ecliptic longitude for this object (``lat`` must also be given and ``representation`` must be None). lat : `Angle`, optional, must be keyword The ecliptic latitude for this object (``lon`` must also be given and ``representation`` must be None). distance : `~astropy.units.Quantity`, optional, must be keyword The distance for this object from the {0}. (``representation`` must be None). pm_lon_coslat : :class:`~astropy.units.Quantity`, optional, must be keyword The proper motion in the ecliptic longitude (including the ``cos(lat)`` factor) for this object (``pm_lat`` must also be given). pm_lat : :class:`~astropy.units.Quantity`, optional, must be keyword The proper motion in the ecliptic latitude for this object (``pm_lon_coslat`` must also be given). radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword The radial velocity of this object. """ @format_doc(base_doc, components=doc_components_ecl.format('specified location'), footer="") class BaseEclipticFrame(BaseCoordinateFrame): """ A base class for frames that have names and conventions like that of ecliptic frames. .. warning:: In the current version of astropy, the ecliptic frames do not yet have stringent accuracy tests. We recommend that you test against "known-good" cases to ensure these frames are what you are looking for (and then, ideally, contribute these tests to Astropy!). """ default_representation = r.SphericalRepresentation default_differential = r.SphericalCosLatDifferential doc_footer_geo = """ Other parameters ---------------- equinox : `~astropy.time.Time`, optional The date to assume for this frame. Determines the location of the x-axis and the location of the Earth (necessary for transformation to non-geocentric systems). Defaults to the 'J2000' equinox. obstime : `~astropy.time.Time`, optional The time at which the observation is taken. Used for determining the position of the Earth. Defaults to J2000. """ @format_doc(base_doc, components=doc_components_ecl.format('geocenter'), footer=doc_footer_geo) class GeocentricMeanEcliptic(BaseEclipticFrame): """ Geocentric mean ecliptic coordinates. The origin of the coordinates is the geocenter (Earth), with the x-axis pointing to the *mean* (not true) equinox at the time specified by the ``equinox`` attribute, and the xy-plane in the plane of the ecliptic for that date. Be aware that the definition of "geocentric" here means that this frame *includes* light deflection from the sun, aberration, etc. when transforming to/from e.g. ICRS. The frame attributes are listed under **Other Parameters**.
""" equinox = TimeAttribute(default=EQUINOX_J2000) obstime = TimeAttribute(default=DEFAULT_OBSTIME) @format_doc(base_doc, components=doc_components_ecl.format('geocenter'), footer=doc_footer_geo) class GeocentricTrueEcliptic(BaseEclipticFrame): """ Geocentric true ecliptic coordinates. These origin of the coordinates are the geocenter (Earth), with the x axis pointing to the *true* (not mean) equinox at the time specified by the ``equinox`` attribute, and the xy-plane in the plane of the ecliptic for that date. Be aware that the definition of "geocentric" here means that this frame *includes* light deflection from the sun, aberration, etc when transforming to/from e.g. ICRS. The frame attributes are listed under **Other Parameters**. """ equinox = TimeAttribute(default=EQUINOX_J2000) obstime = TimeAttribute(default=DEFAULT_OBSTIME) doc_footer_bary = """ Other parameters ---------------- equinox : `~astropy.time.Time`, optional The date to assume for this frame. Determines the location of the x-axis and the location of the Earth and Sun. Defaults to the 'J2000' equinox. """ @format_doc(base_doc, components=doc_components_ecl.format("barycenter"), footer=doc_footer_bary) class BarycentricMeanEcliptic(BaseEclipticFrame): """ Barycentric mean ecliptic coordinates. These origin of the coordinates are the barycenter of the solar system, with the x axis pointing in the direction of the *mean* (not true) equinox as at the time specified by the ``equinox`` attribute (as seen from Earth), and the xy-plane in the plane of the ecliptic for that date. The frame attributes are listed under **Other Parameters**. """ equinox = TimeAttribute(default=EQUINOX_J2000) @format_doc(base_doc, components=doc_components_ecl.format("barycenter"), footer=doc_footer_bary) class BarycentricTrueEcliptic(BaseEclipticFrame): """ Barycentric true ecliptic coordinates. These origin of the coordinates are the barycenter of the solar system, with the x axis pointing in the direction of the *true* (not mean) equinox as at the time specified by the ``equinox`` attribute (as seen from Earth), and the xy-plane in the plane of the ecliptic for that date. The frame attributes are listed under **Other Parameters**. """ equinox = TimeAttribute(default=EQUINOX_J2000) doc_footer_helio = """ Other parameters ---------------- equinox : `~astropy.time.Time`, optional The date to assume for this frame. Determines the location of the x-axis and the location of the Earth and Sun. Defaults to the 'J2000' equinox. obstime : `~astropy.time.Time`, optional The time at which the observation is taken. Used for determining the position of the Sun. Defaults to J2000. """ @format_doc(base_doc, components=doc_components_ecl.format("sun's center"), footer=doc_footer_helio) class HeliocentricMeanEcliptic(BaseEclipticFrame): """ Heliocentric mean ecliptic coordinates. These origin of the coordinates are the center of the sun, with the x axis pointing in the direction of the *mean* (not true) equinox as at the time specified by the ``equinox`` attribute (as seen from Earth), and the xy-plane in the plane of the ecliptic for that date. The frame attributes are listed under **Other Parameters**. {params} """ equinox = TimeAttribute(default=EQUINOX_J2000) obstime = TimeAttribute(default=DEFAULT_OBSTIME) @format_doc(base_doc, components=doc_components_ecl.format("sun's center"), footer=doc_footer_helio) class HeliocentricTrueEcliptic(BaseEclipticFrame): """ Heliocentric true ecliptic coordinates. 
The origin of the coordinates is the center of the sun, with the x-axis pointing in the direction of the *true* (not mean) equinox at the time specified by the ``equinox`` attribute (as seen from Earth), and the xy-plane in the plane of the ecliptic for that date. The frame attributes are listed under **Other Parameters**. """ equinox = TimeAttribute(default=EQUINOX_J2000) obstime = TimeAttribute(default=DEFAULT_OBSTIME) @format_doc(base_doc, components=doc_components_ecl.format("sun's center"), footer="") class HeliocentricEclipticIAU76(BaseEclipticFrame): """ Heliocentric mean (IAU 1976) ecliptic coordinates. The origin of the coordinates is the center of the sun, with the x-axis pointing in the direction of the *mean* (not true) equinox of J2000, and the xy-plane in the plane of the ecliptic of J2000 (according to the IAU 1976/1980 obliquity model). It has, therefore, a fixed equinox and an older obliquity value than the rest of the frames. The frame attributes are listed under **Other Parameters**. """ obstime = TimeAttribute(default=DEFAULT_OBSTIME) @format_doc(base_doc, components=doc_components_ecl.format("barycenter"), footer="") class CustomBarycentricEcliptic(BaseEclipticFrame): """ Barycentric ecliptic coordinates with custom obliquity. The origin of the coordinates is the barycenter of the solar system, with the x-axis pointing in the direction of the *mean* (not true) equinox of J2000, and the xy-plane in the plane of the ecliptic tilted by a custom obliquity angle. The frame attributes are listed under **Other Parameters**. """ obliquity = QuantityAttribute(default=84381.448 * u.arcsec, unit=u.arcsec)
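# --- Editorial usage sketch (not part of the astropy source):
# CustomBarycentricEcliptic takes the obliquity as a frame attribute; the
# value below just repeats the IAU 1980 default for illustration.
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates.builtin_frames import CustomBarycentricEcliptic

ecl = CustomBarycentricEcliptic(obliquity=84381.448 * u.arcsec)
print(SkyCoord(10 * u.deg, 20 * u.deg, frame='icrs').transform_to(ecl))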
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy.utils.decorators import format_doc from astropy.coordinates.representation import CartesianRepresentation, CartesianDifferential from astropy.coordinates.baseframe import BaseCoordinateFrame, base_doc from astropy.coordinates.attributes import TimeAttribute from .utils import DEFAULT_OBSTIME __all__ = ['ITRS'] @format_doc(base_doc, components="", footer="") class ITRS(BaseCoordinateFrame): """ A coordinate or frame in the International Terrestrial Reference System (ITRS). This is approximately a geocentric system, although strictly it is defined by a series of reference locations near the surface of the Earth. For more background on the ITRS, see the references provided in the :ref:`astropy-coordinates-seealso` section of the documentation. """ default_representation = CartesianRepresentation default_differential = CartesianDifferential obstime = TimeAttribute(default=DEFAULT_OBSTIME) @property def earth_location(self): """ The data in this frame as an `~astropy.coordinates.EarthLocation` class. """ from astropy.coordinates.earth import EarthLocation cart = self.represent_as(CartesianRepresentation) return EarthLocation(x=cart.x, y=cart.y, z=cart.z) # Self-transform is in intermediate_rotation_transforms.py with all the other # ITRS transforms
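# --- Editorial usage sketch (not part of the astropy source): round-tripping
# between EarthLocation and ITRS via the ``earth_location`` property above.
import astropy.units as u
from astropy.coordinates import EarthLocation

loc = EarthLocation.from_geodetic(lon=16.7 * u.deg, lat=-23.3 * u.deg,
                                  height=2400 * u.m)
itrs = loc.get_itrs()                # an ITRS frame with Cartesian data
print(itrs.earth_location.geodetic)  # back to lon/lat/height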
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains functions/values used repeatedly in different modules of the ``builtin_frames`` package. """ import warnings import numpy as np from astropy import units as u from astropy import _erfa as erfa from astropy.time import Time from astropy.utils import iers from astropy.utils.exceptions import AstropyWarning # We use tt as the time scale for these equinoxes, primarily because it is the # convention for J2000 (it is unclear if there is any "right answer" for B1950). # While #8600 makes this the default behavior, we show it here to ensure it's # clear which scale is used. EQUINOX_J2000 = Time('J2000', scale='tt') EQUINOX_B1950 = Time('B1950', scale='tt') # This is a time object that is the default "obstime" when such an attribute is # necessary. Currently, we use J2000. DEFAULT_OBSTIME = Time('J2000', scale='tt') PIOVER2 = np.pi / 2. # comes from the mean of the 1962-2014 IERS B data _DEFAULT_PM = (0.035, 0.29)*u.arcsec def get_polar_motion(time): """ Gets the two polar motion components in radians for use with apio13. """ # Get the polar motion from the IERS table xp, yp, status = iers.IERS_Auto.open().pm_xy(time, return_status=True) wmsg = None if np.any(status == iers.TIME_BEFORE_IERS_RANGE): wmsg = ('Tried to get polar motions for times before IERS data is ' 'valid. Defaulting to polar motion from the 50-yr mean for those. ' 'This may affect precision at the 10s of arcsec level') xp.ravel()[status.ravel() == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[0] yp.ravel()[status.ravel() == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[1] warnings.warn(wmsg, AstropyWarning) if np.any(status == iers.TIME_BEYOND_IERS_RANGE): wmsg = ('Tried to get polar motions for times after IERS data is ' 'valid. Defaulting to polar motion from the 50-yr mean for those. ' 'This may affect precision at the 10s of arcsec level') xp.ravel()[status.ravel() == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[0] yp.ravel()[status.ravel() == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[1] warnings.warn(wmsg, AstropyWarning) return xp.to_value(u.radian), yp.to_value(u.radian) def _warn_iers(ierserr): """ Generate a warning for an IERSRangeError. Parameters ---------- ierserr : An `~astropy.utils.iers.IERSRangeError` """ msg = '{0} Assuming UT1-UTC=0 for coordinate transformations.' warnings.warn(msg.format(ierserr.args[0]), AstropyWarning) def get_dut1utc(time): """ This function is used to get UT1-UTC in coordinates because normally it gives an error outside the IERS range, but in coordinates we want to allow it to go through but with a warning. """ try: return time.delta_ut1_utc except iers.IERSRangeError as e: _warn_iers(e) return np.zeros(time.shape) def get_jd12(time, scale): """ Gets ``jd1`` and ``jd2`` from a time object in a particular scale. Parameters ---------- time : `~astropy.time.Time` The time to get the jds for scale : str The time scale to get the jds for Returns ------- jd1 : float jd2 : float """ if time.scale == scale: newtime = time else: try: newtime = getattr(time, scale) except iers.IERSRangeError as e: _warn_iers(e) newtime = time return newtime.jd1, newtime.jd2 def norm(p): """ Normalise a p-vector.
""" if np.__version__ == '1.14.0': # there is a bug in numpy v1.14.0 (fixed in 1.14.1) that causes # this einsum call to break with the default of optimize=True # see https://github.com/astropy/astropy/issues/7051 return p / np.sqrt(np.einsum('...i,...i', p, p, optimize=False))[..., np.newaxis] else: return p / np.sqrt(np.einsum('...i,...i', p, p))[..., np.newaxis] def get_cip(jd1, jd2): """ Find the X, Y coordinates of the CIP and the CIO locator, s. Parameters ---------- jd1 : float or `np.ndarray` First part of two part Julian date (TDB) jd2 : float or `np.ndarray` Second part of two part Julian date (TDB) Returns -------- x : float or `np.ndarray` x coordinate of the CIP y : float or `np.ndarray` y coordinate of the CIP s : float or `np.ndarray` CIO locator, s """ # classical NPB matrix, IAU 2006/2000A rpnb = erfa.pnm06a(jd1, jd2) # CIP X, Y coordinates from array x, y = erfa.bpn2xy(rpnb) # CIO locator, s s = erfa.s06(jd1, jd2, x, y) return x, y, s def aticq(ri, di, astrom): """ A slightly modified version of the ERFA function ``eraAticq``. ``eraAticq`` performs the transformations between two coordinate systems, with the details of the transformation being encoded into the ``astrom`` array. The companion function ``eraAtciqz`` is meant to be its inverse. However, this is not true for directions close to the Solar centre, since the light deflection calculations are numerically unstable and therefore not reversible. This version sidesteps that problem by artificially reducing the light deflection for directions which are within 90 arcseconds of the Sun's position. This is the same approach used by the ERFA functions above, except that they use a threshold of 9 arcseconds. Parameters ---------- ri : float or `~numpy.ndarray` right ascension, radians di : float or `~numpy.ndarray` declination, radians astrom : eraASTROM array ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13`` Returns -------- rc : float or `~numpy.ndarray` dc : float or `~numpy.ndarray` """ # RA, Dec to cartesian unit vectors pos = erfa.s2c(ri, di) # Bias-precession-nutation, giving GCRS proper direction. ppr = erfa.trxp(astrom['bpn'], pos) # Aberration, giving GCRS natural direction d = np.zeros_like(ppr) for j in range(2): before = norm(ppr-d) after = erfa.ab(before, astrom['v'], astrom['em'], astrom['bm1']) d = after - before pnat = norm(ppr-d) # Light deflection by the Sun, giving BCRS coordinate direction d = np.zeros_like(pnat) for j in range(5): before = norm(pnat-d) after = erfa.ld(1.0, before, before, astrom['eh'], astrom['em'], 5e-8) d = after - before pco = norm(pnat-d) # ICRS astrometric RA, Dec rc, dc = erfa.c2s(pco) return erfa.anp(rc), dc def atciqz(rc, dc, astrom): """ A slightly modified version of the ERFA function ``eraAtciqz``. ``eraAtciqz`` performs the transformations between two coordinate systems, with the details of the transformation being encoded into the ``astrom`` array. The companion function ``eraAticq`` is meant to be its inverse. However, this is not true for directions close to the Solar centre, since the light deflection calculations are numerically unstable and therefore not reversible. This version sidesteps that problem by artificially reducing the light deflection for directions which are within 90 arcseconds of the Sun's position. This is the same approach used by the ERFA functions above, except that they use a threshold of 9 arcseconds. 
Parameters ---------- rc : float or `~numpy.ndarray` right ascension, radians dc : float or `~numpy.ndarray` declination, radians astrom : eraASTROM array ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13`` Returns -------- ri : float or `~numpy.ndarray` di : float or `~numpy.ndarray` """ # BCRS coordinate direction (unit vector). pco = erfa.s2c(rc, dc) # Light deflection by the Sun, giving BCRS natural direction. pnat = erfa.ld(1.0, pco, pco, astrom['eh'], astrom['em'], 5e-8) # Aberration, giving GCRS proper direction. ppr = erfa.ab(pnat, astrom['v'], astrom['em'], astrom['bm1']) # Bias-precession-nutation, giving CIRS proper direction. # Has no effect if matrix is identity matrix, in which case gives GCRS ppr. pi = erfa.rxp(astrom['bpn'], ppr) # CIRS (GCRS) RA, Dec ri, di = erfa.c2s(pi) return erfa.anp(ri), di def prepare_earth_position_vel(time): """ Get barycentric position and velocity, and heliocentric position of Earth Parameters ----------- time : `~astropy.time.Time` time at which to calculate position and velocity of Earth Returns -------- earth_pv : `np.ndarray` Barycentric position and velocity of Earth, in au and au/day earth_helio : `np.ndarray` Heliocentric position of Earth in au """ # this goes here to avoid circular import errors from astropy.coordinates.solar_system import (get_body_barycentric, get_body_barycentric_posvel) # get barycentric position and velocity of earth earth_p, earth_v = get_body_barycentric_posvel('earth', time) # get heliocentric position of earth, preparing it for passing to erfa. sun = get_body_barycentric('sun', time) earth_heliocentric = (earth_p - sun).get_xyz(xyz_axis=-1).to_value(u.au) # Also prepare earth_pv for passing to erfa, which wants it as # a structured dtype. earth_pv = erfa.pav2pv( earth_p.get_xyz(xyz_axis=-1).to_value(u.au), earth_v.get_xyz(xyz_axis=-1).to_value(u.au/u.d)) return earth_pv, earth_heliocentric
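# --- Editorial usage sketch (not part of the astropy source): get_jd12
# splits a Time into a two-part Julian date in the requested scale, and
# norm() normalises p-vectors along the last axis.
import numpy as np
from astropy.time import Time
from astropy.coordinates.builtin_frames.utils import get_jd12, norm

jd1, jd2 = get_jd12(Time('2015-06-01'), 'tt')
print(jd1, jd2)                         # e.g. 2457174.5 plus the tt fraction
print(norm(np.array([3.0, 4.0, 0.0])))  # -> [0.6, 0.8, 0.0]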
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Contains the transformation functions for getting to/from ecliptic systems. """ from astropy import units as u from astropy.coordinates.baseframe import frame_transform_graph from astropy.coordinates.transformations import ( FunctionTransformWithFiniteDifference, DynamicMatrixTransform, AffineTransform, ) from astropy.coordinates.matrix_utilities import (rotation_matrix, matrix_product, matrix_transpose) from astropy.coordinates.representation import CartesianRepresentation from astropy import _erfa as erfa from .icrs import ICRS from .gcrs import GCRS from .ecliptic import (GeocentricMeanEcliptic, BarycentricMeanEcliptic, HeliocentricMeanEcliptic, GeocentricTrueEcliptic, BarycentricTrueEcliptic, HeliocentricTrueEcliptic, HeliocentricEclipticIAU76, CustomBarycentricEcliptic) from .utils import get_jd12, EQUINOX_J2000 from astropy.coordinates.errors import UnitsError def _mean_ecliptic_rotation_matrix(equinox): # This code calls pmat06 from ERFA, which retrieves the precession # matrix (including frame bias) according to the IAU 2006 model, but # leaves out the nutation. This matches what ERFA does in the ecm06 # function and also brings the results closer to what other libraries # give (see https://github.com/astropy/astropy/pull/6508). jd1, jd2 = get_jd12(equinox, 'tt') rbp = erfa.pmat06(jd1, jd2) obl = erfa.obl06(jd1, jd2)*u.radian return matrix_product(rotation_matrix(obl, 'x'), rbp) def _true_ecliptic_rotation_matrix(equinox): # This code calls pnm06a from ERFA, which retrieves the precession # matrix (including frame bias) according to the IAU 2006 model, and # including the nutation. This family of systems is less popular # (see https://github.com/astropy/astropy/pull/6508). jd1, jd2 = get_jd12(equinox, 'tt') rnpb = erfa.pnm06a(jd1, jd2) obl = erfa.obl06(jd1, jd2)*u.radian return matrix_product(rotation_matrix(obl, 'x'), rnpb) def _obliquity_only_rotation_matrix(obl=erfa.obl80(EQUINOX_J2000.jd1, EQUINOX_J2000.jd2) * u.radian): # This code only accounts for the obliquity, # which can be passed explicitly. 
# The default value is the IAU 1980 value for J2000, # which is computed using obl80 from ERFA: # # obl = _erfa.obl80(EQUINOX_J2000.jd1, EQUINOX_J2000.jd2) * u.radian return rotation_matrix(obl, "x") # MeanEcliptic frames @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, GeocentricMeanEcliptic, finite_difference_frameattr_name='equinox') def gcrs_to_geoecliptic(gcrs_coo, to_frame): # first get us to a 0 pos/vel GCRS at the target equinox gcrs_coo2 = gcrs_coo.transform_to(GCRS(obstime=to_frame.obstime)) rmat = _mean_ecliptic_rotation_matrix(to_frame.equinox) newrepr = gcrs_coo2.cartesian.transform(rmat) return to_frame.realize_frame(newrepr) @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GeocentricMeanEcliptic, GCRS) def geoecliptic_to_gcrs(from_coo, gcrs_frame): rmat = _mean_ecliptic_rotation_matrix(from_coo.equinox) newrepr = from_coo.cartesian.transform(matrix_transpose(rmat)) gcrs = GCRS(newrepr, obstime=from_coo.obstime) # now do any needed offsets (no-op if same obstime and 0 pos/vel) return gcrs.transform_to(gcrs_frame) @frame_transform_graph.transform(DynamicMatrixTransform, ICRS, BarycentricMeanEcliptic) def icrs_to_baryecliptic(from_coo, to_frame): return _mean_ecliptic_rotation_matrix(to_frame.equinox) @frame_transform_graph.transform(DynamicMatrixTransform, BarycentricMeanEcliptic, ICRS) def baryecliptic_to_icrs(from_coo, to_frame): return matrix_transpose(icrs_to_baryecliptic(to_frame, from_coo)) _NEED_ORIGIN_HINT = ("The input {0} coordinates do not have length units. This " "probably means you created coordinates with lat/lon but " "no distance. Heliocentric<->ICRS transforms cannot " "function in this case because there is an origin shift.") @frame_transform_graph.transform(AffineTransform, ICRS, HeliocentricMeanEcliptic) def icrs_to_helioecliptic(from_coo, to_frame): if not u.m.is_equivalent(from_coo.cartesian.x.unit): raise UnitsError(_NEED_ORIGIN_HINT.format(from_coo.__class__.__name__)) # get barycentric sun coordinate # this goes here to avoid circular import errors from astropy.coordinates.solar_system import get_body_barycentric bary_sun_pos = get_body_barycentric('sun', to_frame.obstime) # now compute the matrix to precess to the right orientation rmat = _mean_ecliptic_rotation_matrix(to_frame.equinox) return rmat, (-bary_sun_pos).transform(rmat) @frame_transform_graph.transform(AffineTransform, HeliocentricMeanEcliptic, ICRS) def helioecliptic_to_icrs(from_coo, to_frame): if not u.m.is_equivalent(from_coo.cartesian.x.unit): raise UnitsError(_NEED_ORIGIN_HINT.format(from_coo.__class__.__name__)) # first un-precess from ecliptic to ICRS orientation rmat = _mean_ecliptic_rotation_matrix(from_coo.equinox) # now offset back to barycentric, which is the correct center for ICRS # this goes here to avoid circular import errors from astropy.coordinates.solar_system import get_body_barycentric # get barycentric sun coordinate bary_sun_pos = get_body_barycentric('sun', from_coo.obstime) return matrix_transpose(rmat), bary_sun_pos # TrueEcliptic frames @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, GeocentricTrueEcliptic, finite_difference_frameattr_name='equinox') def gcrs_to_true_geoecliptic(gcrs_coo, to_frame): # first get us to a 0 pos/vel GCRS at the target equinox gcrs_coo2 = gcrs_coo.transform_to(GCRS(obstime=to_frame.obstime)) rmat = _true_ecliptic_rotation_matrix(to_frame.equinox) newrepr = gcrs_coo2.cartesian.transform(rmat) return to_frame.realize_frame(newrepr) 
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GeocentricTrueEcliptic, GCRS) def true_geoecliptic_to_gcrs(from_coo, gcrs_frame): rmat = _true_ecliptic_rotation_matrix(from_coo.equinox) newrepr = from_coo.cartesian.transform(matrix_transpose(rmat)) gcrs = GCRS(newrepr, obstime=from_coo.obstime) # now do any needed offsets (no-op if same obstime and 0 pos/vel) return gcrs.transform_to(gcrs_frame) @frame_transform_graph.transform(DynamicMatrixTransform, ICRS, BarycentricTrueEcliptic) def icrs_to_true_baryecliptic(from_coo, to_frame): return _true_ecliptic_rotation_matrix(to_frame.equinox) @frame_transform_graph.transform(DynamicMatrixTransform, BarycentricTrueEcliptic, ICRS) def true_baryecliptic_to_icrs(from_coo, to_frame): return matrix_transpose(icrs_to_true_baryecliptic(to_frame, from_coo)) @frame_transform_graph.transform(AffineTransform, ICRS, HeliocentricTrueEcliptic) def icrs_to_true_helioecliptic(from_coo, to_frame): if not u.m.is_equivalent(from_coo.cartesian.x.unit): raise UnitsError(_NEED_ORIGIN_HINT.format(from_coo.__class__.__name__)) # get barycentric sun coordinate # this goes here to avoid circular import errors from astropy.coordinates.solar_system import get_body_barycentric bary_sun_pos = get_body_barycentric('sun', to_frame.obstime) # now compute the matrix to precess to the right orientation rmat = _true_ecliptic_rotation_matrix(to_frame.equinox) return rmat, (-bary_sun_pos).transform(rmat) @frame_transform_graph.transform(AffineTransform, HeliocentricTrueEcliptic, ICRS) def true_helioecliptic_to_icrs(from_coo, to_frame): if not u.m.is_equivalent(from_coo.cartesian.x.unit): raise UnitsError(_NEED_ORIGIN_HINT.format(from_coo.__class__.__name__)) # first un-precess from ecliptic to ICRS orientation rmat = _true_ecliptic_rotation_matrix(from_coo.equinox) # now offset back to barycentric, which is the correct center for ICRS # this goes here to avoid circular import errors from astropy.coordinates.solar_system import get_body_barycentric # get barycentric sun coordinate bary_sun_pos = get_body_barycentric('sun', from_coo.obstime) return matrix_transpose(rmat), bary_sun_pos # Other ecliptic frames @frame_transform_graph.transform(AffineTransform, HeliocentricEclipticIAU76, ICRS) def ecliptic_to_iau76_icrs(from_coo, to_frame): # first un-precess from ecliptic to ICRS orientation rmat = _obliquity_only_rotation_matrix() # now offset back to barycentric, which is the correct center for ICRS # get barycentric sun coordinate # this goes here to avoid circular import errors from astropy.coordinates.solar_system import get_body_barycentric bary_sun_pos = get_body_barycentric("sun", from_coo.obstime) return matrix_transpose(rmat), bary_sun_pos @frame_transform_graph.transform(AffineTransform, ICRS, HeliocentricEclipticIAU76) def icrs_to_iau76_ecliptic(from_coo, to_frame): # get barycentric sun coordinate # this goes here to avoid circular import errors from astropy.coordinates.solar_system import get_body_barycentric bary_sun_pos = get_body_barycentric("sun", to_frame.obstime) # now compute the matrix to precess to the right orientation rmat = _obliquity_only_rotation_matrix() return rmat, (-bary_sun_pos).transform(rmat) @frame_transform_graph.transform(DynamicMatrixTransform, ICRS, CustomBarycentricEcliptic) def icrs_to_custombaryecliptic(from_coo, to_frame): return _obliquity_only_rotation_matrix(to_frame.obliquity) @frame_transform_graph.transform(DynamicMatrixTransform, CustomBarycentricEcliptic, ICRS) def custombaryecliptic_to_icrs(from_coo, to_frame): 
return matrix_transpose(icrs_to_custombaryecliptic(to_frame, from_coo))
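# --- Editorial usage sketch (not part of the astropy source): once the
# transforms above are registered, ecliptic conversions go through the
# ordinary transform machinery.
import astropy.units as u
from astropy.coordinates import BarycentricMeanEcliptic, SkyCoord

c = SkyCoord(ra=10 * u.deg, dec=20 * u.deg, distance=1 * u.pc, frame='icrs')
print(c.transform_to(BarycentricMeanEcliptic(equinox='J2000')))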
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Contains the transformation functions for getting from ICRS/HCRS to CIRS and anything in between (currently that means GCRS) """ import numpy as np from astropy import units as u from astropy.coordinates.baseframe import frame_transform_graph from astropy.coordinates.transformations import FunctionTransformWithFiniteDifference, AffineTransform from astropy.coordinates.representation import (SphericalRepresentation, CartesianRepresentation, UnitSphericalRepresentation) from astropy import _erfa as erfa from .icrs import ICRS from .gcrs import GCRS from .cirs import CIRS from .hcrs import HCRS from .utils import get_jd12, aticq, atciqz, get_cip, prepare_earth_position_vel # First the ICRS/CIRS related transforms @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, CIRS) def icrs_to_cirs(icrs_coo, cirs_frame): # first set up the astrometry context for ICRS<->CIRS jd1, jd2 = get_jd12(cirs_frame.obstime, 'tdb') x, y, s = get_cip(jd1, jd2) earth_pv, earth_heliocentric = prepare_earth_position_vel(cirs_frame.obstime) astrom = erfa.apci(jd1, jd2, earth_pv, earth_heliocentric, x, y, s) if icrs_coo.data.get_name() == 'unitspherical' or icrs_coo.data.to_cartesian().x.unit == u.one: # if no distance, just do the infinite-distance/no parallax calculation usrepr = icrs_coo.represent_as(UnitSphericalRepresentation) i_ra = usrepr.lon.to_value(u.radian) i_dec = usrepr.lat.to_value(u.radian) cirs_ra, cirs_dec = atciqz(i_ra, i_dec, astrom) newrep = UnitSphericalRepresentation(lat=u.Quantity(cirs_dec, u.radian, copy=False), lon=u.Quantity(cirs_ra, u.radian, copy=False), copy=False) else: # When there is a distance, we first offset for parallax to get the # astrometric coordinate direction and *then* run the ERFA transform for # no parallax/PM. 
This ensures reversibility and is more sensible for # inside solar system objects astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au, xyz_axis=-1, copy=False) newcart = icrs_coo.cartesian - astrom_eb srepr = newcart.represent_as(SphericalRepresentation) i_ra = srepr.lon.to_value(u.radian) i_dec = srepr.lat.to_value(u.radian) cirs_ra, cirs_dec = atciqz(i_ra, i_dec, astrom) newrep = SphericalRepresentation(lat=u.Quantity(cirs_dec, u.radian, copy=False), lon=u.Quantity(cirs_ra, u.radian, copy=False), distance=srepr.distance, copy=False) return cirs_frame.realize_frame(newrep) @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, ICRS) def cirs_to_icrs(cirs_coo, icrs_frame): srepr = cirs_coo.represent_as(SphericalRepresentation) cirs_ra = srepr.lon.to_value(u.radian) cirs_dec = srepr.lat.to_value(u.radian) # set up the astrometry context for ICRS<->cirs and then convert to # astrometric coordinate direction jd1, jd2 = get_jd12(cirs_coo.obstime, 'tdb') x, y, s = get_cip(jd1, jd2) earth_pv, earth_heliocentric = prepare_earth_position_vel(cirs_coo.obstime) astrom = erfa.apci(jd1, jd2, earth_pv, earth_heliocentric, x, y, s) i_ra, i_dec = aticq(cirs_ra, cirs_dec, astrom) if cirs_coo.data.get_name() == 'unitspherical' or cirs_coo.data.to_cartesian().x.unit == u.one: # if no distance, just use the coordinate direction to yield the # infinite-distance/no parallax answer newrep = UnitSphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False), lon=u.Quantity(i_ra, u.radian, copy=False), copy=False) else: # When there is a distance, apply the parallax/offset to the SSB as the # last step - ensures round-tripping with the icrs_to_cirs transform # the distance in intermedrep is *not* a real distance as it does not # include the offset back to the SSB intermedrep = SphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False), lon=u.Quantity(i_ra, u.radian, copy=False), distance=srepr.distance, copy=False) astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au, xyz_axis=-1, copy=False) newrep = intermedrep + astrom_eb return icrs_frame.realize_frame(newrep) @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, CIRS) def cirs_to_cirs(from_coo, to_frame): if np.all(from_coo.obstime == to_frame.obstime): return to_frame.realize_frame(from_coo.data) else: # the CIRS<-> CIRS transform actually goes through ICRS. This has a # subtle implication that a point in CIRS is uniquely determined # by the corresponding astrometric ICRS coordinate *at its # current time*. This has some subtle implications in terms of GR, but # is sort of glossed over in the current scheme because we are dropping # distances anyway. return from_coo.transform_to(ICRS).transform_to(to_frame) # Now the GCRS-related transforms to/from ICRS @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, GCRS) def icrs_to_gcrs(icrs_coo, gcrs_frame): # first set up the astrometry context for ICRS<->GCRS. There are a few steps... # get the position and velocity arrays for the observatory. Need to # have xyz in last dimension, and pos/vel in one-but-last. # (Note could use np.stack once our minimum numpy version is >=1.10.) 
obs_pv = erfa.pav2pv( gcrs_frame.obsgeoloc.get_xyz(xyz_axis=-1).to_value(u.m), gcrs_frame.obsgeovel.get_xyz(xyz_axis=-1).to_value(u.m/u.s)) # find the position and velocity of earth jd1, jd2 = get_jd12(gcrs_frame.obstime, 'tdb') earth_pv, earth_heliocentric = prepare_earth_position_vel(gcrs_frame.obstime) # get astrometry context object, astrom. astrom = erfa.apcs(jd1, jd2, obs_pv, earth_pv, earth_heliocentric) if icrs_coo.data.get_name() == 'unitspherical' or icrs_coo.data.to_cartesian().x.unit == u.one: # if no distance, just do the infinite-distance/no parallax calculation usrepr = icrs_coo.represent_as(UnitSphericalRepresentation) i_ra = usrepr.lon.to_value(u.radian) i_dec = usrepr.lat.to_value(u.radian) gcrs_ra, gcrs_dec = atciqz(i_ra, i_dec, astrom) newrep = UnitSphericalRepresentation(lat=u.Quantity(gcrs_dec, u.radian, copy=False), lon=u.Quantity(gcrs_ra, u.radian, copy=False), copy=False) else: # When there is a distance, we first offset for parallax to get the # BCRS coordinate direction and *then* run the ERFA transform for no # parallax/PM. This ensures reversibility and is more sensible for # inside solar system objects astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au, xyz_axis=-1, copy=False) newcart = icrs_coo.cartesian - astrom_eb srepr = newcart.represent_as(SphericalRepresentation) i_ra = srepr.lon.to_value(u.radian) i_dec = srepr.lat.to_value(u.radian) gcrs_ra, gcrs_dec = atciqz(i_ra, i_dec, astrom) newrep = SphericalRepresentation(lat=u.Quantity(gcrs_dec, u.radian, copy=False), lon=u.Quantity(gcrs_ra, u.radian, copy=False), distance=srepr.distance, copy=False) return gcrs_frame.realize_frame(newrep) @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, ICRS) def gcrs_to_icrs(gcrs_coo, icrs_frame): srepr = gcrs_coo.represent_as(SphericalRepresentation) gcrs_ra = srepr.lon.to_value(u.radian) gcrs_dec = srepr.lat.to_value(u.radian) # set up the astrometry context for ICRS<->GCRS and then convert to BCRS # coordinate direction obs_pv = erfa.pav2pv( gcrs_coo.obsgeoloc.get_xyz(xyz_axis=-1).to_value(u.m), gcrs_coo.obsgeovel.get_xyz(xyz_axis=-1).to_value(u.m/u.s)) jd1, jd2 = get_jd12(gcrs_coo.obstime, 'tdb') earth_pv, earth_heliocentric = prepare_earth_position_vel(gcrs_coo.obstime) astrom = erfa.apcs(jd1, jd2, obs_pv, earth_pv, earth_heliocentric) i_ra, i_dec = aticq(gcrs_ra, gcrs_dec, astrom) if gcrs_coo.data.get_name() == 'unitspherical' or gcrs_coo.data.to_cartesian().x.unit == u.one: # if no distance, just use the coordinate direction to yield the # infinite-distance/no parallax answer newrep = UnitSphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False), lon=u.Quantity(i_ra, u.radian, copy=False), copy=False) else: # When there is a distance, apply the parallax/offset to the SSB as the # last step - ensures round-tripping with the icrs_to_gcrs transform # the distance in intermedrep is *not* a real distance as it does not # include the offset back to the SSB intermedrep = SphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False), lon=u.Quantity(i_ra, u.radian, copy=False), distance=srepr.distance, copy=False) astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au, xyz_axis=-1, copy=False) newrep = intermedrep + astrom_eb return icrs_frame.realize_frame(newrep) @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, GCRS) def gcrs_to_gcrs(from_coo, to_frame): if (np.all(from_coo.obstime == to_frame.obstime) and np.all(from_coo.obsgeoloc == to_frame.obsgeoloc)): return 
to_frame.realize_frame(from_coo.data) else: # like CIRS, we do this self-transform via ICRS return from_coo.transform_to(ICRS).transform_to(to_frame) @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, HCRS) def gcrs_to_hcrs(gcrs_coo, hcrs_frame): if np.any(gcrs_coo.obstime != hcrs_frame.obstime): # if the GCRS obstime and HCRS obstime are not the same, we first # have to move to a GCRS where they are. frameattrs = gcrs_coo.get_frame_attr_names() frameattrs['obstime'] = hcrs_frame.obstime gcrs_coo = gcrs_coo.transform_to(GCRS(**frameattrs)) srepr = gcrs_coo.represent_as(SphericalRepresentation) gcrs_ra = srepr.lon.to_value(u.radian) gcrs_dec = srepr.lat.to_value(u.radian) # set up the astrometry context for ICRS<->GCRS and then convert to ICRS # coordinate direction obs_pv = erfa.pav2pv( gcrs_coo.obsgeoloc.get_xyz(xyz_axis=-1).to_value(u.m), gcrs_coo.obsgeovel.get_xyz(xyz_axis=-1).to_value(u.m/u.s)) jd1, jd2 = get_jd12(hcrs_frame.obstime, 'tdb') earth_pv, earth_heliocentric = prepare_earth_position_vel(gcrs_coo.obstime) astrom = erfa.apcs(jd1, jd2, obs_pv, earth_pv, earth_heliocentric) i_ra, i_dec = aticq(gcrs_ra, gcrs_dec, astrom) # convert to Quantity objects i_ra = u.Quantity(i_ra, u.radian, copy=False) i_dec = u.Quantity(i_dec, u.radian, copy=False) if gcrs_coo.data.get_name() == 'unitspherical' or gcrs_coo.data.to_cartesian().x.unit == u.one: # if no distance, just use the coordinate direction to yield the # infinite-distance/no parallax answer newrep = UnitSphericalRepresentation(lat=i_dec, lon=i_ra, copy=False) else: # When there is a distance, apply the parallax/offset to the # Heliocentre as the last step to ensure round-tripping with the # hcrs_to_gcrs transform # Note that the distance in intermedrep is *not* a real distance as it # does not include the offset back to the Heliocentre intermedrep = SphericalRepresentation(lat=i_dec, lon=i_ra, distance=srepr.distance, copy=False) # astrom['eh'] and astrom['em'] contain Sun to observer unit vector, # and distance, respectively. Shapes are (X, 3) and (X), where (X) is the # shape resulting from broadcasting the shape of the times object # against the shape of the pv array. # broadcast em to eh and scale eh eh = astrom['eh'] * astrom['em'][..., np.newaxis] eh = CartesianRepresentation(eh, unit=u.au, xyz_axis=-1, copy=False) newrep = intermedrep.to_cartesian() + eh return hcrs_frame.realize_frame(newrep) _NEED_ORIGIN_HINT = ("The input {0} coordinates do not have length units. This " "probably means you created coordinates with lat/lon but " "no distance.
Heliocentric<->ICRS transforms cannot " "function in this case because there is an origin shift.") @frame_transform_graph.transform(AffineTransform, HCRS, ICRS) def hcrs_to_icrs(hcrs_coo, icrs_frame): # this is just an origin translation so without a distance it cannot go ahead if isinstance(hcrs_coo.data, UnitSphericalRepresentation): raise u.UnitsError(_NEED_ORIGIN_HINT.format(hcrs_coo.__class__.__name__)) if hcrs_coo.data.differentials: from astropy.coordinates.solar_system import get_body_barycentric_posvel bary_sun_pos, bary_sun_vel = get_body_barycentric_posvel('sun', hcrs_coo.obstime) bary_sun_pos = bary_sun_pos.with_differentials(bary_sun_vel) else: from astropy.coordinates.solar_system import get_body_barycentric bary_sun_pos = get_body_barycentric('sun', hcrs_coo.obstime) bary_sun_vel = None return None, bary_sun_pos @frame_transform_graph.transform(AffineTransform, ICRS, HCRS) def icrs_to_hcrs(icrs_coo, hcrs_frame): # this is just an origin translation so without a distance it cannot go ahead if isinstance(icrs_coo.data, UnitSphericalRepresentation): raise u.UnitsError(_NEED_ORIGIN_HINT.format(icrs_coo.__class__.__name__)) if icrs_coo.data.differentials: from astropy.coordinates.solar_system import get_body_barycentric_posvel bary_sun_pos, bary_sun_vel = get_body_barycentric_posvel('sun', hcrs_frame.obstime) bary_sun_pos = -bary_sun_pos.with_differentials(-bary_sun_vel) else: from astropy.coordinates.solar_system import get_body_barycentric bary_sun_pos = -get_body_barycentric('sun', hcrs_frame.obstime) bary_sun_vel = None return None, bary_sun_pos @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, HCRS, HCRS) def hcrs_to_hcrs(from_coo, to_frame): if np.all(from_coo.obstime == to_frame.obstime): return to_frame.realize_frame(from_coo.data) else: # like CIRS, we do this self-transform via ICRS return from_coo.transform_to(ICRS).transform_to(to_frame)
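# --- Editorial usage sketch (not part of the astropy source): an ICRS->CIRS
# conversion exercises the apci/atciqz path above, using the built-in
# solar-system ephemeris for the Earth position and velocity.
import astropy.units as u
from astropy.coordinates import CIRS, SkyCoord
from astropy.time import Time

c = SkyCoord(ra=10 * u.deg, dec=20 * u.deg, frame='icrs')
print(c.transform_to(CIRS(obstime=Time('2015-06-01'))))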
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy import units as u from astropy.utils.compat import namedtuple_asdict from astropy.coordinates import representation as r from astropy.coordinates.transformations import DynamicMatrixTransform, FunctionTransform from astropy.coordinates.baseframe import (frame_transform_graph, RepresentationMapping, BaseCoordinateFrame) from astropy.coordinates.attributes import CoordinateAttribute, QuantityAttribute from astropy.coordinates.matrix_utilities import (rotation_matrix, matrix_product, matrix_transpose) _skyoffset_cache = {} def make_skyoffset_cls(framecls): """ Create a new class that is the sky offset frame for a specific class of origin frame. If such a class has already been created for this frame, the same class will be returned. The new class will always have component names for spherical coordinates of ``lon``/``lat``. Parameters ---------- framecls : coordinate frame class (i.e., subclass of `~astropy.coordinates.BaseCoordinateFrame`) The class to create the SkyOffsetFrame of. Returns ------- skyoffsetframecls : class The class for the new skyoffset frame. Notes ----- This function is necessary because Astropy's frame transformations depend on connection between specific frame *classes*. So each type of frame needs its own distinct skyoffset frame class. This function generates just that class, as well as ensuring that only one example of such a class actually gets created in any given python session. """ if framecls in _skyoffset_cache: return _skyoffset_cache[framecls] # the class of a class object is the metaclass framemeta = framecls.__class__ class SkyOffsetMeta(framemeta): """ This metaclass renames the class to be "SkyOffset<framecls>" and also adjusts the frame specific representation info so that spherical names are always "lon" and "lat" (instead of e.g. "ra" and "dec"). """ def __new__(cls, name, bases, members): # Only 'origin' is needed here, to set the origin frame properly. members['origin'] = CoordinateAttribute(frame=framecls, default=None) # This has to be done because FrameMeta will set these attributes # to the defaults from BaseCoordinateFrame when it creates the base # SkyOffsetFrame class initially. members['_default_representation'] = framecls._default_representation members['_default_differential'] = framecls._default_differential newname = name[:-5] if name.endswith('Frame') else name newname += framecls.__name__ return super().__new__(cls, newname, bases, members) # We need this to handle the intermediate metaclass correctly, otherwise we could # just subclass SkyOffsetFrame. _SkyOffsetFramecls = SkyOffsetMeta('SkyOffsetFrame', (SkyOffsetFrame, framecls), {'__doc__': SkyOffsetFrame.__doc__}) @frame_transform_graph.transform(FunctionTransform, _SkyOffsetFramecls, _SkyOffsetFramecls) def skyoffset_to_skyoffset(from_skyoffset_coord, to_skyoffset_frame): """Transform between two skyoffset frames.""" # This transform goes through the parent frames on each side. 
# from_frame -> from_frame.origin -> to_frame.origin -> to_frame intermediate_from = from_skyoffset_coord.transform_to(from_skyoffset_coord.origin) intermediate_to = intermediate_from.transform_to(to_skyoffset_frame.origin) return intermediate_to.transform_to(to_skyoffset_frame) @frame_transform_graph.transform(DynamicMatrixTransform, framecls, _SkyOffsetFramecls) def reference_to_skyoffset(reference_frame, skyoffset_frame): """Convert a reference coordinate to a sky offset frame.""" # Define rotation matrices along the position angle vector, and # relative to the origin. origin = skyoffset_frame.origin.spherical mat1 = rotation_matrix(-skyoffset_frame.rotation, 'x') mat2 = rotation_matrix(-origin.lat, 'y') mat3 = rotation_matrix(origin.lon, 'z') return matrix_product(mat1, mat2, mat3) @frame_transform_graph.transform(DynamicMatrixTransform, _SkyOffsetFramecls, framecls) def skyoffset_to_reference(skyoffset_coord, reference_frame): """Convert a sky offset frame coordinate to the reference frame.""" # use the forward transform, but just invert it R = reference_to_skyoffset(reference_frame, skyoffset_coord) # transpose is the inverse because R is a rotation matrix return matrix_transpose(R) _skyoffset_cache[framecls] = _SkyOffsetFramecls return _SkyOffsetFramecls class SkyOffsetFrame(BaseCoordinateFrame): """ A frame which is relative to some specific position and oriented to match its frame. SkyOffsetFrames always have component names for spherical coordinates of ``lon``/``lat``, *not* the component names for the frame of ``origin``. This is useful for calculating offsets and dithers in the frame of the sky relative to an arbitrary position. Coordinates in this frame are both centered on the position specified by the ``origin`` coordinate, *and* they are oriented in the same manner as the ``origin`` frame. E.g., if ``origin`` is `~astropy.coordinates.ICRS`, this object's ``lat`` will be pointed in the direction of Dec, while ``lon`` will point in the direction of RA. For more on skyoffset frames, see :ref:`astropy-skyoffset-frames`. Parameters ---------- representation : `BaseRepresentation` or None A representation object or None to have no data (or use the other keywords) origin : `SkyCoord` or low-level coordinate object. The coordinate which specifies the origin of this frame. Note that this origin is used purely for on-sky location/rotation. It can have a ``distance`` but it will not be used by this ``SkyOffsetFrame``. rotation : `~astropy.coordinates.Angle` or `~astropy.units.Quantity` with angle units The final rotation of the frame about the ``origin``. The sign of the rotation is the left-hand rule. That is, an object at a particular position angle in the un-rotated system will be sent to the positive latitude (z) direction in the final frame. Notes ----- ``SkyOffsetFrame`` is a factory class. That is, the objects that it yields are *not* actually objects of class ``SkyOffsetFrame``. Instead, distinct classes are created on-the-fly for whatever the frame class is of ``origin``. """ rotation = QuantityAttribute(default=0, unit=u.deg) origin = CoordinateAttribute(default=None, frame=None) def __new__(cls, *args, **kwargs): # We don't want to call this method if we've already set up # a skyoffset frame for this class. if not (issubclass(cls, SkyOffsetFrame) and cls is not SkyOffsetFrame): # We get the origin argument, and handle it here.
try: origin_frame = kwargs['origin'] except KeyError: raise TypeError("Can't initialize an SkyOffsetFrame without origin= keyword.") if hasattr(origin_frame, 'frame'): origin_frame = origin_frame.frame newcls = make_skyoffset_cls(origin_frame.__class__) return newcls.__new__(newcls, *args, **kwargs) # http://stackoverflow.com/questions/19277399/why-does-object-new-work-differently-in-these-three-cases # See above for why this is necessary. Basically, because some child # may override __new__, we must override it here to never pass # arguments to the object.__new__ method. if super().__new__ is object.__new__: return super().__new__(cls) return super().__new__(cls, *args, **kwargs) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.origin is not None and not self.origin.has_data: raise ValueError('The origin supplied to SkyOffsetFrame has no ' 'data.') if self.has_data and hasattr(self.data, 'lon'): self.data.lon.wrap_angle = 180*u.deg
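# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module).  It shows
# how the factory above behaves in practice; the coordinate values are
# arbitrary illustrative numbers.
import astropy.units as u
from astropy.coordinates import ICRS, SkyOffsetFrame

origin = ICRS(ra=10*u.deg, dec=20*u.deg)
# Instantiating SkyOffsetFrame dispatches through __new__ to a dynamically
# generated "SkyOffsetICRS" class, which is cached per origin frame class.
offset_frame = SkyOffsetFrame(origin=origin)
target = ICRS(ra=11*u.deg, dec=21*u.deg)
offset = target.transform_to(offset_frame)
# offset.lon and offset.lat are now small angles relative to the origin.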
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting to "observed" systems from CIRS.
Currently that just means AltAz.
"""
import numpy as np

from astropy import units as u
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.transformations import FunctionTransformWithFiniteDifference
from astropy.coordinates.representation import (SphericalRepresentation,
                                                UnitSphericalRepresentation)
from astropy import _erfa as erfa

from .cirs import CIRS
from .altaz import AltAz
from .utils import get_polar_motion, get_dut1utc, get_jd12, PIOVER2


@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, AltAz)
def cirs_to_altaz(cirs_coo, altaz_frame):
    if np.any(cirs_coo.obstime != altaz_frame.obstime):
        # the only frame attribute for the current CIRS is the obstime, but this
        # would need to be updated if a future change allowed specifying an
        # Earth location algorithm or something
        cirs_coo = cirs_coo.transform_to(CIRS(obstime=altaz_frame.obstime))

    # we use the same obstime everywhere now that we know they're the same
    obstime = cirs_coo.obstime

    # if the data are UnitSphericalRepresentation, we can skip the distance calculations
    is_unitspherical = (isinstance(cirs_coo.data, UnitSphericalRepresentation) or
                        cirs_coo.cartesian.x.unit == u.one)

    if is_unitspherical:
        usrepr = cirs_coo.represent_as(UnitSphericalRepresentation)
        cirs_ra = usrepr.lon.to_value(u.radian)
        cirs_dec = usrepr.lat.to_value(u.radian)
    else:
        # compute an "astrometric" ra/dec, i.e., the direction of the
        # displacement vector from the observer to the target in CIRS
        loccirs = altaz_frame.location.get_itrs(cirs_coo.obstime).transform_to(cirs_coo)
        diffrepr = (cirs_coo.cartesian - loccirs.cartesian).represent_as(UnitSphericalRepresentation)
        cirs_ra = diffrepr.lon.to_value(u.radian)
        cirs_dec = diffrepr.lat.to_value(u.radian)

    lon, lat, height = altaz_frame.location.to_geodetic('WGS84')
    xp, yp = get_polar_motion(obstime)

    # first set up the astrometry context for CIRS<->AltAz
    jd1, jd2 = get_jd12(obstime, 'utc')
    astrom = erfa.apio13(jd1, jd2,
                         get_dut1utc(obstime),
                         lon.to_value(u.radian), lat.to_value(u.radian),
                         height.to_value(u.m),
                         xp, yp,  # polar motion
                         # all below are already in correct units because they
                         # are QuantityFrameAttributes
                         altaz_frame.pressure.value,
                         altaz_frame.temperature.value,
                         altaz_frame.relative_humidity.value,
                         altaz_frame.obswl.value)

    az, zen, _, _, _ = erfa.atioq(cirs_ra, cirs_dec, astrom)

    if is_unitspherical:
        rep = UnitSphericalRepresentation(lat=u.Quantity(PIOVER2 - zen, u.radian, copy=False),
                                          lon=u.Quantity(az, u.radian, copy=False),
                                          copy=False)
    else:
        # now we get the distance as the cartesian distance from the earth
        # location to the coordinate location
        locitrs = altaz_frame.location.get_itrs(obstime)
        distance = locitrs.separation_3d(cirs_coo)
        rep = SphericalRepresentation(lat=u.Quantity(PIOVER2 - zen, u.radian, copy=False),
                                      lon=u.Quantity(az, u.radian, copy=False),
                                      distance=distance,
                                      copy=False)
    return altaz_frame.realize_frame(rep)


@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, AltAz, CIRS)
def altaz_to_cirs(altaz_coo, cirs_frame):
    usrepr = altaz_coo.represent_as(UnitSphericalRepresentation)
    az = usrepr.lon.to_value(u.radian)
    zen = PIOVER2 - usrepr.lat.to_value(u.radian)

    lon, lat, height = altaz_coo.location.to_geodetic('WGS84')
    xp, yp = get_polar_motion(altaz_coo.obstime)

    # first set up the astrometry context for ICRS<->CIRS at the altaz_coo time
    jd1, jd2 = get_jd12(altaz_coo.obstime, 'utc')
    astrom = erfa.apio13(jd1, jd2,
                         get_dut1utc(altaz_coo.obstime),
                         lon.to_value(u.radian), lat.to_value(u.radian),
                         height.to_value(u.m),
                         xp, yp,  # polar motion
                         # all below are already in correct units because they
                         # are QuantityFrameAttributes
                         altaz_coo.pressure.value,
                         altaz_coo.temperature.value,
                         altaz_coo.relative_humidity.value,
                         altaz_coo.obswl.value)

    # the 'A' indicates zen/az inputs
    cirs_ra, cirs_dec = erfa.atoiq('A', az, zen, astrom)*u.radian

    if isinstance(altaz_coo.data, UnitSphericalRepresentation) or altaz_coo.cartesian.x.unit == u.one:
        cirs_at_aa_time = CIRS(ra=cirs_ra, dec=cirs_dec, distance=None,
                               obstime=altaz_coo.obstime)
    else:
        # treat the output of atoiq as an "astrometric" RA/DEC, so to get the
        # actual RA/Dec from the observer's vantage point, we have to reverse
        # the vector operation of cirs_to_altaz (see there for more detail)
        loccirs = altaz_coo.location.get_itrs(altaz_coo.obstime).transform_to(cirs_frame)
        astrometric_rep = SphericalRepresentation(lon=cirs_ra, lat=cirs_dec,
                                                  distance=altaz_coo.distance)
        newrepr = astrometric_rep + loccirs.cartesian
        cirs_at_aa_time = CIRS(newrepr, obstime=altaz_coo.obstime)

    # this final transform may be a no-op if the obstimes are the same
    return cirs_at_aa_time.transform_to(cirs_frame)


@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, AltAz, AltAz)
def altaz_to_altaz(from_coo, to_frame):
    # for now we just implement this through CIRS to make sure we get everything
    # covered
    return from_coo.transform_to(CIRS(obstime=from_coo.obstime)).transform_to(to_frame)
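# ---------------------------------------------------------------------------
# Usage sketch (editor's addition): exercising the CIRS<->AltAz transforms
# above.  The site and epoch are arbitrary illustrative values; with the
# default pressure=0 no refraction correction is applied.
import astropy.units as u
from astropy.time import Time
from astropy.coordinates import CIRS, AltAz, EarthLocation

t = Time('2018-03-21T00:00:00')
site = EarthLocation(lat=33*u.deg, lon=-116*u.deg, height=1700*u.m)
cirs = CIRS(ra=10*u.deg, dec=20*u.deg, obstime=t)
aa = cirs.transform_to(AltAz(obstime=t, location=site))
roundtrip = aa.transform_to(CIRS(obstime=t))  # should closely match `cirs`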
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import numpy as np

from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping, base_doc
from astropy.coordinates.attributes import (Attribute, TimeAttribute,
                                            QuantityAttribute, EarthLocationAttribute)

__all__ = ['AltAz']


_90DEG = 90*u.deg

doc_components = """
    az : `Angle`, optional, must be keyword
        The Azimuth for this object (``alt`` must also be given and
        ``representation`` must be None).
    alt : `Angle`, optional, must be keyword
        The Altitude for this object (``az`` must also be given and
        ``representation`` must be None).
    distance : :class:`~astropy.units.Quantity`, optional, must be keyword
        The Distance for this object along the line-of-sight.

    pm_az_cosalt : :class:`~astropy.units.Quantity`, optional, must be keyword
        The proper motion in azimuth (including the ``cos(alt)`` factor) for
        this object (``pm_alt`` must also be given).
    pm_alt : :class:`~astropy.units.Quantity`, optional, must be keyword
        The proper motion in altitude for this object (``pm_az_cosalt`` must
        also be given).
    radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
        The radial velocity of this object."""

doc_footer = """
    Other parameters
    ----------------
    obstime : `~astropy.time.Time`
        The time at which the observation is taken.  Used for determining the
        position and orientation of the Earth.
    location : `~astropy.coordinates.EarthLocation`
        The location on the Earth.  This can be specified either as an
        `~astropy.coordinates.EarthLocation` object or as anything that can be
        transformed to an `~astropy.coordinates.ITRS` frame.
    pressure : `~astropy.units.Quantity`
        The atmospheric pressure as an `~astropy.units.Quantity` with pressure
        units.  This is necessary for performing refraction corrections.
        Setting this to 0 (the default) will disable refraction calculations
        when transforming to/from this frame.
    temperature : `~astropy.units.Quantity`
        The ground-level temperature as an `~astropy.units.Quantity` in
        deg C.  This is necessary for performing refraction corrections.
    relative_humidity : `~astropy.units.Quantity` or number
        The relative humidity as a dimensionless quantity between 0 and 1.
        This is necessary for performing refraction corrections.
    obswl : `~astropy.units.Quantity`
        The average wavelength of observations as an `~astropy.units.Quantity`
        with length units.  This is necessary for performing refraction
        corrections.

    Notes
    -----
    The refraction model is based on that implemented in ERFA, which is fast
    but becomes inaccurate for altitudes below about 5 degrees.  Near and below
    altitudes of 0, it can even give meaningless answers, and in this case
    transforming to AltAz and back to another frame can give highly discrepant
    results.  For much better numerical stability, leave the ``pressure`` at
    ``0`` (the default), which disables the refraction correction and yields
    "topocentric" horizontal coordinates.
    """


@format_doc(base_doc, components=doc_components, footer=doc_footer)
class AltAz(BaseCoordinateFrame):
    """
    A coordinate or frame in the Altitude-Azimuth system (Horizontal
    coordinates).  Azimuth is oriented East of North (i.e., N=0, E=90 degrees).

    This frame is assumed to *include* refraction effects if the ``pressure``
    frame attribute is non-zero.

    The frame attributes are listed under **Other Parameters**, which are
    necessary for transforming from AltAz to some other system.
    """

    frame_specific_representation_info = {
        r.SphericalRepresentation: [
            RepresentationMapping('lon', 'az'),
            RepresentationMapping('lat', 'alt')
        ]
    }

    default_representation = r.SphericalRepresentation
    default_differential = r.SphericalCosLatDifferential

    obstime = TimeAttribute(default=None)
    location = EarthLocationAttribute(default=None)
    pressure = QuantityAttribute(default=0, unit=u.hPa)
    temperature = QuantityAttribute(default=0, unit=u.deg_C)
    relative_humidity = QuantityAttribute(default=0, unit=u.dimensionless_unscaled)
    obswl = QuantityAttribute(default=1*u.micron, unit=u.micron)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @property
    def secz(self):
        """
        Secant of the zenith angle for this coordinate, a common estimate of
        the airmass.
        """
        return 1/np.sin(self.alt)

    @property
    def zen(self):
        """
        The zenith angle for this coordinate.
        """
        return _90DEG.to(self.alt.unit) - self.alt


# self-transform defined in cirs_observed_transforms.py
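# ---------------------------------------------------------------------------
# Quick numerical check (editor's addition): for alt = 30 deg the zenith
# angle is 60 deg, and sec(z) = 1/cos(z) = 1/sin(alt) = 2.
import astropy.units as u
from astropy.coordinates import AltAz

aa = AltAz(az=40*u.deg, alt=30*u.deg)
assert abs(aa.secz - 2.0) < 1e-12
assert abs(aa.zen - 60*u.deg) < 1e-9*u.deg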
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.time import Time
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import (BaseCoordinateFrame, RepresentationMapping,
                                           frame_transform_graph, base_doc)
from astropy.coordinates.transformations import AffineTransform
from astropy.coordinates.attributes import DifferentialAttribute

from .baseradec import BaseRADecFrame, doc_components as doc_components_radec
from .icrs import ICRS
from .galactic import Galactic

# For speed
J2000 = Time('J2000')

v_bary_Schoenrich2010 = r.CartesianDifferential([11.1, 12.24, 7.25]*u.km/u.s)

__all__ = ['LSR', 'GalacticLSR']


doc_footer_lsr = """
    Other parameters
    ----------------
    v_bary : `~astropy.coordinates.representation.CartesianDifferential`
        The velocity of the solar system barycenter with respect to the LSR,
        in Galactic cartesian velocity components.
"""


@format_doc(base_doc, components=doc_components_radec, footer=doc_footer_lsr)
class LSR(BaseRADecFrame):
    r"""A coordinate or frame in the Local Standard of Rest (LSR).

    This coordinate frame is axis-aligned and co-spatial with `ICRS`, but has
    a velocity offset relative to the solar system barycenter to remove the
    peculiar motion of the sun relative to the LSR. Roughly, the LSR is the
    mean velocity of the stars in the solar neighborhood, though the precise
    definition depends on the study. As defined in Schönrich et al. (2010):
    "The LSR is the rest frame at the location of the Sun of a star that would
    be on a circular orbit in the gravitational potential one would obtain by
    azimuthally averaging away non-axisymmetric features in the actual Galactic
    potential." No such orbit truly exists, but it is still a commonly used
    velocity frame.

    We use default values from Schönrich et al. (2010) for the barycentric
    velocity relative to the LSR, which is defined in Galactic (right-handed)
    cartesian velocity components
    :math:`(U, V, W) = (11.1, 12.24, 7.25)~{{\rm km}}~{{\rm s}}^{{-1}}`. These
    values are customizable via the ``v_bary`` argument which specifies the
    velocity of the solar system barycenter with respect to the LSR.

    The frame attributes are listed under **Other Parameters**.
    """

    # frame attributes:
    v_bary = DifferentialAttribute(default=v_bary_Schoenrich2010,
                                   allowed_classes=[r.CartesianDifferential])


@frame_transform_graph.transform(AffineTransform, ICRS, LSR)
def icrs_to_lsr(icrs_coord, lsr_frame):
    v_bary_gal = Galactic(lsr_frame.v_bary.to_cartesian())
    v_bary_icrs = v_bary_gal.transform_to(icrs_coord)
    v_offset = v_bary_icrs.data.represent_as(r.CartesianDifferential)
    offset = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=v_offset)
    return None, offset


@frame_transform_graph.transform(AffineTransform, LSR, ICRS)
def lsr_to_icrs(lsr_coord, icrs_frame):
    v_bary_gal = Galactic(lsr_coord.v_bary.to_cartesian())
    v_bary_icrs = v_bary_gal.transform_to(icrs_frame)
    v_offset = v_bary_icrs.data.represent_as(r.CartesianDifferential)
    offset = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=-v_offset)
    return None, offset

# ------------------------------------------------------------------------------

doc_components_gal = """
    l : `Angle`, optional, must be keyword
        The Galactic longitude for this object (``b`` must also be given and
        ``representation`` must be None).
    b : `Angle`, optional, must be keyword
        The Galactic latitude for this object (``l`` must also be given and
        ``representation`` must be None).
    distance : `~astropy.units.Quantity`, optional, must be keyword
        The Distance for this object along the line-of-sight.
        (``representation`` must be None).

    pm_l_cosb : :class:`~astropy.units.Quantity`, optional, must be keyword
        The proper motion in Galactic longitude (including the ``cos(b)`` term)
        for this object (``pm_b`` must also be given).
    pm_b : :class:`~astropy.units.Quantity`, optional, must be keyword
        The proper motion in Galactic latitude for this object (``pm_l_cosb``
        must also be given).
    radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
        The radial velocity of this object.
"""


@format_doc(base_doc, components=doc_components_gal, footer=doc_footer_lsr)
class GalacticLSR(BaseCoordinateFrame):
    r"""A coordinate or frame in the Local Standard of Rest (LSR), axis-aligned
    to the `Galactic` frame.

    This coordinate frame is axis-aligned and co-spatial with the `Galactic`
    frame, but has a velocity offset relative to the solar system barycenter
    to remove the peculiar motion of the sun relative to the LSR. Roughly, the
    LSR is the mean velocity of the stars in the solar neighborhood, though
    the precise definition depends on the study. As defined in Schönrich et
    al. (2010): "The LSR is the rest frame at the location of the Sun of a
    star that would be on a circular orbit in the gravitational potential one
    would obtain by azimuthally averaging away non-axisymmetric features in
    the actual Galactic potential." No such orbit truly exists, but it is
    still a commonly used velocity frame.

    We use default values from Schönrich et al. (2010) for the barycentric
    velocity relative to the LSR, which is defined in Galactic (right-handed)
    cartesian velocity components
    :math:`(U, V, W) = (11.1, 12.24, 7.25)~{{\rm km}}~{{\rm s}}^{{-1}}`. These
    values are customizable via the ``v_bary`` argument which specifies the
    velocity of the solar system barycenter with respect to the LSR.

    The frame attributes are listed under **Other Parameters**.
    """

    frame_specific_representation_info = {
        r.SphericalRepresentation: [
            RepresentationMapping('lon', 'l'),
            RepresentationMapping('lat', 'b')
        ]
    }

    default_representation = r.SphericalRepresentation
    default_differential = r.SphericalCosLatDifferential

    # frame attributes:
    v_bary = DifferentialAttribute(default=v_bary_Schoenrich2010)


@frame_transform_graph.transform(AffineTransform, Galactic, GalacticLSR)
def galactic_to_galacticlsr(galactic_coord, lsr_frame):
    v_bary_gal = Galactic(lsr_frame.v_bary.to_cartesian())
    v_offset = v_bary_gal.data.represent_as(r.CartesianDifferential)
    offset = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=v_offset)
    return None, offset


@frame_transform_graph.transform(AffineTransform, GalacticLSR, Galactic)
def galacticlsr_to_galactic(lsr_coord, galactic_frame):
    v_bary_gal = Galactic(lsr_coord.v_bary.to_cartesian())
    v_offset = v_bary_gal.data.represent_as(r.CartesianDifferential)
    offset = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=-v_offset)
    return None, offset
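# ---------------------------------------------------------------------------
# Usage sketch (editor's addition): the affine ICRS<->LSR transforms above
# offset only the velocity components, not the sky position.  Values are
# arbitrary illustrative numbers.
import astropy.units as u
from astropy.coordinates import ICRS, LSR

icrs = ICRS(ra=10*u.deg, dec=20*u.deg, distance=1*u.kpc,
            pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
            radial_velocity=0*u.km/u.s)
lsr = icrs.transform_to(LSR)
# lsr.ra/lsr.dec match icrs.ra/icrs.dec; the proper motions and radial
# velocity now include the Schoenrich et al. (2010) v_bary offset.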
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from astropy.coordinates.matrix_utilities import (rotation_matrix,
                                                  matrix_product,
                                                  matrix_transpose)
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.transformations import DynamicMatrixTransform

from .fk5 import FK5
from .fk4 import FK4NoETerms
from .utils import EQUINOX_B1950, EQUINOX_J2000
from .galactic import Galactic


# Galactic to/from FK4/FK5 ----------------------->
# can't be static because the equinox is needed
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, Galactic)
def fk5_to_gal(fk5coord, galframe):
    # need to precess to J2000 first
    pmat = fk5coord._precession_matrix(fk5coord.equinox, EQUINOX_J2000)
    mat1 = rotation_matrix(180 - Galactic._lon0_J2000.degree, 'z')
    mat2 = rotation_matrix(90 - Galactic._ngp_J2000.dec.degree, 'y')
    mat3 = rotation_matrix(Galactic._ngp_J2000.ra.degree, 'z')
    return matrix_product(mat1, mat2, mat3, pmat)


@frame_transform_graph.transform(DynamicMatrixTransform, Galactic, FK5)
def _gal_to_fk5(galcoord, fk5frame):
    return matrix_transpose(fk5_to_gal(fk5frame, galcoord))


@frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, Galactic)
def fk4_to_gal(fk4coords, galframe):
    mat1 = rotation_matrix(180 - Galactic._lon0_B1950.degree, 'z')
    mat2 = rotation_matrix(90 - Galactic._ngp_B1950.dec.degree, 'y')
    mat3 = rotation_matrix(Galactic._ngp_B1950.ra.degree, 'z')
    matprec = fk4coords._precession_matrix(fk4coords.equinox, EQUINOX_B1950)
    return matrix_product(mat1, mat2, mat3, matprec)


@frame_transform_graph.transform(DynamicMatrixTransform, Galactic, FK4NoETerms)
def gal_to_fk4(galcoords, fk4frame):
    return matrix_transpose(fk4_to_gal(fk4frame, galcoords))
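# ---------------------------------------------------------------------------
# Consistency sketch (editor's addition): since the Galactic->FK5 matrix is
# the transpose (inverse) of FK5->Galactic, a round trip should be lossless
# to numerical precision.
import astropy.units as u
from astropy.units import allclose
from astropy.coordinates import FK5, Galactic

c = FK5(ra=10*u.deg, dec=20*u.deg)
back = c.transform_to(Galactic).transform_to(FK5)
assert allclose(c.ra, back.ra) and allclose(c.dec, back.dec)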
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy import units as u from astropy.utils.decorators import format_doc from astropy.coordinates.angles import Angle from astropy.coordinates import representation as r from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping, base_doc # these are needed for defining the NGP from .fk5 import FK5 from .fk4 import FK4NoETerms __all__ = ['Galactic'] doc_components = """ l : `Angle`, optional, must be keyword The Galactic longitude for this object (``b`` must also be given and ``representation`` must be None). b : `Angle`, optional, must be keyword The Galactic latitude for this object (``l`` must also be given and ``representation`` must be None). distance : `~astropy.units.Quantity`, optional, must be keyword The Distance for this object along the line-of-sight. pm_l_cosb : :class:`~astropy.units.Quantity`, optional, must be keyword The proper motion in Galactic longitude (including the ``cos(b)`` term) for this object (``pm_b`` must also be given). pm_b : :class:`~astropy.units.Quantity`, optional, must be keyword The proper motion in Galactic latitude for this object (``pm_l_cosb`` must also be given). radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword The radial velocity of this object. """ doc_footer = """ Notes ----- .. [1] Blaauw, A.; Gum, C. S.; Pawsey, J. L.; Westerhout, G. (1960), "The new I.A.U. system of galactic coordinates (1958 revision)," `MNRAS, Vol 121, pp.123 <http://adsabs.harvard.edu/abs/1960MNRAS.121..123B>`_. """ @format_doc(base_doc, components=doc_components, footer=doc_footer) class Galactic(BaseCoordinateFrame): """ A coordinate or frame in the Galactic coordinate system. This frame is used in a variety of Galactic contexts because it has as its x-y plane the plane of the Milky Way. The positive x direction (i.e., the l=0, b=0 direction) points to the center of the Milky Way and the z-axis points toward the North Galactic Pole (following the IAU's 1958 definition [1]_). However, unlike the `~astropy.coordinates.Galactocentric` frame, the *origin* of this frame in 3D space is the solar system barycenter, not the center of the Milky Way. """ frame_specific_representation_info = { r.SphericalRepresentation: [ RepresentationMapping('lon', 'l'), RepresentationMapping('lat', 'b') ], r.CartesianRepresentation: [ RepresentationMapping('x', 'u'), RepresentationMapping('y', 'v'), RepresentationMapping('z', 'w') ], r.CartesianDifferential: [ RepresentationMapping('d_x', 'U', u.km/u.s), RepresentationMapping('d_y', 'V', u.km/u.s), RepresentationMapping('d_z', 'W', u.km/u.s) ] } default_representation = r.SphericalRepresentation default_differential = r.SphericalCosLatDifferential # North galactic pole and zeropoint of l in FK4/FK5 coordinates. Needed for # transformations to/from FK4/5 # These are from the IAU's definition of galactic coordinates _ngp_B1950 = FK4NoETerms(ra=192.25*u.degree, dec=27.4*u.degree) _lon0_B1950 = Angle(123, u.degree) # These are *not* from Reid & Brunthaler 2004 - instead, they were # derived by doing: # # >>> FK4NoETerms(ra=192.25*u.degree, dec=27.4*u.degree).transform_to(FK5) # # This gives better consistency with other codes than using the values # from Reid & Brunthaler 2004 and the best self-consistency between FK5 # -> Galactic and FK5 -> FK4 -> Galactic. The lon0 angle was found by # optimizing the self-consistency. 
_ngp_J2000 = FK5(ra=192.8594812065348*u.degree, dec=27.12825118085622*u.degree) _lon0_J2000 = Angle(122.9319185680026, u.degree)
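# ---------------------------------------------------------------------------
# Usage sketch (editor's addition): the CartesianRepresentation mappings in
# Galactic rename x/y/z to u/v/w, so the units module is aliased to `un` here
# to avoid shadowing the `u` component.  This constructor pattern is my
# reading of those mappings, not something taken from the original file.
import astropy.units as un
from astropy.coordinates import Galactic

g = Galactic(u=1*un.kpc, v=2*un.kpc, w=0.5*un.kpc,
             representation_type='cartesian')
assert g.u == 1*un.kpc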
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from astropy.coordinates.matrix_utilities import (rotation_matrix,
                                                  matrix_product,
                                                  matrix_transpose)
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.transformations import DynamicMatrixTransform

from .fk5 import FK5
from .icrs import ICRS
from .utils import EQUINOX_J2000


def _icrs_to_fk5_matrix():
    """
    B-matrix from USNO circular 179.  Used by the ICRS->FK5 transformation
    functions.
    """
    eta0 = -19.9 / 3600000.
    xi0 = 9.1 / 3600000.
    da0 = -22.9 / 3600000.

    m1 = rotation_matrix(-eta0, 'x')
    m2 = rotation_matrix(xi0, 'y')
    m3 = rotation_matrix(da0, 'z')

    return matrix_product(m1, m2, m3)


# define this here because it only needs to be computed once
_ICRS_TO_FK5_J2000_MAT = _icrs_to_fk5_matrix()


@frame_transform_graph.transform(DynamicMatrixTransform, ICRS, FK5)
def icrs_to_fk5(icrscoord, fk5frame):
    # ICRS is by design very close to J2000 equinox
    pmat = fk5frame._precession_matrix(EQUINOX_J2000, fk5frame.equinox)
    return matrix_product(pmat, _ICRS_TO_FK5_J2000_MAT)


# can't be static because the equinox is needed
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, ICRS)
def fk5_to_icrs(fk5coord, icrsframe):
    # ICRS is by design very close to J2000 equinox
    pmat = fk5coord._precession_matrix(fk5coord.equinox, EQUINOX_J2000)
    return matrix_product(matrix_transpose(_ICRS_TO_FK5_J2000_MAT), pmat)
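# ---------------------------------------------------------------------------
# Sanity sketch (editor's addition, assumes the module context above): the
# frame-bias matrix is a pure rotation, so its transpose is its inverse;
# this is why fk5_to_icrs can simply reuse the transposed matrix.
import numpy as np

assert np.allclose(matrix_product(_ICRS_TO_FK5_J2000_MAT,
                                  matrix_transpose(_ICRS_TO_FK5_J2000_MAT)),
                   np.eye(3))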
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from astropy.utils.decorators import format_doc
from astropy.coordinates.attributes import TimeAttribute
from astropy.coordinates.baseframe import base_doc
from .baseradec import doc_components, BaseRADecFrame
from .utils import DEFAULT_OBSTIME

__all__ = ['CIRS']


doc_footer = """
    Other parameters
    ----------------
    obstime : `~astropy.time.Time`
        The time at which the observation is taken.  Used for determining the
        position of the Earth and its precession.
"""


@format_doc(base_doc, components=doc_components, footer=doc_footer)
class CIRS(BaseRADecFrame):
    """
    A coordinate or frame in the Celestial Intermediate Reference System (CIRS).

    The frame attributes are listed under **Other Parameters**.
    """

    obstime = TimeAttribute(default=DEFAULT_OBSTIME)


# The "self-transform" is defined in icrs_cirs_transformations.py, because in
# the current implementation it goes through ICRS (like GCRS)
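# ---------------------------------------------------------------------------
# Usage sketch (editor's addition): CIRS carries only an obstime, so moving a
# coordinate between epochs goes through the ICRS-mediated self-transform
# noted above.  Values are arbitrary illustrative numbers.
import astropy.units as u
from astropy.time import Time
from astropy.coordinates import CIRS

c = CIRS(ra=10*u.deg, dec=20*u.deg, obstime=Time('J2015.5'))
c2 = c.transform_to(CIRS(obstime=Time('J2016.0')))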
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from copy import deepcopy import numpy as np from astropy import units as u from astropy.tests.helper import (catch_warnings, pytest, assert_quantity_allclose as assert_allclose) from astropy.utils import OrderedDescriptorContainer from astropy.utils.compat import NUMPY_LT_1_14 from astropy.utils.exceptions import AstropyWarning from astropy.coordinates import representation as r from astropy.coordinates.representation import REPRESENTATION_CLASSES from astropy.units import allclose from .test_representation import unitphysics # this fixture is used below def setup_function(func): func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES) def teardown_function(func): REPRESENTATION_CLASSES.clear() REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG) def test_frame_attribute_descriptor(): """ Unit tests of the Attribute descriptor """ from astropy.coordinates.attributes import Attribute class TestAttributes(metaclass=OrderedDescriptorContainer): attr_none = Attribute() attr_2 = Attribute(default=2) attr_3_attr2 = Attribute(default=3, secondary_attribute='attr_2') attr_none_attr2 = Attribute(default=None, secondary_attribute='attr_2') attr_none_nonexist = Attribute(default=None, secondary_attribute='nonexist') t = TestAttributes() # Defaults assert t.attr_none is None assert t.attr_2 == 2 assert t.attr_3_attr2 == 3 assert t.attr_none_attr2 == t.attr_2 assert t.attr_none_nonexist is None # No default and non-existent secondary attr # Setting values via '_'-prefixed internal vars (as would normally done in __init__) t._attr_none = 10 assert t.attr_none == 10 t._attr_2 = 20 assert t.attr_2 == 20 assert t.attr_3_attr2 == 3 assert t.attr_none_attr2 == t.attr_2 t._attr_none_attr2 = 40 assert t.attr_none_attr2 == 40 # Make sure setting values via public attribute fails with pytest.raises(AttributeError) as err: t.attr_none = 5 assert 'Cannot set frame attribute' in str(err) def test_frame_subclass_attribute_descriptor(): from astropy.coordinates.builtin_frames import FK4 from astropy.coordinates.attributes import Attribute, TimeAttribute from astropy.time import Time _EQUINOX_B1980 = Time('B1980', scale='tai') class MyFK4(FK4): # equinox inherited from FK4, obstime overridden, and newattr is new obstime = TimeAttribute(default=_EQUINOX_B1980) newattr = Attribute(default='newattr') mfk4 = MyFK4() assert mfk4.equinox.value == 'B1950.000' assert mfk4.obstime.value == 'B1980.000' assert mfk4.newattr == 'newattr' assert set(mfk4.get_frame_attr_names()) == set(['equinox', 'obstime', 'newattr']) mfk4 = MyFK4(equinox='J1980.0', obstime='J1990.0', newattr='world') assert mfk4.equinox.value == 'J1980.000' assert mfk4.obstime.value == 'J1990.000' assert mfk4.newattr == 'world' def test_create_data_frames(): from astropy.coordinates.builtin_frames import ICRS # from repr i1 = ICRS(r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc)) i2 = ICRS(r.UnitSphericalRepresentation(lon=1*u.deg, lat=2*u.deg)) # from preferred name i3 = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.kpc) i4 = ICRS(ra=1*u.deg, dec=2*u.deg) assert i1.data.lat == i3.data.lat assert i1.data.lon == i3.data.lon assert i1.data.distance == i3.data.distance assert i2.data.lat == i4.data.lat assert i2.data.lon == i4.data.lon # now make sure the preferred names work as properties assert_allclose(i1.ra, i3.ra) assert_allclose(i2.ra, i4.ra) assert_allclose(i1.distance, i3.distance) with pytest.raises(AttributeError): i1.ra = [11.]*u.deg def 
test_create_orderered_data(): from astropy.coordinates.builtin_frames import ICRS, Galactic, AltAz TOL = 1e-10*u.deg i = ICRS(1*u.deg, 2*u.deg) assert (i.ra - 1*u.deg) < TOL assert (i.dec - 2*u.deg) < TOL g = Galactic(1*u.deg, 2*u.deg) assert (g.l - 1*u.deg) < TOL assert (g.b - 2*u.deg) < TOL a = AltAz(1*u.deg, 2*u.deg) assert (a.az - 1*u.deg) < TOL assert (a.alt - 2*u.deg) < TOL with pytest.raises(TypeError): ICRS(1*u.deg, 2*u.deg, 1*u.deg, 2*u.deg) with pytest.raises(TypeError): sph = r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc) ICRS(sph, 1*u.deg, 2*u.deg) def test_create_nodata_frames(): from astropy.coordinates.builtin_frames import ICRS, FK4, FK5 i = ICRS() assert len(i.get_frame_attr_names()) == 0 f5 = FK5() assert f5.equinox == FK5.get_frame_attr_names()['equinox'] f4 = FK4() assert f4.equinox == FK4.get_frame_attr_names()['equinox'] # obstime is special because it's a property that uses equinox if obstime is not set assert f4.obstime in (FK4.get_frame_attr_names()['obstime'], FK4.get_frame_attr_names()['equinox']) def test_no_data_nonscalar_frames(): from astropy.coordinates.builtin_frames import AltAz from astropy.time import Time a1 = AltAz(obstime=Time('2012-01-01') + np.arange(10.) * u.day, temperature=np.ones((3, 1)) * u.deg_C) assert a1.obstime.shape == (3, 10) assert a1.temperature.shape == (3, 10) assert a1.shape == (3, 10) with pytest.raises(ValueError) as exc: AltAz(obstime=Time('2012-01-01') + np.arange(10.) * u.day, temperature=np.ones((3,)) * u.deg_C) assert 'inconsistent shapes' in str(exc) def test_frame_repr(): from astropy.coordinates.builtin_frames import ICRS, FK5 i = ICRS() assert repr(i) == '<ICRS Frame>' f5 = FK5() assert repr(f5).startswith('<FK5 Frame (equinox=') i2 = ICRS(ra=1*u.deg, dec=2*u.deg) i3 = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.kpc) assert repr(i2) == ('<ICRS Coordinate: (ra, dec) in deg\n' ' ({})>').format(' 1., 2.' if NUMPY_LT_1_14 else '1., 2.') assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n' ' ({})>').format(' 1., 2., 3.' if NUMPY_LT_1_14 else '1., 2., 3.') # try with arrays i2 = ICRS(ra=[1.1, 2.1]*u.deg, dec=[2.1, 3.1]*u.deg) i3 = ICRS(ra=[1.1, 2.1]*u.deg, dec=[-15.6, 17.1]*u.deg, distance=[11., 21.]*u.kpc) assert repr(i2) == ('<ICRS Coordinate: (ra, dec) in deg\n' ' [{}]>').format('( 1.1, 2.1), ( 2.1, 3.1)' if NUMPY_LT_1_14 else '(1.1, 2.1), (2.1, 3.1)') if NUMPY_LT_1_14: assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n' ' [( 1.1, -15.6, 11.), ( 2.1, 17.1, 21.)]>') else: assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n' ' [(1.1, -15.6, 11.), (2.1, 17.1, 21.)]>') def test_frame_repr_vels(): from astropy.coordinates.builtin_frames import ICRS i = ICRS(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=1*u.marcsec/u.yr, pm_dec=2*u.marcsec/u.yr) # unit comes out as mas/yr because of the preferred units defined in the # frame RepresentationMapping assert repr(i) == ('<ICRS Coordinate: (ra, dec) in deg\n' ' ({0})\n' ' (pm_ra_cosdec, pm_dec) in mas / yr\n' ' ({0})>').format(' 1., 2.' if NUMPY_LT_1_14 else '1., 2.') def test_converting_units(): import re from astropy.coordinates.baseframe import RepresentationMapping from astropy.coordinates.builtin_frames import ICRS, FK5 # this is a regular expression that with split (see below) removes what's # the decimal point to fix rounding problems rexrepr = re.compile(r'(.*?=\d\.).*?( .*?=\d\.).*?( .*)') # Use values that aren't subject to rounding down to X.9999... 
i2 = ICRS(ra=2.*u.deg, dec=2.*u.deg) i2_many = ICRS(ra=[2., 4.]*u.deg, dec=[2., -8.1]*u.deg) # converting from FK5 to ICRS and back changes the *internal* representation, # but it should still come out in the preferred form i4 = i2.transform_to(FK5).transform_to(ICRS) i4_many = i2_many.transform_to(FK5).transform_to(ICRS) ri2 = ''.join(rexrepr.split(repr(i2))) ri4 = ''.join(rexrepr.split(repr(i4))) assert ri2 == ri4 assert i2.data.lon.unit != i4.data.lon.unit # Internal repr changed ri2_many = ''.join(rexrepr.split(repr(i2_many))) ri4_many = ''.join(rexrepr.split(repr(i4_many))) assert ri2_many == ri4_many assert i2_many.data.lon.unit != i4_many.data.lon.unit # Internal repr changed # but that *shouldn't* hold if we turn off units for the representation class FakeICRS(ICRS): frame_specific_representation_info = { 'spherical': [RepresentationMapping('lon', 'ra', u.hourangle), RepresentationMapping('lat', 'dec', None), RepresentationMapping('distance', 'distance')] # should fall back to default of None unit } fi = FakeICRS(i4.data) ri2 = ''.join(rexrepr.split(repr(i2))) rfi = ''.join(rexrepr.split(repr(fi))) rfi = re.sub('FakeICRS', 'ICRS', rfi) # Force frame name to match assert ri2 != rfi # the attributes should also get the right units assert i2.dec.unit == i4.dec.unit # unless no/explicitly given units assert i2.dec.unit != fi.dec.unit assert i2.ra.unit != fi.ra.unit assert fi.ra.unit == u.hourangle def test_representation_info(): from astropy.coordinates.baseframe import RepresentationMapping from astropy.coordinates.builtin_frames import ICRS class NewICRS1(ICRS): frame_specific_representation_info = { r.SphericalRepresentation: [ RepresentationMapping('lon', 'rara', u.hourangle), RepresentationMapping('lat', 'decdec', u.degree), RepresentationMapping('distance', 'distance', u.kpc)] } i1 = NewICRS1(rara=10*u.degree, decdec=-12*u.deg, distance=1000*u.pc, pm_rara_cosdecdec=100*u.mas/u.yr, pm_decdec=17*u.mas/u.yr, radial_velocity=10*u.km/u.s) assert allclose(i1.rara, 10*u.deg) assert i1.rara.unit == u.hourangle assert allclose(i1.decdec, -12*u.deg) assert allclose(i1.distance, 1000*u.pc) assert i1.distance.unit == u.kpc assert allclose(i1.pm_rara_cosdecdec, 100*u.mas/u.yr) assert allclose(i1.pm_decdec, 17*u.mas/u.yr) # this should auto-set the names of UnitSpherical: i1.set_representation_cls(r.UnitSphericalRepresentation, s=r.UnitSphericalCosLatDifferential) assert allclose(i1.rara, 10*u.deg) assert allclose(i1.decdec, -12*u.deg) assert allclose(i1.pm_rara_cosdecdec, 100*u.mas/u.yr) assert allclose(i1.pm_decdec, 17*u.mas/u.yr) # For backwards compatibility, we also support the string name in the # representation info dictionary: class NewICRS2(ICRS): frame_specific_representation_info = { 'spherical': [ RepresentationMapping('lon', 'ang1', u.hourangle), RepresentationMapping('lat', 'ang2', u.degree), RepresentationMapping('distance', 'howfar', u.kpc)] } i2 = NewICRS2(ang1=10*u.degree, ang2=-12*u.deg, howfar=1000*u.pc) assert allclose(i2.ang1, 10*u.deg) assert i2.ang1.unit == u.hourangle assert allclose(i2.ang2, -12*u.deg) assert allclose(i2.howfar, 1000*u.pc) assert i2.howfar.unit == u.kpc # Test that the differential kwargs get overridden class NewICRS3(ICRS): frame_specific_representation_info = { r.SphericalCosLatDifferential: [ RepresentationMapping('d_lon_coslat', 'pm_ang1', u.hourangle/u.year), RepresentationMapping('d_lat', 'pm_ang2'), RepresentationMapping('d_distance', 'vlos', u.kpc/u.Myr)] } i3 = NewICRS3(lon=10*u.degree, lat=-12*u.deg, distance=1000*u.pc, pm_ang1=1*u.mas/u.yr, 
pm_ang2=2*u.mas/u.yr, vlos=100*u.km/u.s) assert allclose(i3.pm_ang1, 1*u.mas/u.yr) assert i3.pm_ang1.unit == u.hourangle/u.year assert allclose(i3.pm_ang2, 2*u.mas/u.yr) assert allclose(i3.vlos, 100*u.km/u.s) assert i3.vlos.unit == u.kpc/u.Myr def test_realizing(): from astropy.coordinates.builtin_frames import ICRS, FK5 from astropy.time import Time rep = r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc) i = ICRS() i2 = i.realize_frame(rep) assert not i.has_data assert i2.has_data f = FK5(equinox=Time('J2001')) f2 = f.realize_frame(rep) assert not f.has_data assert f2.has_data assert f2.equinox == f.equinox assert f2.equinox != FK5.get_frame_attr_names()['equinox'] # Check that a nicer error message is returned: with pytest.raises(TypeError) as excinfo: f.realize_frame(f.representation_type) assert ('Class passed as data instead of a representation' in excinfo.value.args[0]) def test_replicating(): from astropy.coordinates.builtin_frames import ICRS, AltAz from astropy.time import Time i = ICRS(ra=[1]*u.deg, dec=[2]*u.deg) icopy = i.replicate(copy=True) irepl = i.replicate(copy=False) i.data._lat[:] = 0*u.deg assert np.all(i.data.lat == irepl.data.lat) assert np.all(i.data.lat != icopy.data.lat) iclone = i.replicate_without_data() assert i.has_data assert not iclone.has_data aa = AltAz(alt=1*u.deg, az=2*u.deg, obstime=Time('J2000')) aaclone = aa.replicate_without_data(obstime=Time('J2001')) assert not aaclone.has_data assert aa.obstime != aaclone.obstime assert aa.pressure == aaclone.pressure assert aa.obswl == aaclone.obswl def test_getitem(): from astropy.coordinates.builtin_frames import ICRS rep = r.SphericalRepresentation( [1, 2, 3]*u.deg, [4, 5, 6]*u.deg, [7, 8, 9]*u.kpc) i = ICRS(rep) assert len(i.ra) == 3 iidx = i[1:] assert len(iidx.ra) == 2 iidx2 = i[0] assert iidx2.ra.isscalar def test_transform(): """ This test just makes sure the transform architecture works, but does *not* actually test all the builtin transforms themselves are accurate """ from astropy.coordinates.builtin_frames import ICRS, FK4, FK5, Galactic from astropy.time import Time i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg) f = i.transform_to(FK5) i2 = f.transform_to(ICRS) assert i2.data.__class__ == r.UnitSphericalRepresentation assert_allclose(i.ra, i2.ra) assert_allclose(i.dec, i2.dec) i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc) f = i.transform_to(FK5) i2 = f.transform_to(ICRS) assert i2.data.__class__ != r.UnitSphericalRepresentation f = FK5(ra=1*u.deg, dec=2*u.deg, equinox=Time('J2001')) f4 = f.transform_to(FK4) f4_2 = f.transform_to(FK4(equinox=f.equinox)) # make sure attributes are copied over correctly assert f4.equinox == FK4.get_frame_attr_names()['equinox'] assert f4_2.equinox == f.equinox # make sure self-transforms also work i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg) i2 = i.transform_to(ICRS) assert_allclose(i.ra, i2.ra) assert_allclose(i.dec, i2.dec) f = FK5(ra=1*u.deg, dec=2*u.deg, equinox=Time('J2001')) f2 = f.transform_to(FK5) # default equinox, so should be *different* assert f2.equinox == FK5().equinox with pytest.raises(AssertionError): assert_allclose(f.ra, f2.ra) with pytest.raises(AssertionError): assert_allclose(f.dec, f2.dec) # finally, check Galactic round-tripping i1 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg) i2 = i1.transform_to(Galactic).transform_to(ICRS) assert_allclose(i1.ra, i2.ra) assert_allclose(i1.dec, i2.dec) def test_transform_to_nonscalar_nodata_frame(): # https://github.com/astropy/astropy/pull/5254#issuecomment-241592353 from 
astropy.coordinates.builtin_frames import ICRS, FK5 from astropy.time import Time times = Time('2016-08-23') + np.linspace(0, 10, 12)*u.day coo1 = ICRS(ra=[[0.], [10.], [20.]]*u.deg, dec=[[-30.], [30.], [60.]]*u.deg) coo2 = coo1.transform_to(FK5(equinox=times)) assert coo2.shape == (3, 12) def test_sep(): from astropy.coordinates.builtin_frames import ICRS i1 = ICRS(ra=0*u.deg, dec=1*u.deg) i2 = ICRS(ra=0*u.deg, dec=2*u.deg) sep = i1.separation(i2) assert sep.deg == 1 i3 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc) i4 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[4, 5]*u.kpc) sep3d = i3.separation_3d(i4) assert_allclose(sep3d.to(u.kpc), np.array([1, 1])*u.kpc) # check that it works even with velocities i5 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc, pm_ra_cosdec=[1, 2]*u.mas/u.yr, pm_dec=[3, 4]*u.mas/u.yr, radial_velocity=[5, 6]*u.km/u.s) i6 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[7, 8]*u.kpc, pm_ra_cosdec=[1, 2]*u.mas/u.yr, pm_dec=[3, 4]*u.mas/u.yr, radial_velocity=[5, 6]*u.km/u.s) sep3d = i5.separation_3d(i6) assert_allclose(sep3d.to(u.kpc), np.array([2, 2])*u.kpc) def test_time_inputs(): """ Test validation and conversion of inputs for equinox and obstime attributes. """ from astropy.time import Time from astropy.coordinates.builtin_frames import FK4 c = FK4(1 * u.deg, 2 * u.deg, equinox='J2001.5', obstime='2000-01-01 12:00:00') assert c.equinox == Time('J2001.5') assert c.obstime == Time('2000-01-01 12:00:00') with pytest.raises(ValueError) as err: c = FK4(1 * u.deg, 2 * u.deg, equinox=1.5) assert 'Invalid time input' in str(err) with pytest.raises(ValueError) as err: c = FK4(1 * u.deg, 2 * u.deg, obstime='hello') assert 'Invalid time input' in str(err) # A vector time should work if the shapes match, but we don't automatically # broadcast the basic data (just like time). FK4([1, 2] * u.deg, [2, 3] * u.deg, obstime=['J2000', 'J2001']) with pytest.raises(ValueError) as err: FK4(1 * u.deg, 2 * u.deg, obstime=['J2000', 'J2001']) assert 'shape' in str(err) def test_is_frame_attr_default(): """ Check that the `is_frame_attr_default` machinery works as expected """ from astropy.time import Time from astropy.coordinates.builtin_frames import FK5 c1 = FK5(ra=1*u.deg, dec=1*u.deg) c2 = FK5(ra=1*u.deg, dec=1*u.deg, equinox=FK5.get_frame_attr_names()['equinox']) c3 = FK5(ra=1*u.deg, dec=1*u.deg, equinox=Time('J2001.5')) assert c1.equinox == c2.equinox assert c1.equinox != c3.equinox assert c1.is_frame_attr_default('equinox') assert not c2.is_frame_attr_default('equinox') assert not c3.is_frame_attr_default('equinox') c4 = c1.realize_frame(r.UnitSphericalRepresentation(3*u.deg, 4*u.deg)) c5 = c2.realize_frame(r.UnitSphericalRepresentation(3*u.deg, 4*u.deg)) assert c4.is_frame_attr_default('equinox') assert not c5.is_frame_attr_default('equinox') def test_altaz_attributes(): from astropy.time import Time from astropy.coordinates import EarthLocation, AltAz aa = AltAz(1*u.deg, 2*u.deg) assert aa.obstime is None assert aa.location is None aa2 = AltAz(1*u.deg, 2*u.deg, obstime='J2000') assert aa2.obstime == Time('J2000') aa3 = AltAz(1*u.deg, 2*u.deg, location=EarthLocation(0*u.deg, 0*u.deg, 0*u.m)) assert isinstance(aa3.location, EarthLocation) def test_representation(): """ Test the getter and setter properties for `representation` """ from astropy.coordinates.builtin_frames import ICRS # Create the frame object. icrs = ICRS(ra=1*u.deg, dec=1*u.deg) data = icrs.data # Create some representation objects. 
icrs_cart = icrs.cartesian icrs_spher = icrs.spherical # Testing when `_representation` set to `CartesianRepresentation`. icrs.representation_type = r.CartesianRepresentation assert icrs.representation_type == r.CartesianRepresentation assert icrs_cart.x == icrs.x assert icrs_cart.y == icrs.y assert icrs_cart.z == icrs.z assert icrs.data == data # Testing that an ICRS object in CartesianRepresentation must not have spherical attributes. for attr in ('ra', 'dec', 'distance'): with pytest.raises(AttributeError) as err: getattr(icrs, attr) assert 'object has no attribute' in str(err) # Testing when `_representation` set to `CylindricalRepresentation`. icrs.representation_type = r.CylindricalRepresentation assert icrs.representation_type == r.CylindricalRepresentation assert icrs.data == data # Testing setter input using text argument for spherical. icrs.representation_type = 'spherical' assert icrs.representation_type is r.SphericalRepresentation assert icrs_spher.lat == icrs.dec assert icrs_spher.lon == icrs.ra assert icrs_spher.distance == icrs.distance assert icrs.data == data # Testing that an ICRS object in SphericalRepresentation must not have cartesian attributes. for attr in ('x', 'y', 'z'): with pytest.raises(AttributeError) as err: getattr(icrs, attr) assert 'object has no attribute' in str(err) # Testing setter input using text argument for cylindrical. icrs.representation_type = 'cylindrical' assert icrs.representation_type is r.CylindricalRepresentation assert icrs.data == data with pytest.raises(ValueError) as err: icrs.representation_type = 'WRONG' assert 'but must be a BaseRepresentation class' in str(err) with pytest.raises(ValueError) as err: icrs.representation_type = ICRS assert 'but must be a BaseRepresentation class' in str(err) def test_represent_as(): from astropy.coordinates.builtin_frames import ICRS icrs = ICRS(ra=1*u.deg, dec=1*u.deg) cart1 = icrs.represent_as('cartesian') cart2 = icrs.represent_as(r.CartesianRepresentation) cart1.x == cart2.x cart1.y == cart2.y cart1.z == cart2.z # now try with velocities icrs = ICRS(ra=0*u.deg, dec=0*u.deg, distance=10*u.kpc, pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr, radial_velocity=1*u.km/u.s) # single string rep2 = icrs.represent_as('cylindrical') assert isinstance(rep2, r.CylindricalRepresentation) assert isinstance(rep2.differentials['s'], r.CylindricalDifferential) # single class with positional in_frame_units, verify that warning raised with catch_warnings() as w: icrs.represent_as(r.CylindricalRepresentation, False) assert len(w) == 1 assert w[0].category == AstropyWarning assert 'argument position' in str(w[0].message) # TODO: this should probably fail in the future once we figure out a better # workaround for dealing with UnitSphericalRepresentation's with # RadialDifferential's # two classes # rep2 = icrs.represent_as(r.CartesianRepresentation, # r.SphericalCosLatDifferential) # assert isinstance(rep2, r.CartesianRepresentation) # assert isinstance(rep2.differentials['s'], r.SphericalCosLatDifferential) with pytest.raises(ValueError): icrs.represent_as('odaigahara') def test_shorthand_representations(): from astropy.coordinates.builtin_frames import ICRS rep = r.CartesianRepresentation([1, 2, 3]*u.pc) dif = r.CartesianDifferential([1, 2, 3]*u.km/u.s) rep = rep.with_differentials(dif) icrs = ICRS(rep) sph = icrs.spherical assert isinstance(sph, r.SphericalRepresentation) assert isinstance(sph.differentials['s'], r.SphericalDifferential) sph = icrs.sphericalcoslat assert isinstance(sph, r.SphericalRepresentation) 
assert isinstance(sph.differentials['s'], r.SphericalCosLatDifferential) def test_dynamic_attrs(): from astropy.coordinates.builtin_frames import ICRS c = ICRS(1*u.deg, 2*u.deg) assert 'ra' in dir(c) assert 'dec' in dir(c) with pytest.raises(AttributeError) as err: c.blahblah assert "object has no attribute 'blahblah'" in str(err) with pytest.raises(AttributeError) as err: c.ra = 1 assert "Cannot set any frame attribute" in str(err) c.blahblah = 1 assert c.blahblah == 1 def test_nodata_error(): from astropy.coordinates.builtin_frames import ICRS i = ICRS() with pytest.raises(ValueError) as excinfo: i.data assert 'does not have associated data' in str(excinfo.value) def test_len0_data(): from astropy.coordinates.builtin_frames import ICRS i = ICRS([]*u.deg, []*u.deg) assert i.has_data repr(i) def test_quantity_attributes(): from astropy.coordinates.builtin_frames import GCRS # make sure we can create a GCRS frame with valid inputs GCRS(obstime='J2002', obsgeoloc=[1, 2, 3]*u.km, obsgeovel=[4, 5, 6]*u.km/u.s) # make sure it fails for invalid lovs or vels with pytest.raises(TypeError): GCRS(obsgeoloc=[1, 2, 3]) # no unit with pytest.raises(u.UnitsError): GCRS(obsgeoloc=[1, 2, 3]*u.km/u.s) # incorrect unit with pytest.raises(ValueError): GCRS(obsgeoloc=[1, 3]*u.km) # incorrect shape @pytest.mark.remote_data def test_eloc_attributes(): from astropy.coordinates import AltAz, ITRS, GCRS, EarthLocation el = EarthLocation(lon=12.3*u.deg, lat=45.6*u.deg, height=1*u.km) it = ITRS(r.SphericalRepresentation(lon=12.3*u.deg, lat=45.6*u.deg, distance=1*u.km)) gc = GCRS(ra=12.3*u.deg, dec=45.6*u.deg, distance=6375*u.km) el1 = AltAz(location=el).location assert isinstance(el1, EarthLocation) # these should match *exactly* because the EarthLocation assert el1.lat == el.lat assert el1.lon == el.lon assert el1.height == el.height el2 = AltAz(location=it).location assert isinstance(el2, EarthLocation) # these should *not* match because giving something in Spherical ITRS is # *not* the same as giving it as an EarthLocation: EarthLocation is on an # elliptical geoid. So the longitude should match (because flattening is # only along the z-axis), but latitude should not. Also, height is relative # to the *surface* in EarthLocation, but the ITRS distance is relative to # the center of the Earth assert not allclose(el2.lat, it.spherical.lat) assert allclose(el2.lon, it.spherical.lon) assert el2.height < -6000*u.km el3 = AltAz(location=gc).location # GCRS inputs implicitly get transformed to ITRS and then onto # EarthLocation's elliptical geoid. 
So both lat and lon shouldn't match assert isinstance(el3, EarthLocation) assert not allclose(el3.lat, gc.dec) assert not allclose(el3.lon, gc.ra) assert np.abs(el3.height) < 500*u.km def test_equivalent_frames(): from astropy.coordinates import SkyCoord from astropy.coordinates.builtin_frames import ICRS, FK4, FK5, AltAz i = ICRS() i2 = ICRS(1*u.deg, 2*u.deg) assert i.is_equivalent_frame(i) assert i.is_equivalent_frame(i2) with pytest.raises(TypeError): assert i.is_equivalent_frame(10) with pytest.raises(TypeError): assert i2.is_equivalent_frame(SkyCoord(i2)) f0 = FK5() # this J2000 is TT f1 = FK5(equinox='J2000') f2 = FK5(1*u.deg, 2*u.deg, equinox='J2000') f3 = FK5(equinox='J2010') f4 = FK4(equinox='J2010') assert f1.is_equivalent_frame(f1) assert not i.is_equivalent_frame(f1) assert f0.is_equivalent_frame(f1) assert f1.is_equivalent_frame(f2) assert not f1.is_equivalent_frame(f3) assert not f3.is_equivalent_frame(f4) aa1 = AltAz() aa2 = AltAz(obstime='J2010') assert aa2.is_equivalent_frame(aa2) assert not aa1.is_equivalent_frame(i) assert not aa1.is_equivalent_frame(aa2) def test_representation_subclass(): # Regression test for #3354 from astropy.coordinates.builtin_frames import FK5 # Normally when instantiating a frame without a distance the frame will try # and use UnitSphericalRepresentation internally instead of # SphericalRepresentation. frame = FK5(representation_type=r.SphericalRepresentation, ra=32 * u.deg, dec=20 * u.deg) assert type(frame._data) == r.UnitSphericalRepresentation assert frame.representation_type == r.SphericalRepresentation # If using a SphericalRepresentation class this used to not work, so we # test here that this is now fixed. class NewSphericalRepresentation(r.SphericalRepresentation): attr_classes = r.SphericalRepresentation.attr_classes frame = FK5(representation_type=NewSphericalRepresentation, lon=32 * u.deg, lat=20 * u.deg) assert type(frame._data) == r.UnitSphericalRepresentation assert frame.representation_type == NewSphericalRepresentation # A similar issue then happened in __repr__ with subclasses of # SphericalRepresentation. assert repr(frame) == ("<FK5 Coordinate (equinox=J2000.000): (lon, lat) in deg\n" " ({})>").format(' 32., 20.' if NUMPY_LT_1_14 else '32., 20.') # A more subtle issue is when specifying a custom # UnitSphericalRepresentation subclass for the data and # SphericalRepresentation or a subclass for the representation. class NewUnitSphericalRepresentation(r.UnitSphericalRepresentation): attr_classes = r.UnitSphericalRepresentation.attr_classes def __repr__(self): return "<NewUnitSphericalRepresentation: spam spam spam>" frame = FK5(NewUnitSphericalRepresentation(lon=32 * u.deg, lat=20 * u.deg), representation_type=NewSphericalRepresentation) assert repr(frame) == "<FK5 Coordinate (equinox=J2000.000): spam spam spam>" def test_getitem_representation(): """ Make sure current representation survives __getitem__ even if different from data representation. 
""" from astropy.coordinates.builtin_frames import ICRS c = ICRS([1, 1] * u.deg, [2, 2] * u.deg) c.representation_type = 'cartesian' assert c[0].representation_type is r.CartesianRepresentation def test_component_error_useful(): """ Check that a data-less frame gives useful error messages about not having data when the attributes asked for are possible coordinate components """ from astropy.coordinates.builtin_frames import ICRS i = ICRS() with pytest.raises(ValueError) as excinfo: i.ra assert 'does not have associated data' in str(excinfo.value) with pytest.raises(AttributeError) as excinfo1: i.foobar with pytest.raises(AttributeError) as excinfo2: i.lon # lon is *not* the component name despite being the underlying representation's name assert "object has no attribute 'foobar'" in str(excinfo1.value) assert "object has no attribute 'lon'" in str(excinfo2.value) def test_cache_clear(): from astropy.coordinates.builtin_frames import ICRS i = ICRS(1*u.deg, 2*u.deg) # Add an in frame units version of the rep to the cache. repr(i) assert len(i.cache['representation']) == 2 i.cache.clear() assert len(i.cache['representation']) == 0 def test_inplace_array(): from astropy.coordinates.builtin_frames import ICRS i = ICRS([[1, 2], [3, 4]]*u.deg, [[10, 20], [30, 40]]*u.deg) # Add an in frame units version of the rep to the cache. repr(i) # Check that repr() has added a rep to the cache assert len(i.cache['representation']) == 2 # Modify the data i.data.lon[:, 0] = [100, 200]*u.deg # Clear the cache i.cache.clear() # This will use a second (potentially cached rep) assert_allclose(i.ra, [[100, 2], [200, 4]]*u.deg) assert_allclose(i.dec, [[10, 20], [30, 40]]*u.deg) def test_inplace_change(): from astropy.coordinates.builtin_frames import ICRS i = ICRS(1*u.deg, 2*u.deg) # Add an in frame units version of the rep to the cache. repr(i) # Check that repr() has added a rep to the cache assert len(i.cache['representation']) == 2 # Modify the data i.data.lon[()] = 10*u.deg # Clear the cache i.cache.clear() # This will use a second (potentially cached rep) assert i.ra == 10 * u.deg assert i.dec == 2 * u.deg def test_representation_with_multiple_differentials(): from astropy.coordinates.builtin_frames import ICRS dif1 = r.CartesianDifferential([1, 2, 3]*u.km/u.s) dif2 = r.CartesianDifferential([1, 2, 3]*u.km/u.s**2) rep = r.CartesianRepresentation([1, 2, 3]*u.pc, differentials={'s': dif1, 's2': dif2}) # check warning is raised for a scalar with pytest.raises(ValueError): ICRS(rep) def test_representation_arg_backwards_compatibility(): # TODO: this test can be removed when the `representation` argument is # removed from the BaseCoordinateFrame initializer. from astropy.coordinates.builtin_frames import ICRS c1 = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc, representation_type=r.CartesianRepresentation) c2 = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc, representation_type=r.CartesianRepresentation) c3 = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc, representation_type='cartesian') assert c1.x == c2.x assert c1.y == c2.y assert c1.z == c2.z assert c1.x == c3.x assert c1.y == c3.y assert c1.z == c3.z assert c1.representation_type == c1.representation_type with pytest.raises(ValueError): ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc, representation_type='cartesian', representation='cartesian') def test_missing_component_error_names(): """ This test checks that the component names are frame component names, not representation or differential names, when referenced in an exception raised when not passing in enough data. 
For example: ICRS(ra=10*u.deg) should state: TypeError: __init__() missing 1 required positional argument: 'dec' """ from astropy.coordinates.builtin_frames import ICRS with pytest.raises(TypeError) as e: ICRS(ra=150 * u.deg) assert "missing 1 required positional argument: 'dec'" in str(e) with pytest.raises(TypeError) as e: ICRS(ra=150*u.deg, dec=-11*u.deg, pm_ra=100*u.mas/u.yr, pm_dec=10*u.mas/u.yr) assert "pm_ra_cosdec" in str(e) def test_non_spherical_representation_unit_creation(unitphysics): from astropy.coordinates.builtin_frames import ICRS class PhysicsICRS(ICRS): default_representation = r.PhysicsSphericalRepresentation pic = PhysicsICRS(phi=1*u.deg, theta=25*u.deg, r=1*u.kpc) assert isinstance(pic.data, r.PhysicsSphericalRepresentation) picu = PhysicsICRS(phi=1*u.deg, theta=25*u.deg) assert isinstance(picu.data, unitphysics) def test_attribute_repr(): from astropy.coordinates.attributes import Attribute from astropy.coordinates.baseframe import BaseCoordinateFrame class Spam: def _astropy_repr_in_frame(self): return "TEST REPR" class TestFrame(BaseCoordinateFrame): attrtest = Attribute(default=Spam()) assert "TEST REPR" in repr(TestFrame())
""" This file tests the behavior of subclasses of Representation and Frames """ from copy import deepcopy from collections import OrderedDict import pytest from astropy.coordinates import Longitude, Latitude from astropy.coordinates.representation import (REPRESENTATION_CLASSES, SphericalRepresentation, UnitSphericalRepresentation, _invalidate_reprdiff_cls_hash) from astropy.coordinates.baseframe import frame_transform_graph from astropy.coordinates.transformations import FunctionTransform from astropy.coordinates import ICRS from astropy.coordinates.baseframe import RepresentationMapping import astropy.units as u import astropy.coordinates # Classes setup, borrowed from SunPy. # Here we define the classes *inside* the tests to make sure that we can wipe # the slate clean when the tests have finished running. def setup_function(func): func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES) def teardown_function(func): REPRESENTATION_CLASSES.clear() REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG) _invalidate_reprdiff_cls_hash() @pytest.mark.remote_data def test_unit_representation_subclass(): class Longitude180(Longitude): def __new__(cls, angle, unit=None, wrap_angle=180*u.deg, **kwargs): self = super().__new__(cls, angle, unit=unit, wrap_angle=wrap_angle, **kwargs) return self class UnitSphericalWrap180Representation(UnitSphericalRepresentation): attr_classes = OrderedDict([('lon', Longitude180), ('lat', Latitude)]) class SphericalWrap180Representation(SphericalRepresentation): attr_classes = OrderedDict([('lon', Longitude180), ('lat', Latitude), ('distance', u.Quantity)]) _unit_representation = UnitSphericalWrap180Representation class MyFrame(ICRS): default_representation = SphericalWrap180Representation frame_specific_representation_info = { 'spherical': [ RepresentationMapping('lon', 'ra'), RepresentationMapping('lat', 'dec')] } frame_specific_representation_info['unitsphericalwrap180'] = \ frame_specific_representation_info['sphericalwrap180'] = \ frame_specific_representation_info['spherical'] @frame_transform_graph.transform(FunctionTransform, MyFrame, astropy.coordinates.ICRS) def myframe_to_icrs(myframe_coo, icrs): return icrs.realize_frame(myframe_coo._data) f = MyFrame(10*u.deg, 10*u.deg) assert isinstance(f._data, UnitSphericalWrap180Representation) assert isinstance(f.ra, Longitude180) g = f.transform_to(astropy.coordinates.ICRS) assert isinstance(g, astropy.coordinates.ICRS) assert isinstance(g._data, UnitSphericalWrap180Representation) frame_transform_graph.remove_transform(MyFrame, astropy.coordinates.ICRS, None)
import pytest import numpy as np from astropy.time import Time from astropy import units as u from astropy.constants import c from astropy.coordinates.builtin_frames import GCRS from astropy.coordinates.earth import EarthLocation from astropy.coordinates.sky_coordinate import SkyCoord from astropy.coordinates.solar_system import (get_body, get_moon, BODY_NAME_TO_KERNEL_SPEC, _apparent_position_in_true_coordinates, get_body_barycentric, get_body_barycentric_posvel) from astropy.coordinates.funcs import get_sun from astropy.tests.helper import assert_quantity_allclose from astropy.units import allclose as quantity_allclose try: import jplephem # pylint: disable=W0611 except ImportError: HAS_JPLEPHEM = False else: HAS_JPLEPHEM = True try: from skyfield.api import load # pylint: disable=W0611 except ImportError: HAS_SKYFIELD = False else: HAS_SKYFIELD = True de432s_separation_tolerance_planets = 5*u.arcsec de432s_separation_tolerance_moon = 5*u.arcsec de432s_distance_tolerance = 20*u.km skyfield_angular_separation_tolerance = 1*u.arcsec skyfield_separation_tolerance = 10*u.km @pytest.mark.remote_data @pytest.mark.skipif(str('not HAS_SKYFIELD')) def test_positions_skyfield(): """ Test positions against those generated by skyfield. """ t = Time('1980-03-25 00:00') location = None # skyfield ephemeris planets = load('de421.bsp') ts = load.timescale() mercury, jupiter, moon = planets['mercury'], planets['jupiter barycenter'], planets['moon'] earth = planets['earth'] skyfield_t = ts.from_astropy(t) if location is not None: earth = earth.topos(latitude_degrees=location.lat.to_value(u.deg), longitude_degrees=location.lon.to_value(u.deg), elevation_m=location.height.to_value(u.m)) skyfield_mercury = earth.at(skyfield_t).observe(mercury).apparent() skyfield_jupiter = earth.at(skyfield_t).observe(jupiter).apparent() skyfield_moon = earth.at(skyfield_t).observe(moon).apparent() if location is not None: obsgeoloc, obsgeovel = location.get_gcrs_posvel(t) frame = GCRS(obstime=t, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel) else: frame = GCRS(obstime=t) ra, dec, dist = skyfield_mercury.radec(epoch='date') skyfield_mercury = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km), frame=frame) ra, dec, dist = skyfield_jupiter.radec(epoch='date') skyfield_jupiter = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km), frame=frame) ra, dec, dist = skyfield_moon.radec(epoch='date') skyfield_moon = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km), frame=frame) moon_astropy = get_moon(t, location, ephemeris='de430') mercury_astropy = get_body('mercury', t, location, ephemeris='de430') jupiter_astropy = get_body('jupiter', t, location, ephemeris='de430') # convert to true equator and equinox jupiter_astropy = _apparent_position_in_true_coordinates(jupiter_astropy) mercury_astropy = _apparent_position_in_true_coordinates(mercury_astropy) moon_astropy = _apparent_position_in_true_coordinates(moon_astropy) assert (moon_astropy.separation(skyfield_moon) < skyfield_angular_separation_tolerance) assert (moon_astropy.separation_3d(skyfield_moon) < skyfield_separation_tolerance) assert (jupiter_astropy.separation(skyfield_jupiter) < skyfield_angular_separation_tolerance) assert (jupiter_astropy.separation_3d(skyfield_jupiter) < skyfield_separation_tolerance) assert (mercury_astropy.separation(skyfield_mercury) < skyfield_angular_separation_tolerance) assert (mercury_astropy.separation_3d(skyfield_mercury) < skyfield_separation_tolerance) class TestPositionsGeocentric: """ Test positions against those 
generated by JPL Horizons accessed on 2016-03-28, with refraction turned on. """ def setup(self): self.t = Time('1980-03-25 00:00') self.frame = GCRS(obstime=self.t) # Results returned by JPL Horizons web interface self.horizons = { 'mercury': SkyCoord(ra='22h41m47.78s', dec='-08d29m32.0s', distance=c*6.323037*u.min, frame=self.frame), 'moon': SkyCoord(ra='07h32m02.62s', dec='+18d34m05.0s', distance=c*0.021921*u.min, frame=self.frame), 'jupiter': SkyCoord(ra='10h17m12.82s', dec='+12d02m57.0s', distance=c*37.694557*u.min, frame=self.frame), 'sun': SkyCoord(ra='00h16m31.00s', dec='+01d47m16.9s', distance=c*8.294858*u.min, frame=self.frame)} @pytest.mark.parametrize(('body', 'sep_tol', 'dist_tol'), (('mercury', 7.*u.arcsec, 1000*u.km), ('jupiter', 78.*u.arcsec, 76000*u.km), ('moon', 20.*u.arcsec, 80*u.km), ('sun', 5.*u.arcsec, 11.*u.km))) def test_erfa_planet(self, body, sep_tol, dist_tol): """Test predictions using erfa/plan94. Accuracies are maximum deviations listed in erfa/plan94.c, for Jupiter and Mercury, and that quoted in Meeus "Astronomical Algorithms" (1998) for the Moon. """ astropy = get_body(body, self.t, ephemeris='builtin') horizons = self.horizons[body] # convert to true equator and equinox astropy = _apparent_position_in_true_coordinates(astropy) # Assert sky coordinates are close. assert astropy.separation(horizons) < sep_tol # Assert distances are close. assert_quantity_allclose(astropy.distance, horizons.distance, atol=dist_tol) @pytest.mark.remote_data @pytest.mark.skipif('not HAS_JPLEPHEM') @pytest.mark.parametrize('body', ('mercury', 'jupiter', 'sun')) def test_de432s_planet(self, body): astropy = get_body(body, self.t, ephemeris='de432s') horizons = self.horizons[body] # convert to true equator and equinox astropy = _apparent_position_in_true_coordinates(astropy) # Assert sky coordinates are close. assert (astropy.separation(horizons) < de432s_separation_tolerance_planets) # Assert distances are close. assert_quantity_allclose(astropy.distance, horizons.distance, atol=de432s_distance_tolerance) @pytest.mark.remote_data @pytest.mark.skipif('not HAS_JPLEPHEM') def test_de432s_moon(self): astropy = get_moon(self.t, ephemeris='de432s') horizons = self.horizons['moon'] # convert to true equator and equinox astropy = _apparent_position_in_true_coordinates(astropy) # Assert sky coordinates are close. assert (astropy.separation(horizons) < de432s_separation_tolerance_moon) # Assert distances are close. assert_quantity_allclose(astropy.distance, horizons.distance, atol=de432s_distance_tolerance) @pytest.mark.remote_data class TestPositionKittPeak: """ Test positions against those generated by JPL Horizons accessed on 2016-03-28, with refraction turned on. 
""" def setup(self): kitt_peak = EarthLocation.from_geodetic(lon=-111.6*u.deg, lat=31.963333333333342*u.deg, height=2120*u.m) self.t = Time('2014-09-25T00:00', location=kitt_peak) obsgeoloc, obsgeovel = kitt_peak.get_gcrs_posvel(self.t) self.frame = GCRS(obstime=self.t, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel) # Results returned by JPL Horizons web interface self.horizons = { 'mercury': SkyCoord(ra='13h38m58.50s', dec='-13d34m42.6s', distance=c*7.699020*u.min, frame=self.frame), 'moon': SkyCoord(ra='12h33m12.85s', dec='-05d17m54.4s', distance=c*0.022054*u.min, frame=self.frame), 'jupiter': SkyCoord(ra='09h09m55.55s', dec='+16d51m57.8s', distance=c*49.244937*u.min, frame=self.frame)} @pytest.mark.parametrize(('body', 'sep_tol', 'dist_tol'), (('mercury', 7.*u.arcsec, 500*u.km), ('jupiter', 78.*u.arcsec, 82000*u.km))) def test_erfa_planet(self, body, sep_tol, dist_tol): """Test predictions using erfa/plan94. Accuracies are maximum deviations listed in erfa/plan94.c. """ # Add uncertainty in position of Earth dist_tol = dist_tol + 1300 * u.km astropy = get_body(body, self.t, ephemeris='builtin') horizons = self.horizons[body] # convert to true equator and equinox astropy = _apparent_position_in_true_coordinates(astropy) # Assert sky coordinates are close. assert astropy.separation(horizons) < sep_tol # Assert distances are close. assert_quantity_allclose(astropy.distance, horizons.distance, atol=dist_tol) @pytest.mark.remote_data @pytest.mark.skipif('not HAS_JPLEPHEM') @pytest.mark.parametrize('body', ('mercury', 'jupiter')) def test_de432s_planet(self, body): astropy = get_body(body, self.t, ephemeris='de432s') horizons = self.horizons[body] # convert to true equator and equinox astropy = _apparent_position_in_true_coordinates(astropy) # Assert sky coordinates are close. assert (astropy.separation(horizons) < de432s_separation_tolerance_planets) # Assert distances are close. assert_quantity_allclose(astropy.distance, horizons.distance, atol=de432s_distance_tolerance) @pytest.mark.remote_data @pytest.mark.skipif('not HAS_JPLEPHEM') def test_de432s_moon(self): astropy = get_moon(self.t, ephemeris='de432s') horizons = self.horizons['moon'] # convert to true equator and equinox astropy = _apparent_position_in_true_coordinates(astropy) # Assert sky coordinates are close. assert (astropy.separation(horizons) < de432s_separation_tolerance_moon) # Assert distances are close. 
assert_quantity_allclose(astropy.distance, horizons.distance, atol=de432s_distance_tolerance) @pytest.mark.remote_data @pytest.mark.skipif('not HAS_JPLEPHEM') @pytest.mark.parametrize('bodyname', ('mercury', 'jupiter')) def test_custom_kernel_spec_body(self, bodyname): """ Checks that giving a kernel specifier instead of a body name works """ coord_by_name = get_body(bodyname, self.t, ephemeris='de432s') kspec = BODY_NAME_TO_KERNEL_SPEC[bodyname] coord_by_kspec = get_body(kspec, self.t, ephemeris='de432s') assert_quantity_allclose(coord_by_name.ra, coord_by_kspec.ra) assert_quantity_allclose(coord_by_name.dec, coord_by_kspec.dec) assert_quantity_allclose(coord_by_name.distance, coord_by_kspec.distance) @pytest.mark.remote_data @pytest.mark.skipif('not HAS_JPLEPHEM') @pytest.mark.parametrize('time', (Time('1960-01-12 00:00'), Time('1980-03-25 00:00'), Time('2010-10-13 00:00'))) def test_get_sun_consistency(time): """ Test that the sun from JPL and the builtin get_sun match """ sun_jpl_gcrs = get_body('sun', time, ephemeris='de432s') builtin_get_sun = get_sun(time) sep = builtin_get_sun.separation(sun_jpl_gcrs) assert sep < 0.1*u.arcsec def test_get_moon_nonscalar_regression(): """ Test that the builtin ephemeris works with non-scalar times. See Issue #5069. """ times = Time(["2015-08-28 03:30", "2015-09-05 10:30"]) # the following line will raise an Exception if the bug recurs. get_moon(times, ephemeris='builtin') def test_barycentric_pos_posvel_same(): # Check that the two routines give identical results. ep1 = get_body_barycentric('earth', Time('2016-03-20T12:30:00')) ep2, _ = get_body_barycentric_posvel('earth', Time('2016-03-20T12:30:00')) assert np.all(ep1.xyz == ep2.xyz) def test_earth_barycentric_velocity_rough(): # Check that a time near the equinox gives roughly the right result. ep, ev = get_body_barycentric_posvel('earth', Time('2016-03-20T12:30:00')) assert_quantity_allclose(ep.xyz, [-1., 0., 0.]*u.AU, atol=0.01*u.AU) expected = u.Quantity([0.*u.one, np.cos(23.5*u.deg), np.sin(23.5*u.deg)]) * -30. * u.km / u.s assert_quantity_allclose(ev.xyz, expected, atol=1.*u.km/u.s) def test_earth_barycentric_velocity_multi_d(): # Might as well test it with a multidimensional array too. t = Time('2016-03-20T12:30:00') + np.arange(8.).reshape(2, 2, 2) * u.yr / 2. ep, ev = get_body_barycentric_posvel('earth', t) # note: assert_quantity_allclose doesn't like the shape mismatch. # this is a problem with np.testing.assert_allclose. assert quantity_allclose(ep.get_xyz(xyz_axis=-1), [[-1., 0., 0.], [+1., 0., 0.]]*u.AU, atol=0.06*u.AU) expected = u.Quantity([0.*u.one, np.cos(23.5*u.deg), np.sin(23.5*u.deg)]) * ([[-30.], [30.]] * u.km / u.s) assert quantity_allclose(ev.get_xyz(xyz_axis=-1), expected, atol=2.*u.km/u.s) @pytest.mark.remote_data @pytest.mark.skipif('not HAS_JPLEPHEM') @pytest.mark.parametrize(('body', 'pos_tol', 'vel_tol'), (('mercury', 1000.*u.km, 1.*u.km/u.s), ('jupiter', 100000.*u.km, 2.*u.km/u.s), ('earth', 10*u.km, 10*u.mm/u.s))) def test_barycentric_velocity_consistency(body, pos_tol, vel_tol): # Tolerances are about 1.5 times the rms listed for plan94 and epv00, # except for Mercury (which nominally is 334 km rms) t = Time('2016-03-20T12:30:00') ep, ev = get_body_barycentric_posvel(body, t, ephemeris='builtin') dp, dv = get_body_barycentric_posvel(body, t, ephemeris='de432s') assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol) assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol) # Might as well test it with a multidimensional array too. 
t = Time('2016-03-20T12:30:00') + np.arange(8.).reshape(2, 2, 2) * u.yr / 2. ep, ev = get_body_barycentric_posvel(body, t, ephemeris='builtin') dp, dv = get_body_barycentric_posvel(body, t, ephemeris='de432s') assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol) assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
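# Illustrative sketch, not part of the tests: the ephemeris-selection
# pattern used throughout this module. The function name is hypothetical;
# the 'builtin' (ERFA-based) ephemeris needs no download, while JPL kernels
# such as 'de432s' do.
def _example_builtin_ephemeris():
    t = Time('2016-03-20T12:30:00')
    # Apparent GCRS position of Jupiter from the analytic ephemeris.
    jupiter = get_body('jupiter', t, ephemeris='builtin')
    # Barycentric position and velocity of the Earth at the same instant.
    pos, vel = get_body_barycentric_posvel('earth', t, ephemeris='builtin')
    return jupiter, pos, vel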
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np import pytest from astropy import units as u from astropy.coordinates import transformations as t from astropy.coordinates.builtin_frames import ICRS, FK5, FK4, FK4NoETerms, Galactic, AltAz from astropy.coordinates import representation as r from astropy.coordinates.baseframe import frame_transform_graph from astropy.tests.helper import (assert_quantity_allclose as assert_allclose, catch_warnings) from astropy.time import Time from astropy.units import allclose as quantity_allclose # Coordinates just for these tests. class TCoo1(ICRS): pass class TCoo2(ICRS): pass class TCoo3(ICRS): pass def test_transform_classes(): """ Tests the class-based/OO syntax for creating transforms """ tfun = lambda c, f: f.__class__(ra=c.ra, dec=c.dec) trans1 = t.FunctionTransform(tfun, TCoo1, TCoo2, register_graph=frame_transform_graph) c1 = TCoo1(ra=1*u.radian, dec=0.5*u.radian) c2 = c1.transform_to(TCoo2) assert_allclose(c2.ra.radian, 1) assert_allclose(c2.dec.radian, 0.5) def matfunc(coo, fr): return [[1, 0, 0], [0, coo.ra.degree, 0], [0, 0, 1]] trans2 = t.DynamicMatrixTransform(matfunc, TCoo1, TCoo2) trans2.register(frame_transform_graph) c3 = TCoo1(ra=1*u.deg, dec=2*u.deg) c4 = c3.transform_to(TCoo2) assert_allclose(c4.ra.degree, 1) assert_allclose(c4.ra.degree, 1) # be sure to unregister the second one - no need for trans1 because it # already got unregistered when trans2 was created. trans2.unregister(frame_transform_graph) def test_transform_decos(): """ Tests the decorator syntax for creating transforms """ c1 = TCoo1(ra=1*u.deg, dec=2*u.deg) @frame_transform_graph.transform(t.FunctionTransform, TCoo1, TCoo2) def trans(coo1, f): return TCoo2(ra=coo1.ra, dec=coo1.dec * 2) c2 = c1.transform_to(TCoo2) assert_allclose(c2.ra.degree, 1) assert_allclose(c2.dec.degree, 4) c3 = TCoo1(r.CartesianRepresentation(x=1*u.pc, y=1*u.pc, z=2*u.pc)) @frame_transform_graph.transform(t.StaticMatrixTransform, TCoo1, TCoo2) def matrix(): return [[2, 0, 0], [0, 1, 0], [0, 0, 1]] c4 = c3.transform_to(TCoo2) assert_allclose(c4.cartesian.x, 2*u.pc) assert_allclose(c4.cartesian.y, 1*u.pc) assert_allclose(c4.cartesian.z, 2*u.pc) def test_shortest_path(): class FakeTransform: def __init__(self, pri): self.priority = pri g = t.TransformGraph() # cheating by adding graph elements directly that are not classes - the # graphing algorithm still works fine with integers - it just isn't a valid # TransformGraph # the graph looks is a down-going diamond graph with the lower-right slightly # heavier and a cycle from the bottom to the top # also, a pair of nodes isolated from 1 g._graph[1][2] = FakeTransform(1) g._graph[1][3] = FakeTransform(1) g._graph[2][4] = FakeTransform(1) g._graph[3][4] = FakeTransform(2) g._graph[4][1] = FakeTransform(5) g._graph[5][6] = FakeTransform(1) path, d = g.find_shortest_path(1, 2) assert path == [1, 2] assert d == 1 path, d = g.find_shortest_path(1, 3) assert path == [1, 3] assert d == 1 path, d = g.find_shortest_path(1, 4) print('Cached paths:', g._shortestpaths) assert path == [1, 2, 4] assert d == 2 # unreachable path, d = g.find_shortest_path(1, 5) assert path is None assert d == float('inf') path, d = g.find_shortest_path(5, 6) assert path == [5, 6] assert d == 1 def test_sphere_cart(): """ Tests the spherical <-> cartesian transform functions """ from astropy.utils import NumpyRNGContext from astropy.coordinates import spherical_to_cartesian, cartesian_to_spherical x, y, z = 
spherical_to_cartesian(1, 0, 0) assert_allclose(x, 1) assert_allclose(y, 0) assert_allclose(z, 0) x, y, z = spherical_to_cartesian(0, 1, 1) assert_allclose(x, 0) assert_allclose(y, 0) assert_allclose(z, 0) x, y, z = spherical_to_cartesian(5, 0, np.arcsin(4. / 5.)) assert_allclose(x, 3) assert_allclose(y, 4) assert_allclose(z, 0) r, lat, lon = cartesian_to_spherical(0, 1, 0) assert_allclose(r, 1) assert_allclose(lat, 0 * u.deg) assert_allclose(lon, np.pi / 2 * u.rad) # test round-tripping with NumpyRNGContext(13579): x, y, z = np.random.randn(3, 5) r, lat, lon = cartesian_to_spherical(x, y, z) x2, y2, z2 = spherical_to_cartesian(r, lat, lon) assert_allclose(x, x2) assert_allclose(y, y2) assert_allclose(z, z2) def test_transform_path_pri(): """ This checks that the transformation path prioritization works by making sure the ICRS -> Gal transformation always goes through FK5 and not FK4. """ frame_transform_graph.invalidate_cache() tpath, td = frame_transform_graph.find_shortest_path(ICRS, Galactic) assert tpath == [ICRS, FK5, Galactic] assert td == 2 # but direct from FK4 to Galactic should still be possible tpath, td = frame_transform_graph.find_shortest_path(FK4, Galactic) assert tpath == [FK4, FK4NoETerms, Galactic] assert td == 2 def test_obstime(): """ Checks to make sure observation time is accounted for at least in FK4 <-> ICRS transformations """ b1950 = Time('B1950') j1975 = Time('J1975') fk4_50 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=b1950) fk4_75 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=j1975) icrs_50 = fk4_50.transform_to(ICRS) icrs_75 = fk4_75.transform_to(ICRS) # now check that the resulting coordinates are *different* - they should be, # because the obstime is different assert icrs_50.ra.degree != icrs_75.ra.degree assert icrs_50.dec.degree != icrs_75.dec.degree # ------------------------------------------------------------------------------ # Affine transform tests and helpers: # just acting as a namespace class transfunc: rep = r.CartesianRepresentation(np.arange(3)*u.pc) dif = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr) rep0 = r.CartesianRepresentation(np.zeros(3)*u.pc) @classmethod def both(cls, coo, fr): # exchange x <-> z and offset M = np.array([[0., 0., 1.], [0., 1., 0.], [1., 0., 0.]]) return M, cls.rep.with_differentials(cls.dif) @classmethod def just_matrix(cls, coo, fr): # exchange x <-> z and offset M = np.array([[0., 0., 1.], [0., 1., 0.], [1., 0., 0.]]) return M, None @classmethod def no_matrix(cls, coo, fr): return None, cls.rep.with_differentials(cls.dif) @classmethod def no_pos(cls, coo, fr): return None, cls.rep0.with_differentials(cls.dif) @classmethod def no_vel(cls, coo, fr): return None, cls.rep @pytest.mark.parametrize('transfunc', [transfunc.both, transfunc.no_matrix, transfunc.no_pos, transfunc.no_vel, transfunc.just_matrix]) @pytest.mark.parametrize('rep', [ r.CartesianRepresentation(5, 6, 7, unit=u.pc), r.CartesianRepresentation(5, 6, 7, unit=u.pc, differentials=r.CartesianDifferential(8, 9, 10, unit=u.pc/u.Myr)), r.CartesianRepresentation(5, 6, 7, unit=u.pc, differentials=r.CartesianDifferential(8, 9, 10, unit=u.pc/u.Myr)) .represent_as(r.CylindricalRepresentation, r.CylindricalDifferential) ]) def test_affine_transform_succeed(transfunc, rep): c = TCoo1(rep) # compute expected output M, offset = transfunc(c, TCoo2) _rep = rep.to_cartesian() diffs = dict([(k, diff.represent_as(r.CartesianDifferential, rep)) for k, diff in rep.differentials.items()]) expected_rep = _rep.with_differentials(diffs) if M is not None: expected_rep = 
expected_rep.transform(M) expected_pos = expected_rep.without_differentials() if offset is not None: expected_pos = expected_pos + offset.without_differentials() expected_vel = None if c.data.differentials: expected_vel = expected_rep.differentials['s'] if offset and offset.differentials: expected_vel = (expected_vel + offset.differentials['s']) # register and do the transformation and check against expected trans = t.AffineTransform(transfunc, TCoo1, TCoo2) trans.register(frame_transform_graph) c2 = c.transform_to(TCoo2) assert quantity_allclose(c2.data.to_cartesian().xyz, expected_pos.to_cartesian().xyz) if expected_vel is not None: diff = c2.data.differentials['s'].to_cartesian(base=c2.data) assert quantity_allclose(diff.xyz, expected_vel.d_xyz) trans.unregister(frame_transform_graph) # these should fail def transfunc_invalid_matrix(coo, fr): return np.eye(4), None # Leaving this open in case we want to add more functions to check for failures @pytest.mark.parametrize('transfunc', [transfunc_invalid_matrix]) def test_affine_transform_fail(transfunc): diff = r.CartesianDifferential(8, 9, 10, unit=u.pc/u.Myr) rep = r.CartesianRepresentation(5, 6, 7, unit=u.pc, differentials=diff) c = TCoo1(rep) # register and do the transformation and check against expected trans = t.AffineTransform(transfunc, TCoo1, TCoo2) trans.register(frame_transform_graph) with pytest.raises(ValueError): c2 = c.transform_to(TCoo2) trans.unregister(frame_transform_graph) def test_too_many_differentials(): dif1 = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr) dif2 = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr**2) rep = r.CartesianRepresentation(np.arange(3)*u.pc, differentials={'s': dif1, 's2': dif2}) with pytest.raises(ValueError): c = TCoo1(rep) # register and do the transformation and check against expected trans = t.AffineTransform(transfunc.both, TCoo1, TCoo2) trans.register(frame_transform_graph) # Check that if frame somehow gets through to transformation, multiple # differentials are caught c = TCoo1(rep.without_differentials()) c._data = c._data.with_differentials({'s': dif1, 's2': dif2}) with pytest.raises(ValueError): c2 = c.transform_to(TCoo2) trans.unregister(frame_transform_graph) # A matrix transform of a unit spherical with differentials should work @pytest.mark.parametrize('rep', [ r.UnitSphericalRepresentation(lon=15*u.degree, lat=-11*u.degree, differentials=r.SphericalDifferential(d_lon=15*u.mas/u.yr, d_lat=11*u.mas/u.yr, d_distance=-110*u.km/u.s)), r.UnitSphericalRepresentation(lon=15*u.degree, lat=-11*u.degree, differentials={'s': r.RadialDifferential(d_distance=-110*u.km/u.s)}), r.SphericalRepresentation(lon=15*u.degree, lat=-11*u.degree, distance=150*u.pc, differentials={'s': r.RadialDifferential(d_distance=-110*u.km/u.s)}) ]) def test_unit_spherical_with_differentials(rep): c = TCoo1(rep) # register and do the transformation and check against expected trans = t.AffineTransform(transfunc.just_matrix, TCoo1, TCoo2) trans.register(frame_transform_graph) c2 = c.transform_to(TCoo2) assert 's' in rep.differentials assert isinstance(c2.data.differentials['s'], rep.differentials['s'].__class__) if isinstance(rep.differentials['s'], r.RadialDifferential): assert c2.data.differentials['s'] is rep.differentials['s'] trans.unregister(frame_transform_graph) # should fail if we have to do offsets trans = t.AffineTransform(transfunc.both, TCoo1, TCoo2) trans.register(frame_transform_graph) with pytest.raises(TypeError): c.transform_to(TCoo2) trans.unregister(frame_transform_graph) 
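# Illustrative sketch, not part of the tests: the contract an
# AffineTransform function must satisfy -- return a 3x3 matrix (or None)
# and an offset representation, possibly carrying differentials (or None),
# just as the ``transfunc`` namespace above does. The name is hypothetical.
def _example_affine_transfunc(coo, fr):
    # Exchange x <-> z ...
    M = np.array([[0., 0., 1.],
                  [0., 1., 0.],
                  [1., 0., 0.]])
    # ... and offset by 1 pc along x.
    offset = r.CartesianRepresentation([1., 0., 0.]*u.pc)
    return M, offset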
@pytest.mark.remote_data def test_vel_transformation_obstime_err(): # TODO: replace after a final decision on PR #6280 from astropy.coordinates.sites import get_builtin_sites diff = r.CartesianDifferential([.1, .2, .3]*u.km/u.s) rep = r.CartesianRepresentation([1, 2, 3]*u.au, differentials=diff) loc = get_builtin_sites()['example_site'] aaf = AltAz(obstime='J2010', location=loc) aaf2 = AltAz(obstime=aaf.obstime + 3*u.day, location=loc) aaf3 = AltAz(obstime=aaf.obstime + np.arange(3)*u.day, location=loc) aaf4 = AltAz(obstime=aaf.obstime, location=loc) aa = aaf.realize_frame(rep) with pytest.raises(NotImplementedError) as exc: aa.transform_to(aaf2) assert 'cannot transform' in exc.value.args[0] with pytest.raises(NotImplementedError) as exc: aa.transform_to(aaf3) assert 'cannot transform' in exc.value.args[0] aa.transform_to(aaf4) aa.transform_to(ICRS()) def test_function_transform_with_differentials(): tfun = lambda c, f: f.__class__(ra=c.ra, dec=c.dec) ftrans = t.FunctionTransform(tfun, TCoo3, TCoo2, register_graph=frame_transform_graph) t3 = TCoo3(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=1*u.marcsec/u.yr, pm_dec=1*u.marcsec/u.yr,) with catch_warnings() as w: t2 = t3.transform_to(TCoo2) assert len(w) == 1 assert 'they have been dropped' in str(w[0].message) def test_frame_override_component_with_attribute(): """ It was previously possible to define a frame with an attribute with the same name as a component. We don't want to allow this! """ from astropy.coordinates.baseframe import BaseCoordinateFrame from astropy.coordinates.attributes import Attribute class BorkedFrame(BaseCoordinateFrame): ra = Attribute(default=150) dec = Attribute(default=150) def trans_func(coo1, f): pass trans = t.FunctionTransform(trans_func, BorkedFrame, ICRS) with pytest.raises(ValueError) as exc: trans.register(frame_transform_graph) assert ('BorkedFrame' in exc.value.args[0] and "'ra'" in exc.value.args[0] and "'dec'" in exc.value.args[0]) def test_static_matrix_combine_paths(): """ Check that combined staticmatrixtransform matrices provide the same transformation as using an intermediate transformation. This is somewhat of a regression test for #7706 """ from astropy.coordinates.baseframe import BaseCoordinateFrame from astropy.coordinates.matrix_utilities import rotation_matrix class AFrame(BaseCoordinateFrame): default_representation = r.SphericalRepresentation default_differential = r.SphericalCosLatDifferential t1 = t.StaticMatrixTransform(rotation_matrix(30.*u.deg, 'z'), ICRS, AFrame) t1.register(frame_transform_graph) t2 = t.StaticMatrixTransform(rotation_matrix(30.*u.deg, 'z').T, AFrame, ICRS) t2.register(frame_transform_graph) class BFrame(BaseCoordinateFrame): default_representation = r.SphericalRepresentation default_differential = r.SphericalCosLatDifferential t3 = t.StaticMatrixTransform(rotation_matrix(30.*u.deg, 'x'), ICRS, BFrame) t3.register(frame_transform_graph) t4 = t.StaticMatrixTransform(rotation_matrix(30.*u.deg, 'x').T, BFrame, ICRS) t4.register(frame_transform_graph) c = Galactic(123*u.deg, 45*u.deg) c1 = c.transform_to(BFrame) # direct c2 = c.transform_to(AFrame).transform_to(BFrame) # thru A c3 = c.transform_to(ICRS).transform_to(BFrame) # thru ICRS assert quantity_allclose(c1.lon, c2.lon) assert quantity_allclose(c1.lat, c2.lat) assert quantity_allclose(c1.lon, c3.lon) assert quantity_allclose(c1.lat, c3.lat) for t_ in [t1, t2, t3, t4]: t_.unregister(frame_transform_graph)
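# Illustrative sketch, not part of the tests: the register/transform/
# unregister pattern every test in this module follows, so one test's edits
# to the shared transform graph do not leak into the next. The function
# name is hypothetical.
def _example_transform_lifecycle():
    trans = t.FunctionTransform(lambda c, f: f.__class__(ra=c.ra, dec=c.dec),
                                TCoo1, TCoo2)
    trans.register(frame_transform_graph)
    try:
        return TCoo1(ra=1*u.deg, dec=2*u.deg).transform_to(TCoo2)
    finally:
        # Always remove the edge again, even if the transform raised.
        trans.unregister(frame_transform_graph)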
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from astropy import units as u from astropy.coordinates import (SphericalRepresentation, Longitude, Latitude, SphericalDifferential) class TestManipulation(): """Manipulation of Representation shapes. Checking that attributes are manipulated correctly. Even more exhaustive tests are done in time.tests.test_methods """ def setup(self): lon = Longitude(np.arange(0, 24, 4), u.hourangle) lat = Latitude(np.arange(-90, 91, 30), u.deg) # With same-sized arrays self.s0 = SphericalRepresentation( lon[:, np.newaxis] * np.ones(lat.shape), lat * np.ones(lon.shape)[:, np.newaxis], np.ones(lon.shape + lat.shape) * u.kpc) self.diff = SphericalDifferential( d_lon=np.ones(self.s0.shape)*u.mas/u.yr, d_lat=np.ones(self.s0.shape)*u.mas/u.yr, d_distance=np.ones(self.s0.shape)*u.km/u.s) self.s0 = self.s0.with_differentials(self.diff) # With unequal arrays -> these will be broadcasted. self.s1 = SphericalRepresentation(lon[:, np.newaxis], lat, 1. * u.kpc, differentials=self.diff) # For completeness on some tests, also a cartesian one self.c0 = self.s0.to_cartesian() def test_ravel(self): s0_ravel = self.s0.ravel() assert type(s0_ravel) is type(self.s0) assert s0_ravel.shape == (self.s0.size,) assert np.all(s0_ravel.lon == self.s0.lon.ravel()) assert np.may_share_memory(s0_ravel.lon, self.s0.lon) assert np.may_share_memory(s0_ravel.lat, self.s0.lat) assert np.may_share_memory(s0_ravel.distance, self.s0.distance) assert s0_ravel.differentials['s'].shape == (self.s0.size,) # Since s1 was broadcast, ravel needs to make a copy. s1_ravel = self.s1.ravel() assert type(s1_ravel) is type(self.s1) assert s1_ravel.shape == (self.s1.size,) assert s1_ravel.differentials['s'].shape == (self.s1.size,) assert np.all(s1_ravel.lon == self.s1.lon.ravel()) assert not np.may_share_memory(s1_ravel.lat, self.s1.lat) def test_copy(self): s0_copy = self.s0.copy() s0_copy_diff = s0_copy.differentials['s'] assert s0_copy.shape == self.s0.shape assert np.all(s0_copy.lon == self.s0.lon) assert np.all(s0_copy.lat == self.s0.lat) # Check copy was made of internal data. assert not np.may_share_memory(s0_copy.distance, self.s0.distance) assert not np.may_share_memory(s0_copy_diff.d_lon, self.diff.d_lon) def test_flatten(self): s0_flatten = self.s0.flatten() s0_diff = s0_flatten.differentials['s'] assert s0_flatten.shape == (self.s0.size,) assert s0_diff.shape == (self.s0.size,) assert np.all(s0_flatten.lon == self.s0.lon.flatten()) assert np.all(s0_diff.d_lon == self.diff.d_lon.flatten()) # Flatten always copies. 
assert not np.may_share_memory(s0_flatten.distance, self.s0.distance) assert not np.may_share_memory(s0_diff.d_lon, self.diff.d_lon) s1_flatten = self.s1.flatten() assert s1_flatten.shape == (self.s1.size,) assert np.all(s1_flatten.lon == self.s1.lon.flatten()) assert not np.may_share_memory(s1_flatten.lat, self.s1.lat) def test_transpose(self): s0_transpose = self.s0.transpose() s0_diff = s0_transpose.differentials['s'] assert s0_transpose.shape == (7, 6) assert s0_diff.shape == s0_transpose.shape assert np.all(s0_transpose.lon == self.s0.lon.transpose()) assert np.all(s0_diff.d_lon == self.diff.d_lon.transpose()) assert np.may_share_memory(s0_transpose.distance, self.s0.distance) assert np.may_share_memory(s0_diff.d_lon, self.diff.d_lon) s1_transpose = self.s1.transpose() s1_diff = s1_transpose.differentials['s'] assert s1_transpose.shape == (7, 6) assert s1_diff.shape == s1_transpose.shape assert np.all(s1_transpose.lat == self.s1.lat.transpose()) assert np.all(s1_diff.d_lon == self.diff.d_lon.transpose()) assert np.may_share_memory(s1_transpose.lat, self.s1.lat) assert np.may_share_memory(s1_diff.d_lon, self.diff.d_lon) # Only one check on T, since it just calls transpose anyway. # Doing it on the CartesianRepresentation just for variety's sake. c0_T = self.c0.T assert c0_T.shape == (7, 6) assert np.all(c0_T.x == self.c0.x.T) assert np.may_share_memory(c0_T.y, self.c0.y) def test_diagonal(self): s0_diagonal = self.s0.diagonal() s0_diff = s0_diagonal.differentials['s'] assert s0_diagonal.shape == (6,) assert s0_diff.shape == s0_diagonal.shape assert np.all(s0_diagonal.lat == self.s0.lat.diagonal()) assert np.all(s0_diff.d_lon == self.diff.d_lon.diagonal()) assert np.may_share_memory(s0_diagonal.lat, self.s0.lat) assert np.may_share_memory(s0_diff.d_lon, self.diff.d_lon) def test_swapaxes(self): s1_swapaxes = self.s1.swapaxes(0, 1) s1_diff = s1_swapaxes.differentials['s'] assert s1_swapaxes.shape == (7, 6) assert s1_diff.shape == s1_swapaxes.shape assert np.all(s1_swapaxes.lat == self.s1.lat.swapaxes(0, 1)) assert np.all(s1_diff.d_lon == self.diff.d_lon.swapaxes(0, 1)) assert np.may_share_memory(s1_swapaxes.lat, self.s1.lat) assert np.may_share_memory(s1_diff.d_lon, self.diff.d_lon) def test_reshape(self): s0_reshape = self.s0.reshape(2, 3, 7) s0_diff = s0_reshape.differentials['s'] assert s0_reshape.shape == (2, 3, 7) assert s0_diff.shape == s0_reshape.shape assert np.all(s0_reshape.lon == self.s0.lon.reshape(2, 3, 7)) assert np.all(s0_reshape.lat == self.s0.lat.reshape(2, 3, 7)) assert np.all(s0_reshape.distance == self.s0.distance.reshape(2, 3, 7)) assert np.may_share_memory(s0_reshape.lon, self.s0.lon) assert np.may_share_memory(s0_reshape.lat, self.s0.lat) assert np.may_share_memory(s0_reshape.distance, self.s0.distance) s1_reshape = self.s1.reshape(3, 2, 7) s1_diff = s1_reshape.differentials['s'] assert s1_reshape.shape == (3, 2, 7) assert s1_diff.shape == s1_reshape.shape assert np.all(s1_reshape.lat == self.s1.lat.reshape(3, 2, 7)) assert np.all(s1_diff.d_lon == self.diff.d_lon.reshape(3, 2, 7)) assert np.may_share_memory(s1_reshape.lat, self.s1.lat) assert np.may_share_memory(s1_diff.d_lon, self.diff.d_lon) # For reshape(3, 14), copying is necessary for lon, lat, but not for d s1_reshape2 = self.s1.reshape(3, 14) assert s1_reshape2.shape == (3, 14) assert np.all(s1_reshape2.lon == self.s1.lon.reshape(3, 14)) assert not np.may_share_memory(s1_reshape2.lon, self.s1.lon) assert s1_reshape2.distance.shape == (3, 14) assert np.may_share_memory(s1_reshape2.distance, self.s1.distance) 
def test_shape_setting(self): # Shape-setting should be on the object itself, since copying removes # zero-strides due to broadcasting. We reset the objects at the end. self.s0.shape = (2, 3, 7) assert self.s0.shape == (2, 3, 7) assert self.s0.lon.shape == (2, 3, 7) assert self.s0.lat.shape == (2, 3, 7) assert self.s0.distance.shape == (2, 3, 7) assert self.diff.shape == (2, 3, 7) assert self.diff.d_lon.shape == (2, 3, 7) assert self.diff.d_lat.shape == (2, 3, 7) assert self.diff.d_distance.shape == (2, 3, 7) # this works with the broadcasting. self.s1.shape = (2, 3, 7) assert self.s1.shape == (2, 3, 7) assert self.s1.lon.shape == (2, 3, 7) assert self.s1.lat.shape == (2, 3, 7) assert self.s1.distance.shape == (2, 3, 7) assert self.s1.distance.strides == (0, 0, 0) # but this one does not. oldshape = self.s1.shape with pytest.raises(AttributeError): self.s1.shape = (42,) assert self.s1.shape == oldshape assert self.s1.lon.shape == oldshape assert self.s1.lat.shape == oldshape assert self.s1.distance.shape == oldshape # Finally, a more complicated one that checks that things get reset # properly if it is not the first component that fails. s2 = SphericalRepresentation(self.s1.lon.copy(), self.s1.lat, self.s1.distance, copy=False) assert 0 not in s2.lon.strides assert 0 in s2.lat.strides with pytest.raises(AttributeError): s2.shape = (42,) assert s2.shape == oldshape assert s2.lon.shape == oldshape assert s2.lat.shape == oldshape assert s2.distance.shape == oldshape assert 0 not in s2.lon.strides assert 0 in s2.lat.strides self.setup() def test_squeeze(self): s0_squeeze = self.s0.reshape(3, 1, 2, 1, 7).squeeze() s0_diff = s0_squeeze.differentials['s'] assert s0_squeeze.shape == (3, 2, 7) assert s0_diff.shape == s0_squeeze.shape assert np.all(s0_squeeze.lat == self.s0.lat.reshape(3, 2, 7)) assert np.all(s0_diff.d_lon == self.diff.d_lon.reshape(3, 2, 7)) assert np.may_share_memory(s0_squeeze.lat, self.s0.lat) def test_add_dimension(self): s0_adddim = self.s0[:, np.newaxis, :] s0_diff = s0_adddim.differentials['s'] assert s0_adddim.shape == (6, 1, 7) assert s0_diff.shape == s0_adddim.shape assert np.all(s0_adddim.lon == self.s0.lon[:, np.newaxis, :]) assert np.all(s0_diff.d_lon == self.diff.d_lon[:, np.newaxis, :]) assert np.may_share_memory(s0_adddim.lat, self.s0.lat) def test_take(self): s0_take = self.s0.take((5, 2)) s0_diff = s0_take.differentials['s'] assert s0_take.shape == (2,) assert s0_diff.shape == s0_take.shape assert np.all(s0_take.lon == self.s0.lon.take((5, 2))) assert np.all(s0_diff.d_lon == self.diff.d_lon.take((5, 2))) def test_broadcast_to(self): s0_broadcast = self.s0._apply(np.broadcast_to, (3, 6, 7), subok=True) s0_diff = s0_broadcast.differentials['s'] assert type(s0_broadcast) is type(self.s0) assert s0_broadcast.shape == (3, 6, 7) assert s0_diff.shape == s0_broadcast.shape assert np.all(s0_broadcast.lon == self.s0.lon) assert np.all(s0_broadcast.lat == self.s0.lat) assert np.all(s0_broadcast.distance == self.s0.distance) assert np.may_share_memory(s0_broadcast.lon, self.s0.lon) assert np.may_share_memory(s0_broadcast.lat, self.s0.lat) assert np.may_share_memory(s0_broadcast.distance, self.s0.distance) s1_broadcast = self.s1._apply(np.broadcast_to, shape=(3, 6, 7), subok=True) s1_diff = s1_broadcast.differentials['s'] assert s1_broadcast.shape == (3, 6, 7) assert s1_diff.shape == s1_broadcast.shape assert np.all(s1_broadcast.lat == self.s1.lat) assert np.all(s1_broadcast.lon == self.s1.lon) assert np.all(s1_broadcast.distance == self.s1.distance) assert 
s1_broadcast.distance.shape == (3, 6, 7) assert np.may_share_memory(s1_broadcast.lat, self.s1.lat) assert np.may_share_memory(s1_broadcast.lon, self.s1.lon) assert np.may_share_memory(s1_broadcast.distance, self.s1.distance) # A final test that "may_share_memory" equals "does_share_memory" # Do this on a copy, to keep self.s0 unchanged. sc = self.s0.copy() assert not np.may_share_memory(sc.lon, self.s0.lon) assert not np.may_share_memory(sc.lat, self.s0.lat) sc_broadcast = sc._apply(np.broadcast_to, (3, 6, 7), subok=True) assert np.may_share_memory(sc_broadcast.lon, sc.lon) # Can only write to copy, not to broadcast version. sc.lon[0, 0] = 22. * u.hourangle assert np.all(sc_broadcast.lon[:, 0, 0] == 22. * u.hourangle)
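# Illustrative sketch, not part of the tests: the view-versus-copy rule the
# assertions above encode -- shape operations return component views when
# the strides allow it, and copies otherwise. The function name is
# hypothetical.
def _example_view_vs_copy():
    s = SphericalRepresentation(
        Longitude(np.arange(6).reshape(2, 3), u.hourangle),
        Latitude(np.zeros((2, 3)), u.deg),
        np.ones((2, 3)) * u.kpc)
    s_ravel = s.ravel()
    # Contiguous components: ravel can return views of the originals.
    assert np.may_share_memory(s_ravel.lon, s.lon)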
import pytest import numpy as np from astropy.tests.helper import assert_quantity_allclose from astropy import units as u from astropy.time import Time from astropy.coordinates import EarthLocation, SkyCoord, Angle from astropy.coordinates.sites import get_builtin_sites @pytest.mark.remote_data @pytest.mark.parametrize('kind', ['heliocentric', 'barycentric']) def test_basic(kind): t0 = Time('2015-1-1') loc = get_builtin_sites()['example_site'] sc = SkyCoord(0, 0, unit=u.deg, obstime=t0, location=loc) rvc0 = sc.radial_velocity_correction(kind) assert rvc0.shape == () assert rvc0.unit.is_equivalent(u.km/u.s) scs = SkyCoord(0, 0, unit=u.deg, obstime=t0 + np.arange(10)*u.day, location=loc) rvcs = scs.radial_velocity_correction(kind) assert rvcs.shape == (10,) assert rvcs.unit.is_equivalent(u.km/u.s) test_input_time = Time(2457244.5, format='jd') # test_input_loc = EarthLocation.of_site('Cerro Paranal') # to avoid the network hit we just copy here what that yields test_input_loc = EarthLocation.from_geodetic(lon=-70.403*u.deg, lat=-24.6252*u.deg, height=2635*u.m) @pytest.mark.remote_data def test_helio_iraf(): """ Compare the heliocentric correction to the IRAF rvcorrect. `generate_IRAF_input` function is provided to show how the comparison data was produced """ # this is based on running IRAF with the output of `generate_IRAF_input` below rvcorr_result = """ # RVCORRECT: Observatory parameters for European Southern Observatory: Paranal # latitude = -24:37.5 # longitude = 70:24.2 # altitude = 2635 ## HJD VOBS VHELIO VLSR VDIURNAL VLUNAR VANNUAL VSOLAR 2457244.50120 0.00 -10.36 -20.35 -0.034 -0.001 -10.325 -9.993 2457244.50025 0.00 -14.20 -23.86 -0.115 -0.004 -14.085 -9.656 2457244.50278 0.00 -2.29 -11.75 0.115 0.004 -2.413 -9.459 2457244.50025 0.00 -14.20 -23.86 -0.115 -0.004 -14.085 -9.656 2457244.49929 0.00 -17.41 -26.30 -0.192 -0.006 -17.214 -8.888 2457244.50317 0.00 -17.19 -17.44 0.078 0.001 -17.269 -0.253 2457244.50348 0.00 2.35 -6.21 0.192 0.006 2.156 -8.560 2457244.49959 0.00 2.13 -15.06 -0.078 -0.000 2.211 -17.194 2457244.49929 0.00 -17.41 -26.30 -0.192 -0.006 -17.214 -8.888 2457244.49835 0.00 -19.84 -27.56 -0.259 -0.008 -19.573 -7.721 2457244.50186 0.00 -24.47 -22.16 -0.038 -0.004 -24.433 2.313 2457244.50470 0.00 -11.11 -8.57 0.221 0.005 -11.332 2.534 2457244.50402 0.00 6.90 -0.38 0.259 0.008 6.629 -7.277 2457244.50051 0.00 11.53 -5.78 0.038 0.004 11.489 -17.311 2457244.49768 0.00 -1.84 -19.37 -0.221 -0.004 -1.612 -17.533 2457244.49835 0.00 -19.84 -27.56 -0.259 -0.008 -19.573 -7.721 2457244.49749 0.00 -21.38 -27.59 -0.315 -0.010 -21.056 -6.209 2457244.50109 0.00 -27.69 -22.90 -0.096 -0.006 -27.584 4.785 2457244.50457 0.00 -17.00 -9.30 0.196 0.003 -17.201 7.704 2457244.50532 0.00 2.62 2.97 0.340 0.009 2.276 0.349 2457244.50277 0.00 16.42 4.67 0.228 0.009 16.178 -11.741 2457244.49884 0.00 13.98 -5.48 -0.056 0.002 14.039 -19.463 2457244.49649 0.00 -2.84 -19.84 -0.297 -0.007 -2.533 -17.000 2457244.49749 0.00 -21.38 -27.59 -0.315 -0.010 -21.056 -6.209 2457244.49675 0.00 -21.97 -26.39 -0.357 -0.011 -21.598 -4.419 2457244.50025 0.00 -29.30 -22.47 -0.149 -0.008 -29.146 6.831 2457244.50398 0.00 -21.55 -9.88 0.146 0.001 -21.700 11.670 2457244.50577 0.00 -3.26 4.00 0.356 0.009 -3.623 7.263 2457244.50456 0.00 14.87 11.06 0.357 0.011 14.497 -3.808 2457244.50106 0.00 22.20 7.14 0.149 0.008 22.045 -15.058 2457244.49732 0.00 14.45 -5.44 -0.146 -0.001 14.600 -19.897 2457244.49554 0.00 -3.84 -19.33 -0.356 -0.008 -3.478 -15.491 2457244.49675 0.00 -21.97 -26.39 -0.357 -0.011 -21.598 -4.419 2457244.49615 
0.00 -21.57 -24.00 -0.383 -0.012 -21.172 -2.432 2457244.49942 0.00 -29.36 -20.83 -0.193 -0.009 -29.157 8.527 2457244.50312 0.00 -24.26 -9.75 0.088 -0.001 -24.348 14.511 2457244.50552 0.00 -8.66 4.06 0.327 0.007 -8.996 12.721 2457244.50549 0.00 10.14 14.13 0.413 0.012 9.715 3.994 2457244.50305 0.00 23.35 15.76 0.306 0.011 23.031 -7.586 2457244.49933 0.00 24.78 8.18 0.056 0.006 24.721 -16.601 2457244.49609 0.00 13.77 -5.06 -0.221 -0.003 13.994 -18.832 2457244.49483 0.00 -4.53 -17.77 -0.394 -0.010 -4.131 -13.237 2457244.49615 0.00 -21.57 -24.00 -0.383 -0.012 -21.172 -2.432 2457244.49572 0.00 -20.20 -20.54 -0.392 -0.013 -19.799 -0.335 2457244.49907 0.00 -28.17 -17.30 -0.197 -0.009 -27.966 10.874 2457244.50285 0.00 -22.96 -5.96 0.090 -0.001 -23.048 16.995 2457244.50531 0.00 -7.00 8.16 0.335 0.007 -7.345 15.164 2457244.50528 0.00 12.23 18.47 0.423 0.012 11.795 6.238 2457244.50278 0.00 25.74 20.13 0.313 0.012 25.416 -5.607 2457244.49898 0.00 27.21 12.38 0.057 0.006 27.144 -14.829 2457244.49566 0.00 15.94 -1.17 -0.226 -0.003 16.172 -17.111 2457244.49437 0.00 -2.78 -14.17 -0.403 -0.010 -2.368 -11.387 2457244.49572 0.00 -20.20 -20.54 -0.392 -0.013 -19.799 -0.335 2457244.49548 0.00 -17.94 -16.16 -0.383 -0.012 -17.541 1.776 2457244.49875 0.00 -25.73 -12.99 -0.193 -0.009 -25.525 12.734 2457244.50246 0.00 -20.63 -1.91 0.088 -0.001 -20.716 18.719 2457244.50485 0.00 -5.03 11.90 0.327 0.007 -5.365 16.928 2457244.50482 0.00 13.77 21.97 0.413 0.012 13.347 8.202 2457244.50238 0.00 26.98 23.60 0.306 0.011 26.663 -3.378 2457244.49867 0.00 28.41 16.02 0.056 0.005 28.353 -12.393 2457244.49542 0.00 17.40 2.78 -0.221 -0.003 17.625 -14.625 2457244.49416 0.00 -0.90 -9.93 -0.394 -0.010 -0.499 -9.029 2457244.49548 0.00 -17.94 -16.16 -0.383 -0.012 -17.541 1.776 2457244.49544 0.00 -14.87 -11.06 -0.357 -0.011 -14.497 3.808 2457244.49894 0.00 -22.20 -7.14 -0.149 -0.008 -22.045 15.058 2457244.50268 0.00 -14.45 5.44 0.146 0.001 -14.600 19.897 2457244.50446 0.00 3.84 19.33 0.356 0.008 3.478 15.491 2457244.50325 0.00 21.97 26.39 0.357 0.011 21.598 4.419 2457244.49975 0.00 29.30 22.47 0.149 0.008 29.146 -6.831 2457244.49602 0.00 21.55 9.88 -0.146 -0.001 21.700 -11.670 2457244.49423 0.00 3.26 -4.00 -0.356 -0.009 3.623 -7.263 2457244.49544 0.00 -14.87 -11.06 -0.357 -0.011 -14.497 3.808 2457244.49561 0.00 -11.13 -5.46 -0.315 -0.010 -10.805 5.670 2457244.49921 0.00 -17.43 -0.77 -0.096 -0.006 -17.333 16.664 2457244.50269 0.00 -6.75 12.83 0.196 0.003 -6.949 19.583 2457244.50344 0.00 12.88 25.10 0.340 0.009 12.527 12.227 2457244.50089 0.00 26.67 26.80 0.228 0.009 26.430 0.137 2457244.49696 0.00 24.24 16.65 -0.056 0.002 24.290 -7.584 2457244.49461 0.00 7.42 2.29 -0.297 -0.007 7.719 -5.122 2457244.49561 0.00 -11.13 -5.46 -0.315 -0.010 -10.805 5.670 2457244.49598 0.00 -6.90 0.38 -0.259 -0.008 -6.629 7.277 2457244.49949 0.00 -11.53 5.78 -0.038 -0.004 -11.489 17.311 2457244.50232 0.00 1.84 19.37 0.221 0.004 1.612 17.533 2457244.50165 0.00 19.84 27.56 0.259 0.008 19.573 7.721 2457244.49814 0.00 24.47 22.16 0.038 0.004 24.433 -2.313 2457244.49530 0.00 11.11 8.57 -0.221 -0.005 11.332 -2.534 2457244.49598 0.00 -6.90 0.38 -0.259 -0.008 -6.629 7.277 2457244.49652 0.00 -2.35 6.21 -0.192 -0.006 -2.156 8.560 2457244.50041 0.00 -2.13 15.06 0.078 0.000 -2.211 17.194 2457244.50071 0.00 17.41 26.30 0.192 0.006 17.214 8.888 2457244.49683 0.00 17.19 17.44 -0.078 -0.001 17.269 0.253 2457244.49652 0.00 -2.35 6.21 -0.192 -0.006 -2.156 8.560 2457244.49722 0.00 2.29 11.75 -0.115 -0.004 2.413 9.459 2457244.49975 0.00 14.20 23.86 0.115 0.004 14.085 9.656 
2457244.49722 0.00 2.29 11.75 -0.115 -0.004 2.413 9.459 2457244.49805 0.00 6.84 16.77 -0.034 -0.001 6.874 9.935 """ vhs_iraf = [] for line in rvcorr_result.strip().split('\n'): if not line.strip().startswith('#'): vhs_iraf.append(float(line.split()[2])) vhs_iraf = vhs_iraf*u.km/u.s targets = SkyCoord(_get_test_input_radecs(), obstime=test_input_time, location=test_input_loc) vhs_astropy = targets.radial_velocity_correction('heliocentric') assert_quantity_allclose(vhs_astropy, vhs_iraf, atol=150*u.m/u.s) return vhs_astropy, vhs_iraf # for interactively examination def generate_IRAF_input(writefn=None): dt = test_input_time.utc.datetime coos = _get_test_input_radecs() lines = [] for ra, dec in zip(coos.ra, coos.dec): rastr = Angle(ra).to_string(u.hour, sep=':') decstr = Angle(dec).to_string(u.deg, sep=':') msg = '{yr} {mo} {day} {uth}:{utmin} {ra} {dec}' lines.append(msg.format(yr=dt.year, mo=dt.month, day=dt.day, uth=dt.hour, utmin=dt.minute, ra=rastr, dec=decstr)) if writefn: with open(writefn, 'w') as f: for l in lines: f.write(l) else: for l in lines: print(l) print('Run IRAF as:\nastutil\nrvcorrect f=<filename> observatory=Paranal') def _get_test_input_radecs(): ras = [] decs = [] for dec in np.linspace(-85, 85, 15): nra = int(np.round(10*np.cos(dec*u.deg)).value) ras1 = np.linspace(-180, 180-1e-6, nra) ras.extend(ras1) decs.extend([dec]*len(ras1)) return SkyCoord(ra=ras, dec=decs, unit=u.deg) @pytest.mark.remote_data def test_barycorr(): # this is the result of calling _get_barycorr_bvcs barycorr_bvcs = u.Quantity([ -10335.93326096, -14198.47605491, -2237.60012494, -14198.47595363, -17425.46512587, -17131.70901174, 2424.37095076, 2130.61519166, -17425.46495779, -19872.50026998, -24442.37091097, -11017.08975893, 6978.0622355, 11547.93333743, -1877.34772637, -19872.50004258, -21430.08240017, -27669.14280689, -16917.08506807, 2729.57222968, 16476.49569232, 13971.97171764, -2898.04250914, -21430.08212368, -22028.51337105, -29301.92349394, -21481.13036199, -3147.44828909, 14959.50065514, 22232.91155425, 14412.11903105, -3921.56359768, -22028.51305781, -21641.01479409, -29373.0512649, -24205.90521765, -8557.34138828, 10250.50350732, 23417.2299926, 24781.98057941, 13706.17339044, -4627.70005932, -21641.01445812, -20284.92627505, -28193.91696959, -22908.51624166, -6901.82132125, 12336.45758056, 25804.51614607, 27200.50029664, 15871.21385688, -2882.24738355, -20284.9259314, -18020.92947805, -25752.96564978, -20585.81957567, -4937.25573801, 13870.58916957, 27037.31568441, 28402.06636994, 17326.25977035, -1007.62209045, -18020.92914212, -14950.33284575, -22223.74260839, -14402.94943965, 3930.73265119, 22037.68163353, 29311.09265126, 21490.30070307, 3156.62229843, -14950.33253252, -11210.53846867, -17449.59867676, -6697.54090389, 12949.11642965, 26696.03999586, 24191.5164355, 7321.50355488, -11210.53819218, -6968.89359681, -11538.76423011, 1886.51695238, 19881.66902396, 24451.54039956, 11026.26000765, -6968.89336945, -2415.20201758, -2121.44599781, 17434.63406085, 17140.87871753, -2415.2018495, 2246.76923076, 14207.64513054, 2246.76933194, 6808.40787728], u.m/u.s) # this tries the *other* way of calling radial_velocity_correction relative # to the IRAF tests targets = _get_test_input_radecs() bvcs_astropy = targets.radial_velocity_correction(obstime=test_input_time, location=test_input_loc, kind='barycentric') assert_quantity_allclose(bvcs_astropy, barycorr_bvcs, atol=10*u.mm/u.s) return bvcs_astropy, barycorr_bvcs # for interactively examination def _get_barycorr_bvcs(coos, loc, injupyter=False): 
""" Gets the barycentric correction of the test data from the http://astroutils.astronomy.ohio-state.edu/exofast/barycorr.html web site. Requires the https://github.com/tronsgaard/barycorr python interface to that site. Provided to reproduce the test data above, but not required to actually run the tests. """ import barycorr from astropy.utils.console import ProgressBar bvcs = [] for ra, dec in ProgressBar(list(zip(coos.ra.deg, coos.dec.deg)), ipython_widget=injupyter): res = barycorr.bvc(test_input_time.utc.jd, ra, dec, lat=loc.geodetic[1].deg, lon=loc.geodetic[0].deg, elevation=loc.geodetic[2].to(u.m).value) bvcs.append(res) return bvcs*u.m/u.s @pytest.mark.remote_data def test_rvcorr_multiple_obstimes_onskycoord(): loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m) arrtime = Time('2005-03-21 00:00:00') + np.linspace(-1, 1, 10)*u.day sc = SkyCoord(1*u.deg, 2*u.deg, 100*u.kpc, obstime=arrtime, location=loc) rvcbary_sc2 = sc.radial_velocity_correction(kind='barycentric') assert len(rvcbary_sc2) == 10 # check the multiple-obstime and multi- mode sc = SkyCoord(([1]*10)*u.deg, 2*u.deg, 100*u.kpc, obstime=arrtime, location=loc) rvcbary_sc3 = sc.radial_velocity_correction(kind='barycentric') assert len(rvcbary_sc3) == 10 @pytest.mark.remote_data def test_invalid_argument_combos(): loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m) time = Time('2005-03-21 00:00:00') timel = Time('2005-03-21 00:00:00', location=loc) scwattrs = SkyCoord(1*u.deg, 2*u.deg, obstime=time, location=loc) scwoattrs = SkyCoord(1*u.deg, 2*u.deg) scwattrs.radial_velocity_correction() with pytest.raises(ValueError): scwattrs.radial_velocity_correction(obstime=time, location=loc) with pytest.raises(TypeError): scwoattrs.radial_velocity_correction(obstime=time) scwoattrs.radial_velocity_correction(obstime=time, location=loc) with pytest.raises(TypeError): scwoattrs.radial_velocity_correction() with pytest.raises(ValueError): scwattrs.radial_velocity_correction(timel)
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np import pytest from astropy import units as u from astropy.coordinates import Longitude, Latitude, EarthLocation, SkyCoord # test on frame with most complicated frame attributes. from astropy.coordinates.builtin_frames import ICRS, AltAz, GCRS from astropy.time import Time @pytest.mark.remote_data class TestManipulation(): """Manipulation of Frame shapes. Checking that attributes are manipulated correctly. Even more exhaustive tests are done in time.tests.test_methods """ def setup(self): lon = Longitude(np.arange(0, 24, 4), u.hourangle) lat = Latitude(np.arange(-90, 91, 30), u.deg) # With same-sized arrays, no attributes self.s0 = ICRS(lon[:, np.newaxis] * np.ones(lat.shape), lat * np.ones(lon.shape)[:, np.newaxis]) # Make an AltAz frame since that has many types of attributes. # Match one axis with times. self.obstime = (Time('2012-01-01') + np.arange(len(lon))[:, np.newaxis] * u.s) # And another with location. self.location = EarthLocation(20.*u.deg, lat, 100*u.m) # Ensure we have a quantity scalar. self.pressure = 1000 * u.hPa # As well as an array. self.temperature = np.random.uniform( 0., 20., size=(lon.size, lat.size)) * u.deg_C self.s1 = AltAz(az=lon[:, np.newaxis], alt=lat, obstime=self.obstime, location=self.location, pressure=self.pressure, temperature=self.temperature) # For some tests, also try a GCRS, since that has representation # attributes. We match the second dimension (via the location) self.obsgeoloc, self.obsgeovel = self.location.get_gcrs_posvel( self.obstime[0, 0]) self.s2 = GCRS(ra=lon[:, np.newaxis], dec=lat, obstime=self.obstime, obsgeoloc=self.obsgeoloc, obsgeovel=self.obsgeovel) # For completeness, also some tests on an empty frame. self.s3 = GCRS(obstime=self.obstime, obsgeoloc=self.obsgeoloc, obsgeovel=self.obsgeovel) # And make a SkyCoord self.sc = SkyCoord(ra=lon[:, np.newaxis], dec=lat, frame=self.s3) def test_ravel(self): s0_ravel = self.s0.ravel() assert s0_ravel.shape == (self.s0.size,) assert np.all(s0_ravel.data.lon == self.s0.data.lon.ravel()) assert np.may_share_memory(s0_ravel.data.lon, self.s0.data.lon) assert np.may_share_memory(s0_ravel.data.lat, self.s0.data.lat) # Since s1 lon, lat were broadcast, ravel needs to make a copy. s1_ravel = self.s1.ravel() assert s1_ravel.shape == (self.s1.size,) assert np.all(s1_ravel.data.lon == self.s1.data.lon.ravel()) assert not np.may_share_memory(s1_ravel.data.lat, self.s1.data.lat) assert np.all(s1_ravel.obstime == self.s1.obstime.ravel()) assert not np.may_share_memory(s1_ravel.obstime.jd1, self.s1.obstime.jd1) assert np.all(s1_ravel.location == self.s1.location.ravel()) assert not np.may_share_memory(s1_ravel.location, self.s1.location) assert np.all(s1_ravel.temperature == self.s1.temperature.ravel()) assert np.may_share_memory(s1_ravel.temperature, self.s1.temperature) assert s1_ravel.pressure == self.s1.pressure s2_ravel = self.s2.ravel() assert s2_ravel.shape == (self.s2.size,) assert np.all(s2_ravel.data.lon == self.s2.data.lon.ravel()) assert not np.may_share_memory(s2_ravel.data.lat, self.s2.data.lat) assert np.all(s2_ravel.obstime == self.s2.obstime.ravel()) assert not np.may_share_memory(s2_ravel.obstime.jd1, self.s2.obstime.jd1) # CartesianRepresentation do not allow direct comparisons, as this is # too tricky to get right in the face of rounding issues. Here, though, # it cannot be an issue, so we compare the xyz quantities. 
assert np.all(s2_ravel.obsgeoloc.xyz == self.s2.obsgeoloc.ravel().xyz) assert not np.may_share_memory(s2_ravel.obsgeoloc.x, self.s2.obsgeoloc.x) s3_ravel = self.s3.ravel() assert s3_ravel.shape == (42,) # cannot use .size on frame w/o data. assert np.all(s3_ravel.obstime == self.s3.obstime.ravel()) assert not np.may_share_memory(s3_ravel.obstime.jd1, self.s3.obstime.jd1) assert np.all(s3_ravel.obsgeoloc.xyz == self.s3.obsgeoloc.ravel().xyz) assert not np.may_share_memory(s3_ravel.obsgeoloc.x, self.s3.obsgeoloc.x) sc_ravel = self.sc.ravel() assert sc_ravel.shape == (self.sc.size,) assert np.all(sc_ravel.data.lon == self.sc.data.lon.ravel()) assert not np.may_share_memory(sc_ravel.data.lat, self.sc.data.lat) assert np.all(sc_ravel.obstime == self.sc.obstime.ravel()) assert not np.may_share_memory(sc_ravel.obstime.jd1, self.sc.obstime.jd1) assert np.all(sc_ravel.obsgeoloc.xyz == self.sc.obsgeoloc.ravel().xyz) assert not np.may_share_memory(sc_ravel.obsgeoloc.x, self.sc.obsgeoloc.x) def test_flatten(self): s0_flatten = self.s0.flatten() assert s0_flatten.shape == (self.s0.size,) assert np.all(s0_flatten.data.lon == self.s0.data.lon.flatten()) # Flatten always copies. assert not np.may_share_memory(s0_flatten.data.lat, self.s0.data.lat) s1_flatten = self.s1.flatten() assert s1_flatten.shape == (self.s1.size,) assert np.all(s1_flatten.data.lat == self.s1.data.lat.flatten()) assert not np.may_share_memory(s1_flatten.data.lon, self.s1.data.lat) assert np.all(s1_flatten.obstime == self.s1.obstime.flatten()) assert not np.may_share_memory(s1_flatten.obstime.jd1, self.s1.obstime.jd1) assert np.all(s1_flatten.location == self.s1.location.flatten()) assert not np.may_share_memory(s1_flatten.location, self.s1.location) assert np.all(s1_flatten.temperature == self.s1.temperature.flatten()) assert not np.may_share_memory(s1_flatten.temperature, self.s1.temperature) assert s1_flatten.pressure == self.s1.pressure def test_transpose(self): s0_transpose = self.s0.transpose() assert s0_transpose.shape == (7, 6) assert np.all(s0_transpose.data.lon == self.s0.data.lon.transpose()) assert np.may_share_memory(s0_transpose.data.lat, self.s0.data.lat) s1_transpose = self.s1.transpose() assert s1_transpose.shape == (7, 6) assert np.all(s1_transpose.data.lat == self.s1.data.lat.transpose()) assert np.may_share_memory(s1_transpose.data.lon, self.s1.data.lon) assert np.all(s1_transpose.obstime == self.s1.obstime.transpose()) assert np.may_share_memory(s1_transpose.obstime.jd1, self.s1.obstime.jd1) assert np.all(s1_transpose.location == self.s1.location.transpose()) assert np.may_share_memory(s1_transpose.location, self.s1.location) assert np.all(s1_transpose.temperature == self.s1.temperature.transpose()) assert np.may_share_memory(s1_transpose.temperature, self.s1.temperature) assert s1_transpose.pressure == self.s1.pressure # Only one check on T, since it just calls transpose anyway. 
        s1_T = self.s1.T
        assert s1_T.shape == (7, 6)
        assert np.all(s1_T.temperature == self.s1.temperature.T)
        assert np.may_share_memory(s1_T.location, self.s1.location)

    def test_diagonal(self):
        s0_diagonal = self.s0.diagonal()
        assert s0_diagonal.shape == (6,)
        assert np.all(s0_diagonal.data.lat == self.s0.data.lat.diagonal())
        assert np.may_share_memory(s0_diagonal.data.lat, self.s0.data.lat)

    def test_swapaxes(self):
        s1_swapaxes = self.s1.swapaxes(0, 1)
        assert s1_swapaxes.shape == (7, 6)
        assert np.all(s1_swapaxes.data.lat == self.s1.data.lat.swapaxes(0, 1))
        assert np.may_share_memory(s1_swapaxes.data.lat, self.s1.data.lat)
        assert np.all(s1_swapaxes.obstime == self.s1.obstime.swapaxes(0, 1))
        assert np.may_share_memory(s1_swapaxes.obstime.jd1, self.s1.obstime.jd1)
        assert np.all(s1_swapaxes.location == self.s1.location.swapaxes(0, 1))
        assert s1_swapaxes.location.shape == (7, 6)
        assert np.may_share_memory(s1_swapaxes.location, self.s1.location)
        assert np.all(s1_swapaxes.temperature == self.s1.temperature.swapaxes(0, 1))
        assert np.may_share_memory(s1_swapaxes.temperature, self.s1.temperature)
        assert s1_swapaxes.pressure == self.s1.pressure

    def test_reshape(self):
        s0_reshape = self.s0.reshape(2, 3, 7)
        assert s0_reshape.shape == (2, 3, 7)
        assert np.all(s0_reshape.data.lon == self.s0.data.lon.reshape(2, 3, 7))
        assert np.all(s0_reshape.data.lat == self.s0.data.lat.reshape(2, 3, 7))
        assert np.may_share_memory(s0_reshape.data.lon, self.s0.data.lon)
        assert np.may_share_memory(s0_reshape.data.lat, self.s0.data.lat)
        s1_reshape = self.s1.reshape(3, 2, 7)
        assert s1_reshape.shape == (3, 2, 7)
        assert np.all(s1_reshape.data.lat == self.s1.data.lat.reshape(3, 2, 7))
        assert np.may_share_memory(s1_reshape.data.lat, self.s1.data.lat)
        assert np.all(s1_reshape.obstime == self.s1.obstime.reshape(3, 2, 7))
        assert np.may_share_memory(s1_reshape.obstime.jd1, self.s1.obstime.jd1)
        assert np.all(s1_reshape.location == self.s1.location.reshape(3, 2, 7))
        assert np.may_share_memory(s1_reshape.location, self.s1.location)
        assert np.all(s1_reshape.temperature == self.s1.temperature.reshape(3, 2, 7))
        assert np.may_share_memory(s1_reshape.temperature, self.s1.temperature)
        assert s1_reshape.pressure == self.s1.pressure
        # For reshape(3, 14), copying is necessary for lon, lat, location, time
        s1_reshape2 = self.s1.reshape(3, 14)
        assert s1_reshape2.shape == (3, 14)
        assert np.all(s1_reshape2.data.lon == self.s1.data.lon.reshape(3, 14))
        assert not np.may_share_memory(s1_reshape2.data.lon, self.s1.data.lon)
        assert np.all(s1_reshape2.obstime == self.s1.obstime.reshape(3, 14))
        assert not np.may_share_memory(s1_reshape2.obstime.jd1, self.s1.obstime.jd1)
        assert np.all(s1_reshape2.location == self.s1.location.reshape(3, 14))
        assert not np.may_share_memory(s1_reshape2.location, self.s1.location)
        assert np.all(s1_reshape2.temperature == self.s1.temperature.reshape(3, 14))
        assert np.may_share_memory(s1_reshape2.temperature, self.s1.temperature)
        assert s1_reshape2.pressure == self.s1.pressure
        s2_reshape = self.s2.reshape(3, 2, 7)
        assert s2_reshape.shape == (3, 2, 7)
        assert np.all(s2_reshape.data.lon == self.s2.data.lon.reshape(3, 2, 7))
        assert np.may_share_memory(s2_reshape.data.lat, self.s2.data.lat)
        assert np.all(s2_reshape.obstime == self.s2.obstime.reshape(3, 2, 7))
        assert np.may_share_memory(s2_reshape.obstime.jd1, self.s2.obstime.jd1)
        assert np.all(s2_reshape.obsgeoloc.xyz == self.s2.obsgeoloc.reshape(3, 2, 7).xyz)
        assert np.may_share_memory(s2_reshape.obsgeoloc.x, self.s2.obsgeoloc.x)
        s3_reshape = self.s3.reshape(3, 2, 7)
        assert s3_reshape.shape == (3, 2, 7)
        assert np.all(s3_reshape.obstime == self.s3.obstime.reshape(3, 2, 7))
        assert np.may_share_memory(s3_reshape.obstime.jd1, self.s3.obstime.jd1)
        assert np.all(s3_reshape.obsgeoloc.xyz == self.s3.obsgeoloc.reshape(3, 2, 7).xyz)
        assert np.may_share_memory(s3_reshape.obsgeoloc.x, self.s3.obsgeoloc.x)
        sc_reshape = self.sc.reshape(3, 2, 7)
        assert sc_reshape.shape == (3, 2, 7)
        assert np.all(sc_reshape.data.lon == self.sc.data.lon.reshape(3, 2, 7))
        assert np.may_share_memory(sc_reshape.data.lat, self.sc.data.lat)
        assert np.all(sc_reshape.obstime == self.sc.obstime.reshape(3, 2, 7))
        assert np.may_share_memory(sc_reshape.obstime.jd1, self.sc.obstime.jd1)
        assert np.all(sc_reshape.obsgeoloc.xyz == self.sc.obsgeoloc.reshape(3, 2, 7).xyz)
        assert np.may_share_memory(sc_reshape.obsgeoloc.x, self.sc.obsgeoloc.x)
        # For reshape(3, 14), the arrays all need to be copied.
        sc_reshape2 = self.sc.reshape(3, 14)
        assert sc_reshape2.shape == (3, 14)
        assert np.all(sc_reshape2.data.lon == self.sc.data.lon.reshape(3, 14))
        assert not np.may_share_memory(sc_reshape2.data.lat, self.sc.data.lat)
        assert np.all(sc_reshape2.obstime == self.sc.obstime.reshape(3, 14))
        assert not np.may_share_memory(sc_reshape2.obstime.jd1, self.sc.obstime.jd1)
        assert np.all(sc_reshape2.obsgeoloc.xyz == self.sc.obsgeoloc.reshape(3, 14).xyz)
        assert not np.may_share_memory(sc_reshape2.obsgeoloc.x, self.sc.obsgeoloc.x)

    def test_squeeze(self):
        s0_squeeze = self.s0.reshape(3, 1, 2, 1, 7).squeeze()
        assert s0_squeeze.shape == (3, 2, 7)
        assert np.all(s0_squeeze.data.lat == self.s0.data.lat.reshape(3, 2, 7))
        assert np.may_share_memory(s0_squeeze.data.lat, self.s0.data.lat)

    def test_add_dimension(self):
        s0_adddim = self.s0[:, np.newaxis, :]
        assert s0_adddim.shape == (6, 1, 7)
        assert np.all(s0_adddim.data.lon == self.s0.data.lon[:, np.newaxis, :])
        assert np.may_share_memory(s0_adddim.data.lat, self.s0.data.lat)

    def test_take(self):
        s0_take = self.s0.take((5, 2))
        assert s0_take.shape == (2,)
        assert np.all(s0_take.data.lon == self.s0.data.lon.take((5, 2)))
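
# Added illustration (not part of the original test module): the
# `np.may_share_memory` assertions above hinge on NumPy's view-vs-copy
# rules; `_demo` is a hypothetical name used only for this sketch.
_demo = np.arange(12.).reshape(3, 4)
assert np.may_share_memory(_demo.ravel(), _demo)              # contiguous -> view
assert not np.may_share_memory(_demo[:, ::2].ravel(), _demo)  # strided -> copy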
d62b69cf6cce600732f9a875d38867f7df63d072b7750fc8059db43e81d8a63d
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from astropy import units as u from astropy.coordinates.builtin_frames import ICRS, Galactic, Galactocentric from astropy.coordinates import builtin_frames as bf from astropy.units import allclose as quantity_allclose from astropy.coordinates.errors import ConvertError from astropy.coordinates import representation as r def test_api(): # transform observed Barycentric velocities to full-space Galactocentric gc_frame = Galactocentric() icrs = ICRS(ra=151.*u.deg, dec=-16*u.deg, distance=101*u.pc, pm_ra_cosdec=21*u.mas/u.yr, pm_dec=-71*u.mas/u.yr, radial_velocity=71*u.km/u.s) icrs.transform_to(gc_frame) # transform a set of ICRS proper motions to Galactic icrs = ICRS(ra=151.*u.deg, dec=-16*u.deg, pm_ra_cosdec=21*u.mas/u.yr, pm_dec=-71*u.mas/u.yr) icrs.transform_to(Galactic) # transform a Barycentric RV to a GSR RV icrs = ICRS(ra=151.*u.deg, dec=-16*u.deg, distance=1.*u.pc, pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr, radial_velocity=71*u.km/u.s) icrs.transform_to(Galactocentric) all_kwargs = [ dict(ra=37.4*u.deg, dec=-55.8*u.deg), dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc), dict(ra=37.4*u.deg, dec=-55.8*u.deg, pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc, pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), dict(ra=37.4*u.deg, dec=-55.8*u.deg, radial_velocity=105.7*u.km/u.s), dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc, radial_velocity=105.7*u.km/u.s), dict(ra=37.4*u.deg, dec=-55.8*u.deg, radial_velocity=105.7*u.km/u.s, pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc, pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr, radial_velocity=105.7*u.km/u.s), # Now test other representation/differential types: dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc, representation_type='cartesian'), dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc, representation_type=r.CartesianRepresentation), dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc, v_x=100.*u.km/u.s, v_y=200*u.km/u.s, v_z=300*u.km/u.s, representation_type=r.CartesianRepresentation, differential_type=r.CartesianDifferential), dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc, v_x=100.*u.km/u.s, v_y=200*u.km/u.s, v_z=300*u.km/u.s, representation_type=r.CartesianRepresentation, differential_type='cartesian'), ] @pytest.mark.parametrize('kwargs', all_kwargs) def test_all_arg_options(kwargs): # Above is a list of all possible valid combinations of arguments. 
    # Here we do a simple thing and just verify that passing them in, we have
    # access to the relevant attributes from the resulting object
    icrs = ICRS(**kwargs)
    gal = icrs.transform_to(Galactic)
    repr_gal = repr(gal)

    for k in kwargs:
        if k == 'differential_type':
            continue
        getattr(icrs, k)

    if 'pm_ra_cosdec' in kwargs:  # should have both
        assert 'pm_l_cosb' in repr_gal
        assert 'pm_b' in repr_gal
        assert 'mas / yr' in repr_gal

        if 'radial_velocity' not in kwargs:
            assert 'radial_velocity' not in repr_gal

    if 'radial_velocity' in kwargs:
        assert 'radial_velocity' in repr_gal
        assert 'km / s' in repr_gal

        if 'pm_ra_cosdec' not in kwargs:
            assert 'pm_l_cosb' not in repr_gal
            assert 'pm_b' not in repr_gal


@pytest.mark.parametrize('cls,lon,lat', [
    [bf.ICRS, 'ra', 'dec'], [bf.FK4, 'ra', 'dec'], [bf.FK4NoETerms, 'ra', 'dec'],
    [bf.FK5, 'ra', 'dec'], [bf.GCRS, 'ra', 'dec'], [bf.HCRS, 'ra', 'dec'],
    [bf.LSR, 'ra', 'dec'], [bf.CIRS, 'ra', 'dec'], [bf.Galactic, 'l', 'b'],
    [bf.AltAz, 'az', 'alt'], [bf.Supergalactic, 'sgl', 'sgb'],
    [bf.GalacticLSR, 'l', 'b'], [bf.HeliocentricMeanEcliptic, 'lon', 'lat'],
    [bf.GeocentricMeanEcliptic, 'lon', 'lat'],
    [bf.BarycentricMeanEcliptic, 'lon', 'lat'],
    [bf.PrecessedGeocentric, 'ra', 'dec']
])
def test_expected_arg_names(cls, lon, lat):
    kwargs = {lon: 37.4*u.deg, lat: -55.8*u.deg, 'distance': 150*u.pc,
              'pm_{0}_cos{1}'.format(lon, lat): -21.2*u.mas/u.yr,
              'pm_{0}'.format(lat): 17.1*u.mas/u.yr,
              'radial_velocity': 105.7*u.km/u.s}
    frame = cls(**kwargs)


# these data are extracted from the vizier copy of XHIP:
# http://vizier.u-strasbg.fr/viz-bin/VizieR-3?-source=+V/137A/XHIP
_xhip_head = """
------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
       R            D            pmRA     pmDE                               Di      pmGLon   pmGLat   RV      U      V      W
HIP    AJ2000 (deg) EJ2000 (deg) (mas/yr) (mas/yr) GLon (deg)   GLat (deg)   st (pc) (mas/yr) (mas/yr) (km/s)  (km/s) (km/s) (km/s)
------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
"""[1:-1]

_xhip_data = """
    19 000.05331690 +38.30408633    -3.17   -15.37 112.00026470 -23.47789171  247.12    -6.40   -14.33    6.30    7.3    2.0  -17.9
    20 000.06295067 +23.52928427    36.11   -22.48 108.02779304 -37.85659811   95.90    29.35   -30.78   37.80  -19.3   16.1  -34.2
    21 000.06623581 +08.00723430    61.48    -0.23 101.69697120 -52.74179515  183.68    58.06   -20.23  -11.72  -45.2  -30.9   -1.3
 24917 080.09698238 -33.39874984    -4.30    13.40 236.92324669 -32.58047131  107.38   -14.03    -1.15   36.10  -22.4  -21.3  -19.9
 59207 182.13915108 +65.34963517    18.17     5.49 130.04157185  51.18258601   56.00   -18.98    -0.49    5.70    1.5    6.1    4.4
 87992 269.60730667 +36.87462906   -89.58    72.46  62.98053142  25.90148234  129.60    45.64   105.79   -4.00  -39.5  -15.8   56.7
115110 349.72322473 -28.74087144    48.86    -9.25  23.00447250 -69.52799804  116.87    -8.37   -49.02   15.00  -16.8  -12.2  -23.6
"""[1:-1]

# in principle we could parse the above as a table, but doing it "manually"
# makes this test less tied to Table working correctly


@pytest.mark.parametrize('hip,ra,dec,pmra,pmdec,glon,glat,dist,pmglon,pmglat,rv,U,V,W',
                         [[float(val) for val in row.split()] for row in _xhip_data.split('\n')])
def test_xhip_galactic(hip, ra, dec, pmra, pmdec, glon, glat, dist, pmglon,
                       pmglat, rv, U, V, W):
    i = ICRS(ra*u.deg, dec*u.deg, dist*u.pc,
             pm_ra_cosdec=pmra*u.marcsec/u.yr, pm_dec=pmdec*u.marcsec/u.yr,
             radial_velocity=rv*u.km/u.s)
    g = i.transform_to(Galactic)

    # precision is limited by the 2-decimal-digit string representation of pms
    assert quantity_allclose(g.pm_l_cosb, pmglon*u.marcsec/u.yr,
atol=.01*u.marcsec/u.yr) assert quantity_allclose(g.pm_b, pmglat*u.marcsec/u.yr, atol=.01*u.marcsec/u.yr) # make sure UVW also makes sense uvwg = g.cartesian.differentials['s'] # precision is limited by 1-decimal digit string representation of vels assert quantity_allclose(uvwg.d_x, U*u.km/u.s, atol=.1*u.km/u.s) assert quantity_allclose(uvwg.d_y, V*u.km/u.s, atol=.1*u.km/u.s) assert quantity_allclose(uvwg.d_z, W*u.km/u.s, atol=.1*u.km/u.s) @pytest.mark.parametrize('kwargs,expect_success', [ [dict(ra=37.4*u.deg, dec=-55.8*u.deg), False], [dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc), True], [dict(ra=37.4*u.deg, dec=-55.8*u.deg, pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), False], [dict(ra=37.4*u.deg, dec=-55.8*u.deg, radial_velocity=105.7*u.km/u.s), False], [dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc, radial_velocity=105.7*u.km/u.s), False], [dict(ra=37.4*u.deg, dec=-55.8*u.deg, radial_velocity=105.7*u.km/u.s, pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), False], [dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc, pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr, radial_velocity=105.7*u.km/u.s), True] ]) def test_frame_affinetransform(kwargs, expect_success): """There are already tests in test_transformations.py that check that an AffineTransform fails without full-space data, but this just checks that things work as expected at the frame level as well. """ icrs = ICRS(**kwargs) if expect_success: gc = icrs.transform_to(Galactocentric) else: with pytest.raises(ConvertError): icrs.transform_to(Galactocentric) def test_differential_type_arg(): """ Test passing in an explicit differential class to the initializer or changing the differential class via set_representation_cls """ from astropy.coordinates.builtin_frames import ICRS icrs = ICRS(ra=1*u.deg, dec=60*u.deg, pm_ra=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr, differential_type=r.UnitSphericalDifferential) assert icrs.pm_ra == 10*u.mas/u.yr icrs = ICRS(ra=1*u.deg, dec=60*u.deg, pm_ra=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr, differential_type={'s': r.UnitSphericalDifferential}) assert icrs.pm_ra == 10*u.mas/u.yr icrs = ICRS(ra=1*u.deg, dec=60*u.deg, pm_ra_cosdec=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr) icrs.set_representation_cls(s=r.UnitSphericalDifferential) assert quantity_allclose(icrs.pm_ra, 20*u.mas/u.yr) # incompatible representation and differential with pytest.raises(TypeError): ICRS(ra=1*u.deg, dec=60*u.deg, v_x=1*u.km/u.s, v_y=-2*u.km/u.s, v_z=-2*u.km/u.s, differential_type=r.CartesianDifferential) # specify both icrs = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc, v_x=1*u.km/u.s, v_y=2*u.km/u.s, v_z=3*u.km/u.s, representation_type=r.CartesianRepresentation, differential_type=r.CartesianDifferential) assert icrs.x == 1*u.pc assert icrs.y == 2*u.pc assert icrs.z == 3*u.pc assert icrs.v_x == 1*u.km/u.s assert icrs.v_y == 2*u.km/u.s assert icrs.v_z == 3*u.km/u.s def test_slicing_preserves_differential(): icrs = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc, pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr, radial_velocity=105.7*u.km/u.s) icrs2 = icrs.reshape(1,1)[:1,0] for name in icrs.representation_component_names.keys(): assert getattr(icrs, name) == getattr(icrs2, name)[0] for name in icrs.get_representation_component_names('s').keys(): assert getattr(icrs, name) == getattr(icrs2, name)[0] def test_shorthand_attributes(): # Check that attribute access works # for array data: n = 4 icrs1 = ICRS(ra=np.random.uniform(0, 360, n)*u.deg, dec=np.random.uniform(-90, 90, n)*u.deg, 
distance=100*u.pc, pm_ra_cosdec=np.random.normal(0, 100, n)*u.mas/u.yr, pm_dec=np.random.normal(0, 100, n)*u.mas/u.yr, radial_velocity=np.random.normal(0, 100, n)*u.km/u.s) v = icrs1.velocity pm = icrs1.proper_motion assert quantity_allclose(pm[0], icrs1.pm_ra_cosdec) assert quantity_allclose(pm[1], icrs1.pm_dec) # for scalar data: icrs2 = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc, pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr, radial_velocity=105.7*u.km/u.s) v = icrs2.velocity pm = icrs2.proper_motion assert quantity_allclose(pm[0], icrs2.pm_ra_cosdec) assert quantity_allclose(pm[1], icrs2.pm_dec) # check that it fails where we expect: # no distance rv = 105.7*u.km/u.s icrs3 = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg, pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr, radial_velocity=rv) with pytest.raises(ValueError): icrs3.velocity icrs3.set_representation_cls('cartesian') assert hasattr(icrs3, 'radial_velocity') assert quantity_allclose(icrs3.radial_velocity, rv) icrs4 = ICRS(x=30*u.pc, y=20*u.pc, z=11*u.pc, v_x=10*u.km/u.s, v_y=10*u.km/u.s, v_z=10*u.km/u.s, representation_type=r.CartesianRepresentation, differential_type=r.CartesianDifferential) icrs4.radial_velocity def test_negative_distance(): """ Regression test: #7408 Make sure that negative parallaxes turned into distances are handled right """ RA = 150 * u.deg DEC = -11*u.deg c = ICRS(ra=RA, dec=DEC, distance=(-10*u.mas).to(u.pc, u.parallax()), pm_ra_cosdec=10*u.mas/u.yr, pm_dec=10*u.mas/u.yr) assert quantity_allclose(c.ra, RA) assert quantity_allclose(c.dec, DEC) c = ICRS(ra=RA, dec=DEC, distance=(-10*u.mas).to(u.pc, u.parallax())) assert quantity_allclose(c.ra, RA) assert quantity_allclose(c.dec, DEC)
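
# Added illustration (not part of the original test module): the parallax
# equivalency behind test_negative_distance maps p -> 1/p (arcsec -> pc),
# so +10 mas is 100 pc and a negative parallax yields a negative distance,
# which is exactly what regression #7408 exercises.
assert quantity_allclose((10*u.mas).to(u.pc, u.parallax()), 100*u.pc)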
638b3ba2ef95de2e7ba2fd3ade5481267125dd2d342f1479b8c35f79d84e3d27
# -*- coding: utf-8 -*-

"""
Tests the Angle string formatting capabilities.  SkyCoord formatting is in
test_sky_coord
"""

from astropy.coordinates.angles import Angle
from astropy import units as u


def test_to_string_precision():
    # There are already some tests in test_api.py, but this is a regression
    # test for the bug in issue #1319 which caused incorrect formatting of the
    # seconds for precision=0
    angle = Angle(-1.23456789, unit=u.degree)
    assert angle.to_string(precision=3) == '-1d14m04.444s'
    assert angle.to_string(precision=1) == '-1d14m04.4s'
    assert angle.to_string(precision=0) == '-1d14m04s'

    angle2 = Angle(-1.23456789, unit=u.hourangle)
    assert angle2.to_string(precision=3, unit=u.hour) == '-1h14m04.444s'
    assert angle2.to_string(precision=1, unit=u.hour) == '-1h14m04.4s'
    assert angle2.to_string(precision=0, unit=u.hour) == '-1h14m04s'

    # Regression test for #7141
    angle3 = Angle(-0.5, unit=u.degree)
    assert angle3.to_string(precision=0, fields=3) == '-0d30m00s'
    assert angle3.to_string(precision=0, fields=2) == '-0d30m'
    assert angle3.to_string(precision=0, fields=1) == '-1d'


def test_to_string_decimal():
    # There are already some tests in test_api.py, but this is a regression
    # test for the bug in issue #1323 which caused decimal formatting to not
    # work
    angle1 = Angle(2., unit=u.degree)
    assert angle1.to_string(decimal=True, precision=3) == '2.000'
    assert angle1.to_string(decimal=True, precision=1) == '2.0'
    assert angle1.to_string(decimal=True, precision=0) == '2'

    angle2 = Angle(3., unit=u.hourangle)
    assert angle2.to_string(decimal=True, precision=3) == '3.000'
    assert angle2.to_string(decimal=True, precision=1) == '3.0'
    assert angle2.to_string(decimal=True, precision=0) == '3'

    angle3 = Angle(4., unit=u.radian)
    assert angle3.to_string(decimal=True, precision=3) == '4.000'
    assert angle3.to_string(decimal=True, precision=1) == '4.0'
    assert angle3.to_string(decimal=True, precision=0) == '4'


def test_to_string_formats():
    a = Angle(1.113355, unit=u.deg)
    assert a.to_string(format='latex') == r'$1^\circ06{}^\prime48.078{}^{\prime\prime}$'
    assert a.to_string(format='unicode') == '1°06′48.078″'

    a = Angle(1.113355, unit=u.hour)
    assert a.to_string(format='latex') == r'$1^\mathrm{h}06^\mathrm{m}48.078^\mathrm{s}$'
    assert a.to_string(format='unicode') == '1ʰ06ᵐ48.078ˢ'

    a = Angle(1.113355, unit=u.radian)
    assert a.to_string(format='latex') == r'$1.11336\mathrm{rad}$'
    assert a.to_string(format='unicode') == '1.11336rad'


def test_to_string_fields():
    a = Angle(1.113355, unit=u.deg)
    assert a.to_string(fields=1) == r'1d'
    assert a.to_string(fields=2) == r'1d07m'
    assert a.to_string(fields=3) == r'1d06m48.078s'


def test_to_string_padding():
    a = Angle(0.5653, unit=u.deg)
    assert a.to_string(unit='deg', sep=':', pad=True) == r'00:33:55.08'

    # Test to make sure negative angles are padded correctly
    a = Angle(-0.5653, unit=u.deg)
    assert a.to_string(unit='deg', sep=':', pad=True) == r'-00:33:55.08'


def test_sexagesimal_rounding_up():
    a = Angle(359.9999999999, unit=u.deg)

    assert a.to_string(precision=None) == '360d00m00s'
    assert a.to_string(precision=4) == '360d00m00.0000s'
    assert a.to_string(precision=5) == '360d00m00.00000s'
    assert a.to_string(precision=6) == '360d00m00.000000s'
    assert a.to_string(precision=7) == '359d59m59.9999996s'

    a = Angle(3.999999, unit=u.deg)
    assert a.to_string(fields=2, precision=None) == '4d00m'
    assert a.to_string(fields=2, precision=1) == '4d00m'
    assert a.to_string(fields=2, precision=5) == '4d00m'
    assert a.to_string(fields=1, precision=1) == '4d'
    assert a.to_string(fields=1, precision=5) == '4d'


def test_to_string_scalar():
    a = Angle(1.113355, unit=u.deg)
    assert isinstance(a.to_string(), str)


def test_to_string_radian_with_precision():
    """
    Regression test for a bug that caused ``to_string`` to crash for angles
    in radians when specifying the precision.
    """
    # Check that specifying the precision works
    a = Angle(3., unit=u.rad)
    assert a.to_string(precision=3, sep='fromunit') == '3.000rad'


def test_sexagesimal_round_down():
    a1 = Angle(1, u.deg).to(u.hourangle)
    a2 = Angle(2, u.deg)
    assert a1.to_string() == '0h04m00s'
    assert a2.to_string() == '2d00m00s'


def test_to_string_fields_colon():
    a = Angle(1.113355, unit=u.deg)
    assert a.to_string(fields=2, sep=':') == '1:07'
    assert a.to_string(fields=3, sep=':') == '1:06:48.078'
    assert a.to_string(fields=1, sep=':') == '1'
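
# Added illustration (not part of the original test module): a quick check
# of the sexagesimal conventions tested above; 0.2345 deg is 14.07 arcmin,
# i.e. 14 arcmin plus 4.2 arcsec, so with precision=1 and sep=':' we expect:
assert Angle(10.2345, u.deg).to_string(sep=':', precision=1) == '10:14:04.2'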
7f280e33446d62e2382a3b6a2cbe6e1eac5199f8af377bb835b1a632f2d46724
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from astropy import units as u from astropy.coordinates.distances import Distance from astropy.coordinates.builtin_frames import ICRS, FK5, Galactic, AltAz, SkyOffsetFrame from astropy.coordinates import SkyCoord, EarthLocation from astropy.time import Time from astropy.tests.helper import assert_quantity_allclose as assert_allclose @pytest.mark.parametrize("inradec,expectedlatlon, tolsep", [ ((45, 45)*u.deg, (0, 0)*u.deg, .001*u.arcsec), ((45, 0)*u.deg, (0, -45)*u.deg, .001*u.arcsec), ((45, 90)*u.deg, (0, 45)*u.deg, .001*u.arcsec), ((46, 45)*u.deg, (1*np.cos(45*u.deg), 0)*u.deg, 16*u.arcsec), ]) def test_skyoffset(inradec, expectedlatlon, tolsep, originradec=(45, 45)*u.deg): origin = ICRS(*originradec) skyoffset_frame = SkyOffsetFrame(origin=origin) skycoord = SkyCoord(*inradec, frame=ICRS) skycoord_inaf = skycoord.transform_to(skyoffset_frame) assert hasattr(skycoord_inaf, 'lon') assert hasattr(skycoord_inaf, 'lat') expected = SkyCoord(*expectedlatlon, frame=skyoffset_frame) assert skycoord_inaf.separation(expected) < tolsep def test_skyoffset_functional_ra(): # we do the 12)[1:-1] business because sometimes machine precision issues # lead to results that are either ~0 or ~360, which mucks up the final # comparison and leads to spurious failures. So this just avoids that by # staying away from the edges input_ra = np.linspace(0, 360, 12)[1:-1] input_dec = np.linspace(-90, 90, 12)[1:-1] icrs_coord = ICRS(ra=input_ra*u.deg, dec=input_dec*u.deg, distance=1.*u.kpc) for ra in np.linspace(0, 360, 24): # expected rotation expected = ICRS(ra=np.linspace(0-ra, 360-ra, 12)[1:-1]*u.deg, dec=np.linspace(-90, 90, 12)[1:-1]*u.deg, distance=1.*u.kpc) expected_xyz = expected.cartesian.xyz # actual transformation to the frame skyoffset_frame = SkyOffsetFrame(origin=ICRS(ra*u.deg, 0*u.deg)) actual = icrs_coord.transform_to(skyoffset_frame) actual_xyz = actual.cartesian.xyz # back to ICRS roundtrip = actual.transform_to(ICRS) roundtrip_xyz = roundtrip.cartesian.xyz # Verify assert_allclose(actual_xyz, expected_xyz, atol=1E-5*u.kpc) assert_allclose(icrs_coord.ra, roundtrip.ra, atol=1E-5*u.deg) assert_allclose(icrs_coord.dec, roundtrip.dec, atol=1E-5*u.deg) assert_allclose(icrs_coord.distance, roundtrip.distance, atol=1E-5*u.kpc) def test_skyoffset_functional_dec(): # we do the 12)[1:-1] business because sometimes machine precision issues # lead to results that are either ~0 or ~360, which mucks up the final # comparison and leads to spurious failures. 
So this just avoids that by # staying away from the edges input_ra = np.linspace(0, 360, 12)[1:-1] input_dec = np.linspace(-90, 90, 12)[1:-1] input_ra_rad = np.deg2rad(input_ra) input_dec_rad = np.deg2rad(input_dec) icrs_coord = ICRS(ra=input_ra*u.deg, dec=input_dec*u.deg, distance=1.*u.kpc) # Dec rotations # Done in xyz space because dec must be [-90,90] for dec in np.linspace(-90, 90, 13): # expected rotation dec_rad = -np.deg2rad(dec) expected_x = (-np.sin(input_dec_rad) * np.sin(dec_rad) + np.cos(input_ra_rad) * np.cos(input_dec_rad) * np.cos(dec_rad)) expected_y = (np.sin(input_ra_rad) * np.cos(input_dec_rad)) expected_z = (np.sin(input_dec_rad) * np.cos(dec_rad) + np.sin(dec_rad) * np.cos(input_ra_rad) * np.cos(input_dec_rad)) expected = SkyCoord(x=expected_x, y=expected_y, z=expected_z, unit='kpc', representation_type='cartesian') expected_xyz = expected.cartesian.xyz # actual transformation to the frame skyoffset_frame = SkyOffsetFrame(origin=ICRS(0*u.deg, dec*u.deg)) actual = icrs_coord.transform_to(skyoffset_frame) actual_xyz = actual.cartesian.xyz # back to ICRS roundtrip = actual.transform_to(ICRS) # Verify assert_allclose(actual_xyz, expected_xyz, atol=1E-5*u.kpc) assert_allclose(icrs_coord.ra, roundtrip.ra, atol=1E-5*u.deg) assert_allclose(icrs_coord.dec, roundtrip.dec, atol=1E-5*u.deg) assert_allclose(icrs_coord.distance, roundtrip.distance, atol=1E-5*u.kpc) def test_skyoffset_functional_ra_dec(): # we do the 12)[1:-1] business because sometimes machine precision issues # lead to results that are either ~0 or ~360, which mucks up the final # comparison and leads to spurious failures. So this just avoids that by # staying away from the edges input_ra = np.linspace(0, 360, 12)[1:-1] input_dec = np.linspace(-90, 90, 12)[1:-1] input_ra_rad = np.deg2rad(input_ra) input_dec_rad = np.deg2rad(input_dec) icrs_coord = ICRS(ra=input_ra*u.deg, dec=input_dec*u.deg, distance=1.*u.kpc) for ra in np.linspace(0, 360, 10): for dec in np.linspace(-90, 90, 5): # expected rotation dec_rad = -np.deg2rad(dec) ra_rad = np.deg2rad(ra) expected_x = (-np.sin(input_dec_rad) * np.sin(dec_rad) + np.cos(input_ra_rad) * np.cos(input_dec_rad) * np.cos(dec_rad) * np.cos(ra_rad) + np.sin(input_ra_rad) * np.cos(input_dec_rad) * np.cos(dec_rad) * np.sin(ra_rad)) expected_y = (np.sin(input_ra_rad) * np.cos(input_dec_rad) * np.cos(ra_rad) - np.cos(input_ra_rad) * np.cos(input_dec_rad) * np.sin(ra_rad)) expected_z = (np.sin(input_dec_rad) * np.cos(dec_rad) + np.sin(dec_rad) * np.cos(ra_rad) * np.cos(input_ra_rad) * np.cos(input_dec_rad) + np.sin(dec_rad) * np.sin(ra_rad) * np.sin(input_ra_rad) * np.cos(input_dec_rad)) expected = SkyCoord(x=expected_x, y=expected_y, z=expected_z, unit='kpc', representation_type='cartesian') expected_xyz = expected.cartesian.xyz # actual transformation to the frame skyoffset_frame = SkyOffsetFrame(origin=ICRS(ra*u.deg, dec*u.deg)) actual = icrs_coord.transform_to(skyoffset_frame) actual_xyz = actual.cartesian.xyz # back to ICRS roundtrip = actual.transform_to(ICRS) # Verify assert_allclose(actual_xyz, expected_xyz, atol=1E-5*u.kpc) assert_allclose(icrs_coord.ra, roundtrip.ra, atol=1E-4*u.deg) assert_allclose(icrs_coord.dec, roundtrip.dec, atol=1E-5*u.deg) assert_allclose(icrs_coord.distance, roundtrip.distance, atol=1E-5*u.kpc) def test_skycoord_skyoffset_frame(): m31 = SkyCoord(10.6847083, 41.26875, frame='icrs', unit=u.deg) m33 = SkyCoord(23.4621, 30.6599417, frame='icrs', unit=u.deg) m31_astro = m31.skyoffset_frame() m31_in_m31 = m31.transform_to(m31_astro) m33_in_m31 = 
m33.transform_to(m31_astro) assert_allclose([m31_in_m31.lon, m31_in_m31.lat], [0, 0]*u.deg, atol=1e-10*u.deg) assert_allclose([m33_in_m31.lon, m33_in_m31.lat], [11.13135175, -9.79084759]*u.deg) assert_allclose(m33.separation(m31), np.hypot(m33_in_m31.lon, m33_in_m31.lat), atol=.1*u.deg) # used below in the next parametrized test m31_sys = [ICRS, FK5, Galactic] m31_coo = [(10.6847929, 41.2690650), (10.6847929, 41.2690650), (121.1744050, -21.5729360)] m31_dist = Distance(770, u.kpc) convert_precision = 1 * u.arcsec roundtrip_precision = 1e-4 * u.degree dist_precision = 1e-9 * u.kpc m31_params = [] for i in range(len(m31_sys)): for j in range(len(m31_sys)): if i < j: m31_params.append((m31_sys[i], m31_sys[j], m31_coo[i], m31_coo[j])) @pytest.mark.parametrize(('fromsys', 'tosys', 'fromcoo', 'tocoo'), m31_params) def test_m31_coord_transforms(fromsys, tosys, fromcoo, tocoo): """ This tests a variety of coordinate conversions for the Chandra point-source catalog location of M31 from NED, via SkyOffsetFrames """ from_origin = fromsys(fromcoo[0]*u.deg, fromcoo[1]*u.deg, distance=m31_dist) from_pos = SkyOffsetFrame(1*u.deg, 1*u.deg, origin=from_origin) to_origin = tosys(tocoo[0]*u.deg, tocoo[1]*u.deg, distance=m31_dist) to_astroframe = SkyOffsetFrame(origin=to_origin) target_pos = from_pos.transform_to(to_astroframe) assert_allclose(to_origin.separation(target_pos), np.hypot(from_pos.lon, from_pos.lat), atol=convert_precision) roundtrip_pos = target_pos.transform_to(from_pos) assert_allclose([roundtrip_pos.lon.wrap_at(180*u.deg), roundtrip_pos.lat], [1.0*u.deg, 1.0*u.deg], atol=convert_precision) @pytest.mark.remote_data def test_altaz_attribute_transforms(): """Test transforms between AltAz frames with different attributes.""" el1 = EarthLocation(0*u.deg, 0*u.deg, 0*u.m) origin1 = AltAz(0 * u.deg, 0*u.deg, obstime=Time("2000-01-01T12:00:00"), location=el1) frame1 = SkyOffsetFrame(origin=origin1) coo1 = SkyCoord(1 * u.deg, 1 * u.deg, frame=frame1) el2 = EarthLocation(0*u.deg, 0*u.deg, 0*u.m) origin2 = AltAz(0 * u.deg, 0*u.deg, obstime=Time("2000-01-01T11:00:00"), location=el2) frame2 = SkyOffsetFrame(origin=origin2) coo2 = coo1.transform_to(frame2) coo2_expected = [1.22522446, 0.70624298] * u.deg assert_allclose([coo2.lon.wrap_at(180*u.deg), coo2.lat], coo2_expected, atol=convert_precision) el3 = EarthLocation(0*u.deg, 90*u.deg, 0*u.m) origin3 = AltAz(0 * u.deg, 90*u.deg, obstime=Time("2000-01-01T12:00:00"), location=el3) frame3 = SkyOffsetFrame(origin=origin3) coo3 = coo2.transform_to(frame3) assert_allclose([coo3.lon.wrap_at(180*u.deg), coo3.lat], [1*u.deg, 1*u.deg], atol=convert_precision) @pytest.mark.parametrize("rotation, expectedlatlon", [ (0*u.deg, [0, 1]*u.deg), (180*u.deg, [0, -1]*u.deg), (90*u.deg, [-1, 0]*u.deg), (-90*u.deg, [1, 0]*u.deg) ]) def test_rotation(rotation, expectedlatlon): origin = ICRS(45*u.deg, 45*u.deg) target = ICRS(45*u.deg, 46*u.deg) aframe = SkyOffsetFrame(origin=origin, rotation=rotation) trans = target.transform_to(aframe) assert_allclose([trans.lon.wrap_at(180*u.deg), trans.lat], expectedlatlon, atol=1e-10*u.deg) @pytest.mark.parametrize("rotation, expectedlatlon", [ (0*u.deg, [0, 1]*u.deg), (180*u.deg, [0, -1]*u.deg), (90*u.deg, [-1, 0]*u.deg), (-90*u.deg, [1, 0]*u.deg) ]) def test_skycoord_skyoffset_frame_rotation(rotation, expectedlatlon): """Test if passing a rotation argument via SkyCoord works""" origin = SkyCoord(45*u.deg, 45*u.deg) target = SkyCoord(45*u.deg, 46*u.deg) aframe = origin.skyoffset_frame(rotation=rotation) trans = target.transform_to(aframe) 
assert_allclose([trans.lon.wrap_at(180*u.deg), trans.lat], expectedlatlon, atol=1e-10*u.deg) def test_skyoffset_names(): origin1 = ICRS(45*u.deg, 45*u.deg) aframe1 = SkyOffsetFrame(origin=origin1) assert type(aframe1).__name__ == 'SkyOffsetICRS' origin2 = Galactic(45*u.deg, 45*u.deg) aframe2 = SkyOffsetFrame(origin=origin2) assert type(aframe2).__name__ == 'SkyOffsetGalactic' def test_skyoffset_origindata(): origin = ICRS() with pytest.raises(ValueError): SkyOffsetFrame(origin=origin) def test_skyoffset_lonwrap(): origin = ICRS(45*u.deg, 45*u.deg) sc = SkyCoord(190*u.deg, -45*u.deg, frame=SkyOffsetFrame(origin=origin)) assert sc.lon < 180 * u.deg def test_skyoffset_velocity(): c = ICRS(ra=170.9*u.deg, dec=-78.4*u.deg, pm_ra_cosdec=74.4134*u.mas/u.yr, pm_dec=-93.2342*u.mas/u.yr) skyoffset_frame = SkyOffsetFrame(origin=c) c_skyoffset = c.transform_to(skyoffset_frame) assert_allclose(c_skyoffset.pm_lon_coslat, c.pm_ra_cosdec) assert_allclose(c_skyoffset.pm_lat, c.pm_dec) @pytest.mark.parametrize("rotation, expectedpmlonlat", [ (0*u.deg, [1, 2]*u.mas/u.yr), (45*u.deg, [-2**-0.5, 3*2**-0.5]*u.mas/u.yr), (90*u.deg, [-2, 1]*u.mas/u.yr), (180*u.deg, [-1, -2]*u.mas/u.yr), (-90*u.deg, [2, -1]*u.mas/u.yr) ]) def test_skyoffset_velocity_rotation(rotation, expectedpmlonlat): sc = SkyCoord(ra=170.9*u.deg, dec=-78.4*u.deg, pm_ra_cosdec=1*u.mas/u.yr, pm_dec=2*u.mas/u.yr) c_skyoffset0 = sc.transform_to(sc.skyoffset_frame(rotation=rotation)) assert_allclose(c_skyoffset0.pm_lon_coslat, expectedpmlonlat[0]) assert_allclose(c_skyoffset0.pm_lat, expectedpmlonlat[1])
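
# Added sanity sketch (not part of the original test module), assuming the
# rotation acts as a standard 2-D rotation of the (pm_lon_coslat, pm_lat)
# vector; it reproduces the expected values in the parametrization above.
_th = np.deg2rad(45.)
_rot = np.array([[np.cos(_th), -np.sin(_th)],
                 [np.sin(_th), np.cos(_th)]])
assert np.allclose(_rot @ np.array([1., 2.]), [-2**-0.5, 3*2**-0.5])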
bcf7389e56983781fe8ecf4b3ce341dff2f8aea109a6f635dfcbbeb949182246
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
This module contains tests for the name resolve convenience module.
"""

import time
import urllib.request

import pytest
import numpy as np

from astropy.coordinates.name_resolve import (get_icrs_coordinates,
                                              NameResolveError, sesame_database,
                                              _parse_response, sesame_url)
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy import units as u

_cached_ngc3642 = dict()
_cached_ngc3642["simbad"] = """# NGC 3642    #Q22523669
#=S=Simbad (via url):    1
%@ 503952
%I.0 NGC 3642
%C.0 LIN
%C.N0 15.15.01.00
%J 170.5750583 +59.0742417 = 11:22:18.01 +59:04:27.2
%V z 1593 0.005327 [0.000060] D 2002LEDA.........0P
%D 1.673 1.657 75 (32767) (I) C 2006AJ....131.1163S
%T 5 =32800000 D 2011A&A...532A..74B
%#B 140

#====Done (2013-Feb-12,16:37:11z)===="""

_cached_ngc3642["vizier"] = """# NGC 3642    #Q22523677
#=V=VizieR (local):    1
%J 170.56 +59.08 = 11:22.2 +59:05
%I.0 {NGC} 3642

#====Done (2013-Feb-12,16:37:42z)===="""

_cached_ngc3642["all"] = """# ngc3642    #Q22523722
#=S=Simbad (via url):    1
%@ 503952
%I.0 NGC 3642
%C.0 LIN
%C.N0 15.15.01.00
%J 170.5750583 +59.0742417 = 11:22:18.01 +59:04:27.2
%V z 1593 0.005327 [0.000060] D 2002LEDA.........0P
%D 1.673 1.657 75 (32767) (I) C 2006AJ....131.1163S
%T 5 =32800000 D 2011A&A...532A..74B
%#B 140

#=V=VizieR (local):    1
%J 170.56 +59.08 = 11:22.2 +59:05
%I.0 {NGC} 3642

#!N=NED : *** Could not access the server ***

#====Done (2013-Feb-12,16:39:48z)===="""

_cached_castor = dict()
_cached_castor["all"] = """# castor    #Q22524249
#=S=Simbad (via url):    1
%@ 983633
%I.0 NAME CASTOR
%C.0 **
%C.N0 12.13.00.00
%J 113.649471640 +31.888282216 = 07:34:35.87 +31:53:17.8
%J.E [34.72 25.95 0] A 2007A&A...474..653V
%P -191.45 -145.19 [3.95 2.95 0] A 2007A&A...474..653V
%X 64.12 [3.75] A 2007A&A...474..653V
%S A1V+A2Vm =0.0000D200.0030.0110000000100000 C 2001AJ....122.3466M
%#B 179

#!V=VizieR (local): No table found for: castor
#!N=NED: ****object name not recognized by NED name interpreter
#!N=NED: ***Not recognized by NED: castor

#====Done (2013-Feb-12,16:52:02z)===="""

_cached_castor["simbad"] = """# castor    #Q22524495
#=S=Simbad (via url):    1
%@ 983633
%I.0 NAME CASTOR
%C.0 **
%C.N0 12.13.00.00
%J 113.649471640 +31.888282216 = 07:34:35.87 +31:53:17.8
%J.E [34.72 25.95 0] A 2007A&A...474..653V
%P -191.45 -145.19 [3.95 2.95 0] A 2007A&A...474..653V
%X 64.12 [3.75] A 2007A&A...474..653V
%S A1V+A2Vm =0.0000D200.0030.0110000000100000 C 2001AJ....122.3466M
%#B 179

#====Done (2013-Feb-12,17:00:39z)===="""


@pytest.mark.remote_data
def test_names():

    # First check that sesame is up
    if urllib.request.urlopen("http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame").getcode() != 200:
        pytest.skip("SESAME appears to be down, skipping test_name_resolve.py:test_names()...")

    with pytest.raises(NameResolveError):
        get_icrs_coordinates("m87h34hhh")

    try:
        icrs = get_icrs_coordinates("NGC 3642")
    except NameResolveError:
        ra, dec = _parse_response(_cached_ngc3642["all"])
        icrs = SkyCoord(ra=float(ra)*u.degree, dec=float(dec)*u.degree)

    icrs_true = SkyCoord(ra="11h 22m 18.014s", dec="59d 04m 27.27s")

    # use precision of only 1 decimal here and below because the result can
    # change due to Sesame server-side changes.
    np.testing.assert_almost_equal(icrs.ra.degree, icrs_true.ra.degree, 1)
    np.testing.assert_almost_equal(icrs.dec.degree, icrs_true.dec.degree, 1)

    try:
        icrs = get_icrs_coordinates("castor")
    except NameResolveError:
        ra, dec = _parse_response(_cached_castor["all"])
        icrs = SkyCoord(ra=float(ra)*u.degree, dec=float(dec)*u.degree)

    icrs_true = SkyCoord(ra="07h 34m 35.87s", dec="+31d 53m 17.8s")
    np.testing.assert_almost_equal(icrs.ra.degree, icrs_true.ra.degree, 1)
    np.testing.assert_almost_equal(icrs.dec.degree, icrs_true.dec.degree, 1)


def test_names_parse():
    # a few test cases for parsing embedded coordinates from object name
    test_names = ['CRTS SSS100805 J194428-420209',
                  'MASTER OT J061451.7-272535.5',
                  '2MASS J06495091-0737408',
                  '1RXS J042555.8-194534',
                  'SDSS J132411.57+032050.5',
                  'DENIS-P J203137.5-000511',
                  '2QZ J142438.9-022739',
                  'CXOU J141312.3-652013']
    for name in test_names:
        sc = get_icrs_coordinates(name, parse=True)


@pytest.mark.remote_data
@pytest.mark.parametrize(("name", "db_dict"), [('NGC 3642', _cached_ngc3642),
                                               ('castor', _cached_castor)])
def test_database_specify(name, db_dict):
    # First check that at least some sesame mirror is up
    for url in sesame_url.get():
        if urllib.request.urlopen(url).getcode() == 200:
            break
    else:
        pytest.skip("All SESAME mirrors appear to be down, skipping "
                    "test_name_resolve.py:test_database_specify()...")

    for db in db_dict.keys():
        with sesame_database.set(db):
            icrs = SkyCoord.from_name(name)

        time.sleep(1)
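
# Added usage sketch (not part of the original test module): the
# science-state pattern exercised by test_database_specify, restricting
# resolution to a single database (needs network access, hence commented):
#
#     with sesame_database.set('simbad'):
#         coo = SkyCoord.from_name('NGC 3642')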
6823c6e0a3cf9fc1a0db6561df99e8420d3e87b6c329cd31bcca0708a4938a8d
import pickle

import pytest
import numpy as np

from astropy.coordinates import Longitude
from astropy import coordinates as coord
from astropy.tests.helper import pickle_protocol, check_pickling_recovery  # noqa

# Can't test distances without scipy due to cosmology deps
try:
    import scipy  # pylint: disable=W0611
    HAS_SCIPY = True
except ImportError:
    HAS_SCIPY = False


def test_basic():
    lon1 = Longitude(1.23, "radian", wrap_angle='180d')
    s = pickle.dumps(lon1)
    lon2 = pickle.loads(s)


def test_pickle_longitude_wrap_angle():
    a = Longitude(1.23, "radian", wrap_angle='180d')
    s = pickle.dumps(a)
    b = pickle.loads(s)

    assert a.rad == b.rad
    assert a.wrap_angle == b.wrap_angle


_names = [coord.Angle,
          coord.Distance,
          coord.DynamicMatrixTransform,
          coord.ICRS,
          coord.Latitude,
          coord.Longitude,
          coord.StaticMatrixTransform,
          ]

_xfail = [False,
          not HAS_SCIPY,
          True,
          True,
          False,
          True,
          False]

_args = [[0.0],
         [],
         [lambda *args: np.identity(3), coord.ICRS, coord.ICRS],
         [0, 0],
         [0],
         [0],
         [np.identity(3), coord.ICRS, coord.ICRS],
         ]

_kwargs = [{'unit': 'radian'},
           {'z': 0.23},
           {},
           {'unit': ['radian', 'radian']},
           {'unit': 'radian'},
           {'unit': 'radian'},
           {},
           ]


@pytest.mark.parametrize(("name", "args", "kwargs", "xfail"),
                         zip(_names, _args, _kwargs, _xfail))
def test_simple_object(pickle_protocol, name, args, kwargs, xfail):
    # Tests easily instantiated objects
    if xfail:
        pytest.xfail()
    original = name(*args, **kwargs)
    check_pickling_recovery(original, pickle_protocol)
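
# Added illustration (not part of the original test module): the round trip
# that check_pickling_recovery automates, spelled out for a single Angle.
_orig = coord.Angle(0.5, unit='radian')
_copy = pickle.loads(pickle.dumps(_orig))
assert _copy == _orig and _copy.unit == _orig.unit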
3ca12f052a4bfb3d8e7f8596f9a32a51ffc6014aace7e296b7e310b7a4122339
import pytest
import warnings


# autouse makes this an all-coordinates-tests fixture
# this can be eliminated if/when warnings in pytest are all turned to errors (gh issue #7928)
@pytest.fixture(autouse=True)
def representation_deprecation_to_error():
    warnings.filterwarnings('error', 'The `representation` keyword/property name is deprecated in favor of `representation_type`')
    filt = warnings.filters[0]
    yield
    try:
        warnings.filters.remove(filt)
    except ValueError:
        pass  # already removed
a0a49e3ff03eb8a07561b69ffa6cc4e493a9c846944aadff1f3dc8a278d50107
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest
import numpy as np

from astropy import units as u
from astropy.coordinates.distances import Distance
from astropy.coordinates.builtin_frames import (ICRS, FK5, FK4, FK4NoETerms,
                                                Galactic, Supergalactic,
                                                Galactocentric, HCRS, GCRS, LSR)
from astropy.coordinates import SkyCoord
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.coordinates import EarthLocation, CartesianRepresentation
from astropy.time import Time
from astropy.units import allclose

# used below in the next parametrized test
m31_sys = [ICRS, FK5, FK4, Galactic]
m31_coo = [(10.6847929, 41.2690650), (10.6847929, 41.2690650),
           (10.0004738, 40.9952444), (121.1744050, -21.5729360)]
m31_dist = Distance(770, u.kpc)

convert_precision = 1 * u.arcsec
roundtrip_precision = 1e-4 * u.degree
dist_precision = 1e-9 * u.kpc

m31_params = []
for i in range(len(m31_sys)):
    for j in range(len(m31_sys)):
        if i < j:
            m31_params.append((m31_sys[i], m31_sys[j], m31_coo[i], m31_coo[j]))


@pytest.mark.parametrize(('fromsys', 'tosys', 'fromcoo', 'tocoo'), m31_params)
def test_m31_coord_transforms(fromsys, tosys, fromcoo, tocoo):
    """
    This tests a variety of coordinate conversions for the Chandra
    point-source catalog location of M31 from NED.
    """
    coo1 = fromsys(ra=fromcoo[0]*u.deg, dec=fromcoo[1]*u.deg, distance=m31_dist)
    coo2 = coo1.transform_to(tosys)
    if tosys is FK4:
        coo2_prec = coo2.transform_to(FK4(equinox=Time('B1950')))
        assert (coo2_prec.spherical.lon - tocoo[0]*u.deg) < convert_precision  # <1 arcsec
        assert (coo2_prec.spherical.lat - tocoo[1]*u.deg) < convert_precision
    else:
        assert (coo2.spherical.lon - tocoo[0]*u.deg) < convert_precision  # <1 arcsec
        assert (coo2.spherical.lat - tocoo[1]*u.deg) < convert_precision
    assert coo1.distance.unit == u.kpc
    assert coo2.distance.unit == u.kpc
    assert m31_dist.unit == u.kpc
    assert (coo2.distance - m31_dist) < dist_precision

    # check round-tripping
    coo1_2 = coo2.transform_to(fromsys)
    assert (coo1_2.spherical.lon - fromcoo[0]*u.deg) < roundtrip_precision
    assert (coo1_2.spherical.lat - fromcoo[1]*u.deg) < roundtrip_precision
    assert (coo1_2.distance - m31_dist) < dist_precision


def test_precession():
    """
    Ensures that FK4 and FK5 coordinates precess their equinoxes
    """
    j2000 = Time('J2000')
    b1950 = Time('B1950')
    j1975 = Time('J1975')
    b1975 = Time('B1975')

    fk4 = FK4(ra=1*u.radian, dec=0.5*u.radian)
    assert fk4.equinox.byear == b1950.byear
    fk4_2 = fk4.transform_to(FK4(equinox=b1975))
    assert fk4_2.equinox.byear == b1975.byear

    fk5 = FK5(ra=1*u.radian, dec=0.5*u.radian)
    assert fk5.equinox.jyear == j2000.jyear
    fk5_2 = fk5.transform_to(FK5(equinox=j1975))
    assert fk5_2.equinox.jyear == j1975.jyear


def test_fk5_galactic():
    """
    Check that FK5 -> Galactic gives the same as FK5 -> FK4 -> Galactic.
""" fk5 = FK5(ra=1*u.deg, dec=2*u.deg) direct = fk5.transform_to(Galactic) indirect = fk5.transform_to(FK4).transform_to(Galactic) assert direct.separation(indirect).degree < 1.e-10 direct = fk5.transform_to(Galactic) indirect = fk5.transform_to(FK4NoETerms).transform_to(Galactic) assert direct.separation(indirect).degree < 1.e-10 def test_galactocentric(): # when z_sun=0, transformation should be very similar to Galactic icrs_coord = ICRS(ra=np.linspace(0, 360, 10)*u.deg, dec=np.linspace(-90, 90, 10)*u.deg, distance=1.*u.kpc) g_xyz = icrs_coord.transform_to(Galactic).cartesian.xyz gc_xyz = icrs_coord.transform_to(Galactocentric(z_sun=0*u.kpc)).cartesian.xyz diff = np.abs(g_xyz - gc_xyz) assert allclose(diff[0], 8.3*u.kpc, atol=1E-5*u.kpc) assert allclose(diff[1:], 0*u.kpc, atol=1E-5*u.kpc) # generate some test coordinates g = Galactic(l=[0, 0, 45, 315]*u.deg, b=[-45, 45, 0, 0]*u.deg, distance=[np.sqrt(2)]*4*u.kpc) xyz = g.transform_to(Galactocentric(galcen_distance=1.*u.kpc, z_sun=0.*u.pc)).cartesian.xyz true_xyz = np.array([[0, 0, -1.], [0, 0, 1], [0, 1, 0], [0, -1, 0]]).T*u.kpc assert allclose(xyz.to(u.kpc), true_xyz.to(u.kpc), atol=1E-5*u.kpc) # check that ND arrays work # from Galactocentric to Galactic x = np.linspace(-10., 10., 100) * u.kpc y = np.linspace(-10., 10., 100) * u.kpc z = np.zeros_like(x) g1 = Galactocentric(x=x, y=y, z=z) g2 = Galactocentric(x=x.reshape(100, 1, 1), y=y.reshape(100, 1, 1), z=z.reshape(100, 1, 1)) g1t = g1.transform_to(Galactic) g2t = g2.transform_to(Galactic) assert_allclose(g1t.cartesian.xyz, g2t.cartesian.xyz[:, :, 0, 0]) # from Galactic to Galactocentric l = np.linspace(15, 30., 100) * u.deg b = np.linspace(-10., 10., 100) * u.deg d = np.ones_like(l.value) * u.kpc g1 = Galactic(l=l, b=b, distance=d) g2 = Galactic(l=l.reshape(100, 1, 1), b=b.reshape(100, 1, 1), distance=d.reshape(100, 1, 1)) g1t = g1.transform_to(Galactocentric) g2t = g2.transform_to(Galactocentric) np.testing.assert_almost_equal(g1t.cartesian.xyz.value, g2t.cartesian.xyz.value[:, :, 0, 0]) def test_supergalactic(): """ Check Galactic<->Supergalactic and Galactic<->ICRS conversion. """ # Check supergalactic North pole. npole = Galactic(l=47.37*u.degree, b=+6.32*u.degree) assert allclose(npole.transform_to(Supergalactic).sgb.deg, +90, atol=1e-9) # Check the origin of supergalactic longitude. lon0 = Supergalactic(sgl=0*u.degree, sgb=0*u.degree) lon0_gal = lon0.transform_to(Galactic) assert allclose(lon0_gal.l.deg, 137.37, atol=1e-9) assert allclose(lon0_gal.b.deg, 0, atol=1e-9) # Test Galactic<->ICRS with some positions that appear in Foley et al. 2008 # (http://adsabs.harvard.edu/abs/2008A%26A...484..143F) # GRB 021219 supergalactic = Supergalactic(sgl=29.91*u.degree, sgb=+73.72*u.degree) icrs = SkyCoord('18h50m27s +31d57m17s') assert supergalactic.separation(icrs) < 0.005 * u.degree # GRB 030320 supergalactic = Supergalactic(sgl=-174.44*u.degree, sgb=+46.17*u.degree) icrs = SkyCoord('17h51m36s -25d18m52s') assert supergalactic.separation(icrs) < 0.005 * u.degree class TestHCRS(): """ Check HCRS<->ICRS coordinate conversions. Uses ICRS Solar positions predicted by get_body_barycentric; with `t1` and `tarr` as defined below, the ICRS Solar positions were predicted using, e.g. coord.ICRS(coord.get_body_barycentric(tarr, 'sun')). 
""" def setup(self): self.t1 = Time("2013-02-02T23:00") self.t2 = Time("2013-08-02T23:00") self.tarr = Time(["2013-02-02T23:00", "2013-08-02T23:00"]) self.sun_icrs_scalar = ICRS(ra=244.52984668*u.deg, dec=-22.36943723*u.deg, distance=406615.66347377*u.km) # array of positions corresponds to times in `tarr` self.sun_icrs_arr = ICRS(ra=[244.52989062, 271.40976248]*u.deg, dec=[-22.36943605, -25.07431079]*u.deg, distance=[406615.66347377, 375484.13558956]*u.km) # corresponding HCRS positions self.sun_hcrs_t1 = HCRS(CartesianRepresentation([0.0, 0.0, 0.0] * u.km), obstime=self.t1) twod_rep = CartesianRepresentation([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]] * u.km) self.sun_hcrs_tarr = HCRS(twod_rep, obstime=self.tarr) self.tolerance = 5*u.km def test_from_hcrs(self): # test scalar transform transformed = self.sun_hcrs_t1.transform_to(ICRS()) separation = transformed.separation_3d(self.sun_icrs_scalar) assert_allclose(separation, 0*u.km, atol=self.tolerance) # test non-scalar positions and times transformed = self.sun_hcrs_tarr.transform_to(ICRS()) separation = transformed.separation_3d(self.sun_icrs_arr) assert_allclose(separation, 0*u.km, atol=self.tolerance) def test_from_icrs(self): # scalar positions transformed = self.sun_icrs_scalar.transform_to(HCRS(obstime=self.t1)) separation = transformed.separation_3d(self.sun_hcrs_t1) assert_allclose(separation, 0*u.km, atol=self.tolerance) # nonscalar positions transformed = self.sun_icrs_arr.transform_to(HCRS(obstime=self.tarr)) separation = transformed.separation_3d(self.sun_hcrs_tarr) assert_allclose(separation, 0*u.km, atol=self.tolerance) class TestHelioBaryCentric(): """ Check GCRS<->Heliocentric and Barycentric coordinate conversions. Uses the WHT observing site (information grabbed from data/sites.json). """ def setup(self): wht = EarthLocation(342.12*u.deg, 28.758333333333333*u.deg, 2327*u.m) self.obstime = Time("2013-02-02T23:00") self.wht_itrs = wht.get_itrs(obstime=self.obstime) @pytest.mark.remote_data def test_heliocentric(self): gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime)) helio = gcrs.transform_to(HCRS(obstime=self.obstime)) # Check it doesn't change from previous times. previous = [-1.02597256e+11, 9.71725820e+10, 4.21268419e+10] * u.m assert_allclose(helio.cartesian.xyz, previous) # And that it agrees with SLALIB to within 14km helio_slalib = [-0.685820296, 0.6495585893, 0.2816005464] * u.au assert np.sqrt(((helio.cartesian.xyz - helio_slalib)**2).sum()) < 14. * u.km @pytest.mark.remote_data def test_barycentric(self): gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime)) bary = gcrs.transform_to(ICRS()) previous = [-1.02758958e+11, 9.68331109e+10, 4.19720938e+10] * u.m assert_allclose(bary.cartesian.xyz, previous) # And that it agrees with SLALIB answer to within 14km bary_slalib = [-0.6869012079, 0.6472893646, 0.2805661191] * u.au assert np.sqrt(((bary.cartesian.xyz - bary_slalib)**2).sum()) < 14. 
* u.km def test_lsr_sanity(): # random numbers, but zero velocity in ICRS frame icrs = ICRS(ra=15.1241*u.deg, dec=17.5143*u.deg, distance=150.12*u.pc, pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr, radial_velocity=0*u.km/u.s) lsr = icrs.transform_to(LSR) lsr_diff = lsr.data.differentials['s'] cart_lsr_vel = lsr_diff.represent_as(CartesianRepresentation, base=lsr.data) lsr_vel = ICRS(cart_lsr_vel) gal_lsr = lsr_vel.transform_to(Galactic).cartesian.xyz assert allclose(gal_lsr.to(u.km/u.s, u.dimensionless_angles()), lsr.v_bary.d_xyz) # moving with LSR velocity lsr = LSR(ra=15.1241*u.deg, dec=17.5143*u.deg, distance=150.12*u.pc, pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr, radial_velocity=0*u.km/u.s) icrs = lsr.transform_to(ICRS) icrs_diff = icrs.data.differentials['s'] cart_vel = icrs_diff.represent_as(CartesianRepresentation, base=icrs.data) vel = ICRS(cart_vel) gal_icrs = vel.transform_to(Galactic).cartesian.xyz assert allclose(gal_icrs.to(u.km/u.s, u.dimensionless_angles()), -lsr.v_bary.d_xyz)
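
# Added usage sketch (not part of the original test module): the equinox
# precession pattern from test_precession, as one would use it directly.
_c = FK5(ra=1*u.radian, dec=0.5*u.radian)  # equinox defaults to J2000
_c1975 = _c.transform_to(FK5(equinox=Time('J1975')))
assert _c1975.equinox.jyear == Time('J1975').jyear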
ef823756f472a07446c0bafe21b5269a71701c545c00352e1c903c52a2cd301e
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """Test initalization and other aspects of Angle and subclasses""" import pytest import numpy as np from numpy.testing import assert_allclose, assert_array_equal from astropy.coordinates.angles import Longitude, Latitude, Angle from astropy import units as u from astropy.coordinates.errors import (IllegalSecondError, IllegalMinuteError, IllegalHourError, IllegalSecondWarning, IllegalMinuteWarning) def test_create_angles(): """ Tests creating and accessing Angle objects """ ''' The "angle" is a fundamental object. The internal representation is stored in radians, but this is transparent to the user. Units *must* be specified rather than a default value be assumed. This is as much for self-documenting code as anything else. Angle objects simply represent a single angular coordinate. More specific angular coordinates (e.g. Longitude, Latitude) are subclasses of Angle.''' a1 = Angle(54.12412, unit=u.degree) a2 = Angle("54.12412", unit=u.degree) a3 = Angle("54:07:26.832", unit=u.degree) a4 = Angle("54.12412 deg") a5 = Angle("54.12412 degrees") a6 = Angle("54.12412°") # because we like Unicode a7 = Angle((54, 7, 26.832), unit=u.degree) a8 = Angle("54°07'26.832\"") # (deg,min,sec) *tuples* are acceptable, but lists/arrays are *not* # because of the need to eventually support arrays of coordinates a9 = Angle([54, 7, 26.832], unit=u.degree) assert_allclose(a9.value, [54, 7, 26.832]) assert a9.unit is u.degree a10 = Angle(3.60827466667, unit=u.hour) a11 = Angle("3:36:29.7888000120", unit=u.hour) a12 = Angle((3, 36, 29.7888000120), unit=u.hour) # *must* be a tuple # Regression test for #5001 a13 = Angle((3, 36, 29.7888000120), unit='hour') Angle(0.944644098745, unit=u.radian) with pytest.raises(u.UnitsError): Angle(54.12412) # raises an exception because this is ambiguous with pytest.raises(u.UnitsError): Angle(54.12412, unit=u.m) with pytest.raises(ValueError): Angle(12.34, unit="not a unit") a14 = Angle("03h36m29.7888000120") # no trailing 's', but unambiguous a15 = Angle("5h4m3s") # single digits, no decimal assert a15.unit == u.hourangle a16 = Angle("1 d") a17 = Angle("1 degree") assert a16.degree == 1 assert a17.degree == 1 a18 = Angle("54 07.4472", unit=u.degree) a19 = Angle("54:07.4472", unit=u.degree) a20 = Angle("54d07.4472m", unit=u.degree) a21 = Angle("3h36m", unit=u.hour) a22 = Angle("3.6h", unit=u.hour) a23 = Angle("- 3h", unit=u.hour) a24 = Angle("+ 3h", unit=u.hour) # ensure the above angles that should match do assert a1 == a2 == a3 == a4 == a5 == a6 == a7 == a8 == a18 == a19 == a20 assert_allclose(a1.radian, a2.radian) assert_allclose(a2.degree, a3.degree) assert_allclose(a3.radian, a4.radian) assert_allclose(a4.radian, a5.radian) assert_allclose(a5.radian, a6.radian) assert_allclose(a6.radian, a7.radian) assert_allclose(a10.degree, a11.degree) assert a11 == a12 == a13 == a14 assert a21 == a22 assert a23 == -a24 # check for illegal ranges / values with pytest.raises(IllegalSecondError): a = Angle("12 32 99", unit=u.degree) with pytest.raises(IllegalMinuteError): a = Angle("12 99 23", unit=u.degree) with pytest.raises(IllegalSecondError): a = Angle("12 32 99", unit=u.hour) with pytest.raises(IllegalMinuteError): a = Angle("12 99 23", unit=u.hour) with pytest.raises(IllegalHourError): a = Angle("99 25 51.0", unit=u.hour) with pytest.raises(ValueError): a = Angle("12 25 51.0xxx", unit=u.hour) with pytest.raises(ValueError): a = Angle("12h34321m32.2s") assert a1 is not None def test_angle_from_view(): 
q = np.arange(3.) * u.deg a = q.view(Angle) assert type(a) is Angle assert a.unit is q.unit assert np.all(a == q) q2 = np.arange(4) * u.m with pytest.raises(u.UnitTypeError): q2.view(Angle) def test_angle_ops(): """ Tests operations on Angle objects """ # Angles can be added and subtracted. Multiplication and division by a # scalar is also permitted. A negative operator is also valid. All of # these operate in a single dimension. Attempting to multiply or divide two # Angle objects will return a quantity. An exception will be raised if it # is attempted to store output with a non-angular unit in an Angle [#2718]. a1 = Angle(3.60827466667, unit=u.hour) a2 = Angle("54:07:26.832", unit=u.degree) a1 + a2 # creates new Angle object a1 - a2 -a1 assert_allclose((a1 * 2).hour, 2 * 3.6082746666700003) assert abs((a1 / 3.123456).hour - 3.60827466667 / 3.123456) < 1e-10 # commutativity assert (2 * a1).hour == (a1 * 2).hour a3 = Angle(a1) # makes a *copy* of the object, but identical content as a1 assert_allclose(a1.radian, a3.radian) assert a1 is not a3 a4 = abs(-a1) assert a4.radian == a1.radian a5 = Angle(5.0, unit=u.hour) assert a5 > a1 assert a5 >= a1 assert a1 < a5 assert a1 <= a5 # check operations with non-angular result give Quantity. a6 = Angle(45., u.degree) a7 = a6 * a5 assert type(a7) is u.Quantity # but those with angular result yield Angle. # (a9 is regression test for #5327) a8 = a1 + 1.*u.deg assert type(a8) is Angle a9 = 1.*u.deg + a1 assert type(a9) is Angle with pytest.raises(TypeError): a6 *= a5 with pytest.raises(TypeError): a6 *= u.m with pytest.raises(TypeError): np.sin(a6, out=a6) def test_angle_methods(): # Most methods tested as part of the Quantity tests. # A few tests here which caused problems before: #8368 a = Angle([0., 2.], 'deg') a_mean = a.mean() assert type(a_mean) is Angle assert a_mean == 1. * u.degree a_std = a.std() assert type(a_std) is Angle assert a_std == 1. * u.degree a_var = a.var() assert type(a_var) is u.Quantity assert a_var == 1. * u.degree ** 2 a_ptp = a.ptp() assert type(a_ptp) is Angle assert a_ptp == 2. * u.degree a_max = a.max() assert type(a_max) is Angle assert a_max == 2. * u.degree a_min = a.min() assert type(a_min) is Angle assert a_min == 0. 
* u.degree def test_angle_convert(): """ Test unit conversion of Angle objects """ angle = Angle("54.12412", unit=u.degree) assert_allclose(angle.hour, 3.60827466667) assert_allclose(angle.radian, 0.944644098745) assert_allclose(angle.degree, 54.12412) assert len(angle.hms) == 3 assert isinstance(angle.hms, tuple) assert angle.hms[0] == 3 assert angle.hms[1] == 36 assert_allclose(angle.hms[2], 29.78879999999947) # also check that the namedtuple attribute-style access works: assert angle.hms.h == 3 assert angle.hms.m == 36 assert_allclose(angle.hms.s, 29.78879999999947) assert len(angle.dms) == 3 assert isinstance(angle.dms, tuple) assert angle.dms[0] == 54 assert angle.dms[1] == 7 assert_allclose(angle.dms[2], 26.831999999992036) # also check that the namedtuple attribute-style access works: assert angle.dms.d == 54 assert angle.dms.m == 7 assert_allclose(angle.dms.s, 26.831999999992036) assert isinstance(angle.dms[0], float) assert isinstance(angle.hms[0], float) # now make sure dms and signed_dms work right for negative angles negangle = Angle("-54.12412", unit=u.degree) assert negangle.dms.d == -54 assert negangle.dms.m == -7 assert_allclose(negangle.dms.s, -26.831999999992036) assert negangle.signed_dms.sign == -1 assert negangle.signed_dms.d == 54 assert negangle.signed_dms.m == 7 assert_allclose(negangle.signed_dms.s, 26.831999999992036) def test_angle_formatting(): """ Tests string formatting for Angle objects """ ''' The string method of Angle has this signature: def string(self, unit=DEGREE, decimal=False, sep=" ", precision=5, pad=False): The "decimal" parameter defaults to False since if you need to print the Angle as a decimal, there's no need to use the "format" method (see above). ''' angle = Angle("54.12412", unit=u.degree) # __str__ is the default `format` assert str(angle) == angle.to_string() res = 'Angle as HMS: 3h36m29.7888s' assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour)) == res res = 'Angle as HMS: 3:36:29.7888' assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, sep=":")) == res res = 'Angle as HMS: 3:36:29.79' assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, sep=":", precision=2)) == res # Note that you can provide one, two, or three separators passed as a # tuple or list res = 'Angle as HMS: 3h36m29.7888s' assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, sep=("h", "m", "s"), precision=4)) == res res = 'Angle as HMS: 3-36|29.7888' assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, sep=["-", "|"], precision=4)) == res res = 'Angle as HMS: 3-36-29.7888' assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, sep="-", precision=4)) == res res = 'Angle as HMS: 03h36m29.7888s' assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, precision=4, pad=True)) == res # Same as above, in degrees angle = Angle("3 36 29.78880", unit=u.degree) res = 'Angle as DMS: 3d36m29.7888s' assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree)) == res res = 'Angle as DMS: 3:36:29.7888' assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree, sep=":")) == res res = 'Angle as DMS: 3:36:29.79' assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree, sep=":", precision=2)) == res # Note that you can provide one, two, or three separators passed as a # tuple or list res = 'Angle as DMS: 3d36m29.7888s' assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree, sep=("d", "m", "s"), precision=4)) == res res = 'Angle as DMS: 3-36|29.7888' assert "Angle as DMS: 
{0}".format(angle.to_string(unit=u.degree, sep=["-", "|"], precision=4)) == res

    res = 'Angle as DMS: 3-36-29.7888'
    assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree, sep="-",
                                                      precision=4)) == res

    res = 'Angle as DMS: 03d36m29.7888s'
    assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree,
                                                      precision=4, pad=True)) == res

    res = 'Angle as rad: 0.0629763rad'
    assert "Angle as rad: {0}".format(angle.to_string(unit=u.radian)) == res

    res = 'Angle as rad decimal: 0.0629763'
    assert "Angle as rad decimal: {0}".format(angle.to_string(unit=u.radian, decimal=True)) == res

    # check negative angles
    angle = Angle(-1.23456789, unit=u.degree)
    angle2 = Angle(-1.23456789, unit=u.hour)

    assert angle.to_string() == '-1d14m04.4444s'
    assert angle.to_string(pad=True) == '-01d14m04.4444s'
    assert angle.to_string(unit=u.hour) == '-0h04m56.2963s'
    assert angle2.to_string(unit=u.hour, pad=True) == '-01h14m04.4444s'
    assert angle.to_string(unit=u.radian, decimal=True) == '-0.0215473'


def test_to_string_vector():
    # Regression test for the fact that vectorize doesn't work with Numpy 1.6
    assert Angle([1./7., 1./7.], unit='deg').to_string()[0] == "0d08m34.2857s"
    assert Angle([1./7.], unit='deg').to_string()[0] == "0d08m34.2857s"
    assert Angle(1./7., unit='deg').to_string() == "0d08m34.2857s"


def test_angle_format_roundtripping():
    """
    Ensures that the string representation of an angle can be used to create a
    new valid Angle.
    """

    a1 = Angle(0, unit=u.radian)
    a2 = Angle(10, unit=u.degree)
    a3 = Angle(0.543, unit=u.degree)
    a4 = Angle('1d2m3.4s')

    assert Angle(str(a1)).degree == a1.degree
    assert Angle(str(a2)).degree == a2.degree
    assert Angle(str(a3)).degree == a3.degree
    assert Angle(str(a4)).degree == a4.degree

    # also check Longitude/Latitude
    ra = Longitude('1h2m3.4s')
    dec = Latitude('1d2m3.4s')

    assert_allclose(Angle(str(ra)).degree, ra.degree)
    assert_allclose(Angle(str(dec)).degree, dec.degree)


def test_radec():
    """
    Tests creation/operations of Longitude and Latitude objects
    """

    '''
    Longitude and Latitude are objects that are subclassed from Angle. As with
    Angle, Longitude and Latitude can parse any unambiguous format (tuples,
    formatted strings, etc.).

    The intention is not to create an Angle subclass for every possible
    coordinate object (e.g. galactic l, galactic b). However, equatorial
    Longitude/Latitude are so prevalent in astronomy that it's worth creating
    ones for these units. They will be noted as "special" in the docs, and
    just the Angle class is to be used for other coordinate systems.
    '''

    with pytest.raises(u.UnitsError):
        ra = Longitude("4:08:15.162342")  # error - hours or degrees?
with pytest.raises(u.UnitsError): ra = Longitude("-4:08:15.162342") # the "smart" initializer allows >24 to automatically do degrees, but the # Angle-based one does not # TODO: adjust in 0.3 for whatever behavior is decided on # ra = Longitude("26:34:15.345634") # unambiguous b/c hours don't go past 24 # assert_allclose(ra.degree, 26.570929342) with pytest.raises(u.UnitsError): ra = Longitude("26:34:15.345634") # ra = Longitude(68) with pytest.raises(u.UnitsError): ra = Longitude(68) with pytest.raises(u.UnitsError): ra = Longitude(12) with pytest.raises(ValueError): ra = Longitude("garbage containing a d and no units") ra = Longitude("12h43m23s") assert_allclose(ra.hour, 12.7230555556) ra = Longitude((56, 14, 52.52), unit=u.degree) # can accept tuples # TODO: again, fix based on >24 behavior # ra = Longitude((56,14,52.52)) with pytest.raises(u.UnitsError): ra = Longitude((56, 14, 52.52)) with pytest.raises(u.UnitsError): ra = Longitude((12, 14, 52)) # ambiguous w/o units ra = Longitude((12, 14, 52), unit=u.hour) ra = Longitude([56, 64, 52.2], unit=u.degree) # ...but not arrays (yet) # Units can be specified ra = Longitude("4:08:15.162342", unit=u.hour) # TODO: this was the "smart" initializer behavior - adjust in 0.3 appropriately # Where Longitude values are commonly found in hours or degrees, declination is # nearly always specified in degrees, so this is the default. # dec = Latitude("-41:08:15.162342") with pytest.raises(u.UnitsError): dec = Latitude("-41:08:15.162342") dec = Latitude("-41:08:15.162342", unit=u.degree) # same as above def test_negative_zero_dms(): # Test for DMS parser a = Angle('-00:00:10', u.deg) assert_allclose(a.degree, -10. / 3600.) # Unicode minus a = Angle('−00:00:10', u.deg) assert_allclose(a.degree, -10. / 3600.) def test_negative_zero_dm(): # Test for DM parser a = Angle('-00:10', u.deg) assert_allclose(a.degree, -10. / 60.) def test_negative_zero_hms(): # Test for HMS parser a = Angle('-00:00:10', u.hour) assert_allclose(a.hour, -10. / 3600.) def test_negative_zero_hm(): # Test for HM parser a = Angle('-00:10', u.hour) assert_allclose(a.hour, -10. / 60.) def test_negative_sixty_hm(): # Test for HM parser with pytest.warns(IllegalMinuteWarning): a = Angle('-00:60', u.hour) assert_allclose(a.hour, -1.) def test_plus_sixty_hm(): # Test for HM parser with pytest.warns(IllegalMinuteWarning): a = Angle('00:60', u.hour) assert_allclose(a.hour, 1.) def test_negative_fifty_nine_sixty_dms(): # Test for DMS parser with pytest.warns(IllegalSecondWarning): a = Angle('-00:59:60', u.deg) assert_allclose(a.degree, -1.) def test_plus_fifty_nine_sixty_dms(): # Test for DMS parser with pytest.warns(IllegalSecondWarning): a = Angle('+00:59:60', u.deg) assert_allclose(a.degree, 1.) def test_negative_sixty_dms(): # Test for DMS parser with pytest.warns(IllegalSecondWarning): a = Angle('-00:00:60', u.deg) assert_allclose(a.degree, -1. / 60.) def test_plus_sixty_dms(): # Test for DMS parser with pytest.warns(IllegalSecondWarning): a = Angle('+00:00:60', u.deg) assert_allclose(a.degree, 1. / 60.) 
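# Editor's note: the following sketch is illustrative and not part of the
# original suite. It spells out the normalization behaviour the warning tests
# above rely on: an out-of-range seconds field carries over into the minutes
# (and degrees) fields, so the parsed value is still well defined.
def test_overflow_normalization_sketch():
    with pytest.warns(IllegalSecondWarning):
        a = Angle('00:59:60', u.deg)
    # 59m60s carries over to exactly 1 degree.
    assert_allclose(a.degree, 1.)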
def test_angle_to_is_angle(): with pytest.warns(IllegalSecondWarning): a = Angle('00:00:60', u.deg) assert isinstance(a, Angle) assert isinstance(a.to(u.rad), Angle) def test_angle_to_quantity(): with pytest.warns(IllegalSecondWarning): a = Angle('00:00:60', u.deg) q = u.Quantity(a) assert isinstance(q, u.Quantity) assert q.unit is u.deg def test_quantity_to_angle(): a = Angle(1.0*u.deg) assert isinstance(a, Angle) with pytest.raises(u.UnitsError): Angle(1.0*u.meter) a = Angle(1.0*u.hour) assert isinstance(a, Angle) assert a.unit is u.hourangle with pytest.raises(u.UnitsError): Angle(1.0*u.min) def test_angle_string(): with pytest.warns(IllegalSecondWarning): a = Angle('00:00:60', u.deg) assert str(a) == '0d01m00s' a = Angle('-00:00:10', u.hour) assert str(a) == '-0h00m10s' a = Angle(3.2, u.radian) assert str(a) == '3.2rad' a = Angle(4.2, u.microarcsecond) assert str(a) == '4.2uarcsec' a = Angle('1.0uarcsec') assert a.value == 1.0 assert a.unit == u.microarcsecond a = Angle("3d") assert_allclose(a.value, 3.0) assert a.unit == u.degree a = Angle('10"') assert_allclose(a.value, 10.0) assert a.unit == u.arcsecond a = Angle("10'") assert_allclose(a.value, 10.0) assert a.unit == u.arcminute def test_angle_repr(): assert 'Angle' in repr(Angle(0, u.deg)) assert 'Longitude' in repr(Longitude(0, u.deg)) assert 'Latitude' in repr(Latitude(0, u.deg)) a = Angle(0, u.deg) repr(a) def test_large_angle_representation(): """Test that angles above 360 degrees can be output as strings, in repr, str, and to_string. (regression test for #1413)""" a = Angle(350, u.deg) + Angle(350, u.deg) a.to_string() a.to_string(u.hourangle) repr(a) repr(a.to(u.hourangle)) str(a) str(a.to(u.hourangle)) def test_wrap_at_inplace(): a = Angle([-20, 150, 350, 360] * u.deg) out = a.wrap_at('180d', inplace=True) assert out is None assert np.all(a.degree == np.array([-20., 150., -10., 0.])) def test_latitude(): with pytest.raises(ValueError): lat = Latitude(['91d', '89d']) with pytest.raises(ValueError): lat = Latitude('-91d') lat = Latitude(['90d', '89d']) # check that one can get items assert lat[0] == 90 * u.deg assert lat[1] == 89 * u.deg # and that comparison with angles works assert np.all(lat == Angle(['90d', '89d'])) # check setitem works lat[1] = 45. * u.deg assert np.all(lat == Angle(['90d', '45d'])) # but not with values out of range with pytest.raises(ValueError): lat[0] = 90.001 * u.deg with pytest.raises(ValueError): lat[0] = -90.001 * u.deg # these should also not destroy input (#1851) assert np.all(lat == Angle(['90d', '45d'])) # conserve type on unit change (closes #1423) angle = lat.to('radian') assert type(angle) is Latitude # but not on calculations angle = lat - 190 * u.deg assert type(angle) is Angle assert angle[0] == -100 * u.deg lat = Latitude('80d') angle = lat / 2. assert type(angle) is Angle assert angle == 40 * u.deg angle = lat * 2. assert type(angle) is Angle assert angle == 160 * u.deg angle = -lat assert type(angle) is Angle assert angle == -80 * u.deg # Test errors when trying to interoperate with longitudes. with pytest.raises(TypeError) as excinfo: lon = Longitude(10, 'deg') lat = Latitude(lon) assert "A Latitude angle cannot be created from a Longitude angle" in str(excinfo) with pytest.raises(TypeError) as excinfo: lon = Longitude(10, 'deg') lat = Latitude([20], 'deg') lat[0] = lon assert "A Longitude angle cannot be assigned to a Latitude angle" in str(excinfo) # Check we can work around the Lat vs Long checks by casting explicitly to Angle. 
lon = Longitude(10, 'deg') lat = Latitude(Angle(lon)) assert lat.value == 10.0 # Check setitem. lon = Longitude(10, 'deg') lat = Latitude([20], 'deg') lat[0] = Angle(lon) assert lat.value[0] == 10.0 def test_longitude(): # Default wrapping at 360d with an array input lon = Longitude(['370d', '88d']) assert np.all(lon == Longitude(['10d', '88d'])) assert np.all(lon == Angle(['10d', '88d'])) # conserve type on unit change and keep wrap_angle (closes #1423) angle = lon.to('hourangle') assert type(angle) is Longitude assert angle.wrap_angle == lon.wrap_angle angle = lon[0] assert type(angle) is Longitude assert angle.wrap_angle == lon.wrap_angle angle = lon[1:] assert type(angle) is Longitude assert angle.wrap_angle == lon.wrap_angle # but not on calculations angle = lon / 2. assert np.all(angle == Angle(['5d', '44d'])) assert type(angle) is Angle assert not hasattr(angle, 'wrap_angle') angle = lon * 2. + 400 * u.deg assert np.all(angle == Angle(['420d', '576d'])) assert type(angle) is Angle # Test setting a mutable value and having it wrap lon[1] = -10 * u.deg assert np.all(lon == Angle(['10d', '350d'])) # Test wrapping and try hitting some edge cases lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian) assert np.all(lon.degree == np.array([0., 90, 180, 270, 0])) lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian, wrap_angle='180d') assert np.all(lon.degree == np.array([0., 90, -180, -90, 0])) # Wrap on setting wrap_angle property (also test auto-conversion of wrap_angle to an Angle) lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian) lon.wrap_angle = '180d' assert np.all(lon.degree == np.array([0., 90, -180, -90, 0])) lon = Longitude('460d') assert lon == Angle('100d') lon.wrap_angle = '90d' assert lon == Angle('-260d') # check that if we initialize a longitude with another longitude, # wrap_angle is kept by default lon2 = Longitude(lon) assert lon2.wrap_angle == lon.wrap_angle # but not if we explicitly set it lon3 = Longitude(lon, wrap_angle='180d') assert lon3.wrap_angle == 180 * u.deg # check for problem reported in #2037 about Longitude initializing to -0 lon = Longitude(0, u.deg) lonstr = lon.to_string() assert not lonstr.startswith('-') # also make sure dtype is correctly conserved assert Longitude(0, u.deg, dtype=float).dtype == np.dtype(float) assert Longitude(0, u.deg, dtype=int).dtype == np.dtype(int) # Test errors when trying to interoperate with latitudes. with pytest.raises(TypeError) as excinfo: lat = Latitude(10, 'deg') lon = Longitude(lat) assert "A Longitude angle cannot be created from a Latitude angle" in str(excinfo) with pytest.raises(TypeError) as excinfo: lat = Latitude(10, 'deg') lon = Longitude([20], 'deg') lon[0] = lat assert "A Latitude angle cannot be assigned to a Longitude angle" in str(excinfo) # Check we can work around the Lat vs Long checks by casting explicitly to Angle. lat = Latitude(10, 'deg') lon = Longitude(Angle(lat)) assert lon.value == 10.0 # Check setitem. 
lat = Latitude(10, 'deg') lon = Longitude([20], 'deg') lon[0] = Angle(lat) assert lon.value[0] == 10.0 def test_wrap_at(): a = Angle([-20, 150, 350, 360] * u.deg) assert np.all(a.wrap_at(360 * u.deg).degree == np.array([340., 150., 350., 0.])) assert np.all(a.wrap_at(Angle(360, unit=u.deg)).degree == np.array([340., 150., 350., 0.])) assert np.all(a.wrap_at('360d').degree == np.array([340., 150., 350., 0.])) assert np.all(a.wrap_at('180d').degree == np.array([-20., 150., -10., 0.])) assert np.all(a.wrap_at(np.pi * u.rad).degree == np.array([-20., 150., -10., 0.])) # Test wrapping a scalar Angle a = Angle('190d') assert a.wrap_at('180d') == Angle('-170d') a = Angle(np.arange(-1000.0, 1000.0, 0.125), unit=u.deg) for wrap_angle in (270, 0.2, 0.0, 360.0, 500, -2000.125): aw = a.wrap_at(wrap_angle * u.deg) assert np.all(aw.degree >= wrap_angle - 360.0) assert np.all(aw.degree < wrap_angle) aw = a.to(u.rad).wrap_at(wrap_angle * u.deg) assert np.all(aw.degree >= wrap_angle - 360.0) assert np.all(aw.degree < wrap_angle) def test_is_within_bounds(): a = Angle([-20, 150, 350] * u.deg) assert a.is_within_bounds('0d', '360d') is False assert a.is_within_bounds(None, '360d') is True assert a.is_within_bounds(-30 * u.deg, None) is True a = Angle('-20d') assert a.is_within_bounds('0d', '360d') is False assert a.is_within_bounds(None, '360d') is True assert a.is_within_bounds(-30 * u.deg, None) is True def test_angle_mismatched_unit(): a = Angle('+6h7m8s', unit=u.degree) assert_allclose(a.value, 91.78333333333332) def test_regression_formatting_negative(): # Regression test for a bug that caused: # # >>> Angle(-1., unit='deg').to_string() # '-1d00m-0s' assert Angle(-0., unit='deg').to_string() == '-0d00m00s' assert Angle(-1., unit='deg').to_string() == '-1d00m00s' assert Angle(-0., unit='hour').to_string() == '-0h00m00s' assert Angle(-1., unit='hour').to_string() == '-1h00m00s' def test_empty_sep(): a = Angle('05h04m31.93830s') assert a.to_string(sep='', precision=2, pad=True) == '050431.94' def test_create_tuple(): """ Tests creation of an angle with a (d,m,s) or (h,m,s) tuple """ a1 = Angle((1, 30, 0), unit=u.degree) assert a1.value == 1.5 a1 = Angle((1, 30, 0), unit=u.hourangle) assert a1.value == 1.5 def test_list_of_quantities(): a1 = Angle([1*u.deg, 1*u.hourangle]) assert a1.unit == u.deg assert_allclose(a1.value, [1, 15]) a2 = Angle([1*u.hourangle, 1*u.deg], u.deg) assert a2.unit == u.deg assert_allclose(a2.value, [15, 1]) def test_multiply_divide(): # Issue #2273 a1 = Angle([1, 2, 3], u.deg) a2 = Angle([4, 5, 6], u.deg) a3 = a1 * a2 assert_allclose(a3.value, [4, 10, 18]) assert a3.unit == (u.deg * u.deg) a3 = a1 / a2 assert_allclose(a3.value, [.25, .4, .5]) assert a3.unit == u.dimensionless_unscaled def test_mixed_string_and_quantity(): a1 = Angle(['1d', 1. * u.deg]) assert_array_equal(a1.value, [1., 1.]) assert a1.unit == u.deg a2 = Angle(['1d', 1 * u.rad * np.pi, '3d']) assert_array_equal(a2.value, [1., 180., 3.]) assert a2.unit == u.deg def test_array_angle_tostring(): aobj = Angle([1, 2], u.deg) assert aobj.to_string().dtype.kind == 'U' assert np.all(aobj.to_string() == ['1d00m00s', '2d00m00s']) def test_wrap_at_without_new(): """ Regression test for subtle bugs from situations where an Angle is created via numpy channels that don't do the standard __new__ but instead depend on array_finalize to set state. 
Longitude is used because the bug was in its _wrap_angle not getting initialized correctly """ l1 = Longitude([1]*u.deg) l2 = Longitude([2]*u.deg) l = np.concatenate([l1, l2]) assert l._wrap_angle is not None def test__str__(): """ Check the __str__ method used in printing the Angle """ # scalar angle scangle = Angle('10.2345d') strscangle = scangle.__str__() assert strscangle == '10d14m04.2s' # non-scalar array angles arrangle = Angle(['10.2345d', '-20d']) strarrangle = arrangle.__str__() assert strarrangle == '[10d14m04.2s -20d00m00s]' # summarizing for large arrays, ... should appear bigarrangle = Angle(np.ones(10000), u.deg) assert '...' in bigarrangle.__str__() def test_repr_latex(): """ Check the _repr_latex_ method, used primarily by IPython notebooks """ # try with both scalar scangle = Angle(2.1, u.deg) rlscangle = scangle._repr_latex_() # and array angles arrangle = Angle([1, 2.1], u.deg) rlarrangle = arrangle._repr_latex_() assert rlscangle == r'$2^\circ06{}^\prime00{}^{\prime\prime}$' assert rlscangle.split('$')[1] in rlarrangle # make sure the ... appears for large arrays bigarrangle = Angle(np.ones(50000)/50000., u.deg) assert '...' in bigarrangle._repr_latex_() def test_angle_with_cds_units_enabled(): """Regression test for #5350 Especially the example in https://github.com/astropy/astropy/issues/5350#issuecomment-248770151 """ from astropy.units import cds # the problem is with the parser, so remove it temporarily from astropy.coordinates.angle_utilities import _AngleParser del _AngleParser._parser with cds.enable(): Angle('5d') del _AngleParser._parser Angle('5d')
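# Editor's note: an illustrative sketch, not from the original suite. It ties
# _repr_latex_ (tested above) to the public to_string API: for a scalar Angle
# the LaTeX repr is to_string with format='latex', wrapped in math delimiters.
def test_to_string_latex_format_sketch():
    a = Angle(2.1, u.deg)
    s = a.to_string(format='latex')
    assert s.startswith('$') and s.endswith('$')
    assert a._repr_latex_() == s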
9bd200c28f962f8dd8bf4c809d3ca4824b60b83b3b006139e4354578096cc680
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
Tests for the projected separation stuff
"""

import pytest
import numpy as np

from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy import units as u
from astropy.coordinates.builtin_frames import ICRS, FK5, Galactic
from astropy.coordinates import Angle, Distance

# lon1, lat1, lon2, lat2 in degrees
coords = [(1, 0, 0, 0),
          (0, 1, 0, 0),
          (0, 0, 1, 0),
          (0, 0, 0, 1),
          (0, 0, 10, 0),
          (0, 0, 90, 0),
          (0, 0, 180, 0),
          (0, 45, 0, -45),
          (0, 60, 0, -30),
          (-135, -15, 45, 15),
          (100, -89, -80, 89),
          (0, 0, 0, 0),
          (0, 0, 1. / 60., 1. / 60.)]
correct_seps = [1, 1, 1, 1, 10, 90, 180, 90, 90, 180, 180, 0,
                0.023570225877234643]
correctness_margin = 2e-10


def test_angsep():
    """
    Tests that the angular separation function behaves correctly.
    """

    from astropy.coordinates.angle_utilities import angular_separation

    # check that it works with plain floats in radians, as well as with
    # Quantities or Angles
    for conv in (np.deg2rad,
                 lambda x: u.Quantity(x, "deg"),
                 lambda x: Angle(x, "deg")):
        for (lon1, lat1, lon2, lat2), corrsep in zip(coords, correct_seps):
            angsep = angular_separation(conv(lon1), conv(lat1),
                                        conv(lon2), conv(lat2))
            assert np.fabs(angsep - conv(corrsep)) < conv(correctness_margin)


def test_fk5_seps():
    """
    This tests if `separation` works for FK5 objects.

    This is a regression test for github issue #891
    """
    a = FK5(1.*u.deg, 1.*u.deg)
    b = FK5(2.*u.deg, 2.*u.deg)
    a.separation(b)


def test_proj_separations():
    """
    Test angular separation functionality
    """
    c1 = ICRS(ra=0*u.deg, dec=0*u.deg)
    c2 = ICRS(ra=0*u.deg, dec=1*u.deg)

    sep = c2.separation(c1)
    # returns an Angle object
    assert isinstance(sep, Angle)

    assert sep.degree == 1
    assert_allclose(sep.arcminute, 60.)

    # these operations have ambiguous interpretations for points on a sphere
    with pytest.raises(TypeError):
        c1 + c2
    with pytest.raises(TypeError):
        c1 - c2

    ngp = Galactic(l=0*u.degree, b=90*u.degree)
    ncp = ICRS(ra=0*u.degree, dec=90*u.degree)

    # if there is a defined conversion between the relevant coordinate systems,
    # it will be automatically performed to get the right angular separation
    assert_allclose(ncp.separation(ngp.transform_to(ICRS)).degree,
                    ncp.separation(ngp).degree)

    # distance from the north galactic pole to celestial pole
    assert_allclose(ncp.separation(ngp.transform_to(ICRS)).degree,
                    62.87174758503201)


def test_3d_separations():
    """
    Test 3D separation functionality
    """
    c1 = ICRS(ra=1*u.deg, dec=1*u.deg, distance=9*u.kpc)
    c2 = ICRS(ra=1*u.deg, dec=1*u.deg, distance=10*u.kpc)

    sep3d = c2.separation_3d(c1)

    assert isinstance(sep3d, Distance)
    assert_allclose(sep3d - 1*u.kpc, 0*u.kpc, atol=1e-12*u.kpc)
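# Editor's note: an illustrative sketch, not from the original suite. It
# spot-checks a single row of the ``coords``/``correct_seps`` table above with
# plain Quantity arguments, outside the conversion-function loop.
def test_angsep_single_row_sketch():
    from astropy.coordinates.angle_utilities import angular_separation
    # (0, 45) to (0, -45) along one meridian is exactly 90 degrees.
    sep = angular_separation(0*u.deg, 45*u.deg, 0*u.deg, -45*u.deg)
    assert_allclose(sep, 90*u.deg)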
d3d32f6a21a48891f0aa4b0797a36a671addb1b75ff7277a77ef5ed3625fe164
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
This is the APE5 coordinates API document re-written to work as a series of test
functions.

Note that new tests for coordinates functionality should generally *not* be
added to this file - instead, add them to other appropriate test modules in
this package, like ``test_sky_coord.py``, ``test_frames.py``, or
``test_representation.py``. This file is instead meant mainly to keep track of
deviations from the original APE5 plan.
"""

import pytest
import numpy as np
from numpy import testing as npt

from astropy.tests.helper import raises, assert_quantity_allclose as assert_allclose
from astropy import units as u
from astropy import time
from astropy import coordinates as coords
from astropy.units import allclose

try:
    import scipy  # pylint: disable=W0611
except ImportError:
    HAS_SCIPY = False
else:
    HAS_SCIPY = True


def test_representations_api():
    from astropy.coordinates.representation import SphericalRepresentation, \
        UnitSphericalRepresentation, PhysicsSphericalRepresentation, \
        CartesianRepresentation
    from astropy.coordinates import Angle, Longitude, Latitude, Distance

    # <-----------------Classes for representation of coordinate data-------------->
    # These classes inherit from a common base class and internally contain
    # Quantity objects, which are arrays (although they may act as scalars, like
    # numpy's length-0 "arrays")

    # They can be initialized in a variety of ways that make intuitive sense.
    # Distance is optional.
    UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg)
    UnitSphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg)
    SphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg, distance=10*u.kpc)

    # In the initial implementation, the lat/lon/distance arguments to the
    # initializer must be in order. A *possible* future change will be to allow
    # smarter guessing of the order. E.g. `Latitude` and `Longitude` objects
    # can be given in any order.
    UnitSphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg))
    SphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg),
                            Distance(10, u.kpc))

    # Arrays of any of the inputs are fine
    UnitSphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg)

    # Default is to copy arrays, but optionally, it can be a reference
    UnitSphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg,
                                copy=False)

    # strings are parsed by `Latitude` and `Longitude` constructors, so no need
    # to implement parsing in the Representation classes
    UnitSphericalRepresentation(lon=Angle('2h6m3.3s'), lat=Angle('0.1rad'))

    # Or, you can give `Quantity`s with keywords, and they will be internally
    # converted to Angle/Distance
    c1 = SphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg,
                                 distance=10*u.kpc)

    # Can also give another representation object with the `reprobj` keyword.
    c2 = SphericalRepresentation.from_representation(c1)

    # distance, lat, and lon typically will just match in shape
    SphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg,
                            distance=[10, 11]*u.kpc)

    # if the inputs are not the same, if possible they will be broadcast
    # following numpy's standard broadcasting rules.
    c2 = SphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg,
                                 distance=10*u.kpc)
    assert len(c2.distance) == 2

    # when they can't be broadcast, it is a ValueError (same as Numpy)
    with raises(ValueError):
        c2 = UnitSphericalRepresentation(lon=[8, 9, 10]*u.hourangle,
                                         lat=[5, 6]*u.deg)

    # It's also possible to pass in scalar quantity lists with mixed units.
    # These are converted to array quantities following the same rule as
    # `Quantity`: all elements are converted to match the first element's units.
    c2 = UnitSphericalRepresentation(lon=Angle([8*u.hourangle, 135*u.deg]),
                                     lat=Angle([5*u.deg, (6*np.pi/180)*u.rad]))
    assert c2.lat.unit == u.deg and c2.lon.unit == u.hourangle
    npt.assert_almost_equal(c2.lon[1].value, 9)

    # The Quantity initializer itself can also be used to force the unit even
    # if the first element doesn't have the right unit
    lon = u.Quantity([120*u.deg, 135*u.deg], u.hourangle)
    lat = u.Quantity([(5*np.pi/180)*u.rad, 0.4*u.hourangle], u.deg)
    c2 = UnitSphericalRepresentation(lon, lat)

    # regardless of how they are input, the `lat` and `lon` come out as
    # angle/distance
    assert isinstance(c1.lat, Angle)
    assert isinstance(c1.lat, Latitude)  # `Latitude` is an `Angle` subclass
    assert isinstance(c1.distance, Distance)

    # but they are read-only, as representations are immutable once created
    with raises(AttributeError):
        c1.lat = Latitude(5, u.deg)
    # Note that it is still possible to modify the array in-place, but this is
    # not sanctioned by the API, as this would prevent things like caching.
    c2.lat[:] = [0] * u.deg  # possible, but NOT SUPPORTED

    # To address the fact that there are various other conventions for how
    # spherical coordinates are defined, other conventions can be included as
    # new classes. Later there may be other conventions that we implement - for
    # now just the physics convention, as it is one of the most common cases.
    c3 = PhysicsSphericalRepresentation(phi=120*u.deg, theta=85*u.deg, r=3*u.kpc)

    # first dimension must be length-3 if a lone `Quantity` is passed in.
    c1 = CartesianRepresentation(np.random.randn(3, 100) * u.kpc)
    assert c1.xyz.shape[0] == 3
    assert c1.xyz.unit == u.kpc
    assert c1.x.shape[0] == 100
    assert c1.y.shape[0] == 100
    assert c1.z.shape[0] == 100

    # can also give each as separate keywords
    CartesianRepresentation(x=np.random.randn(100)*u.kpc,
                            y=np.random.randn(100)*u.kpc,
                            z=np.random.randn(100)*u.kpc)

    # if the units don't match but are all distances, they will automatically
    # be converted to match `x`
    xarr, yarr, zarr = np.random.randn(3, 100)
    c1 = CartesianRepresentation(x=xarr*u.kpc, y=yarr*u.kpc, z=zarr*u.kpc)
    c2 = CartesianRepresentation(x=xarr*u.kpc, y=yarr*u.kpc, z=zarr*u.pc)
    assert c1.xyz.unit == c2.xyz.unit == u.kpc
    assert_allclose((c1.z / 1000) - c2.z, 0*u.kpc, atol=1e-10*u.kpc)

    # representations convert into other representations via `represent_as`
    srep = SphericalRepresentation(lon=90*u.deg, lat=0*u.deg, distance=1*u.pc)
    crep = srep.represent_as(CartesianRepresentation)
    assert_allclose(crep.x, 0*u.pc, atol=1e-10*u.pc)
    assert_allclose(crep.y, 1*u.pc, atol=1e-10*u.pc)
    assert_allclose(crep.z, 0*u.pc, atol=1e-10*u.pc)
    # The functions that actually do the conversion are defined via methods on
    # the representation classes. This may later be expanded into a full
    # registerable transform graph like the coordinate frames, but initially it
    # will be a simpler method system


def test_frame_api():
    from astropy.coordinates.representation import SphericalRepresentation, \
        UnitSphericalRepresentation
    from astropy.coordinates.builtin_frames import ICRS, FK5

    # <--------------------Reference Frame/"Low-level" classes--------------------->
    # The low-level classes have a dual role: they act as specifiers of
    # coordinate frames and they *may* also contain data as one of the
    # representation objects, in which case they are the actual coordinate
    # objects themselves.
    # They can always accept a representation as a first argument
    icrs = ICRS(UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg))

    # which is stored as the `data` attribute
    assert icrs.data.lat == 5*u.deg
    assert icrs.data.lon == 8*u.hourangle

    # Frames that require additional information like equinoxes or obstimes get
    # them as keyword parameters to the frame constructor. Where sensible,
    # defaults are used. E.g., FK5 is almost always J2000 equinox
    fk5 = FK5(UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg))
    J2000 = time.Time('J2000')
    fk5_2000 = FK5(UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg),
                   equinox=J2000)
    assert fk5.equinox == fk5_2000.equinox

    # the information required to specify the frame is immutable
    J2001 = time.Time('J2001')
    with raises(AttributeError):
        fk5.equinox = J2001

    # Similar for the representation data.
    with raises(AttributeError):
        fk5.data = UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg)

    # There is also a class-level attribute that lists the attributes needed to
    # identify the frame. These include attributes like `equinox` shown above.
    assert all(nm in ('equinox', 'obstime')
               for nm in fk5.get_frame_attr_names())

    # the result of `get_frame_attr_names` is relied on particularly in the
    # high-level class (discussed below) to allow round-tripping between
    # various frames. It is also part of the public API for developers and
    # advanced users.

    # The actual position information is accessed via the representation objects
    assert_allclose(icrs.represent_as(SphericalRepresentation).lat, 5*u.deg)
    # shorthand for the above
    assert_allclose(icrs.spherical.lat, 5*u.deg)
    assert icrs.cartesian.z.value > 0

    # Many frames have a "default" representation, the one in which they are
    # conventionally described, often with a special name for some of the
    # coordinates. E.g., most equatorial coordinate systems are spherical with
    # RA and Dec. This works simply as a shorthand for the longer form above
    assert_allclose(icrs.dec, 5*u.deg)
    assert_allclose(fk5.ra, 8*u.hourangle)

    assert icrs.representation_type == SphericalRepresentation

    # low-level classes can also be initialized with names valid for that
    # representation and frame:
    icrs_2 = ICRS(ra=8*u.hour, dec=5*u.deg, distance=1*u.kpc)
    assert_allclose(icrs.ra, icrs_2.ra)

    # and these are taken as the default if keywords are not given:
    # icrs_nokwarg = ICRS(8*u.hour, 5*u.deg, distance=1*u.kpc)
    # assert icrs_nokwarg.ra == icrs_2.ra and icrs_nokwarg.dec == icrs_2.dec

    # they also are capable of computing on-sky or 3d separations from each
    # other, which will be a direct port of the existing methods:
    coo1 = ICRS(ra=0*u.hour, dec=0*u.deg)
    coo2 = ICRS(ra=0*u.hour, dec=1*u.deg)
    # `separation` is the on-sky separation
    assert coo1.separation(coo2).degree == 1.0

    # while `separation_3d` includes the 3D distance information
    coo3 = ICRS(ra=0*u.hour, dec=0*u.deg, distance=1*u.kpc)
    coo4 = ICRS(ra=0*u.hour, dec=0*u.deg, distance=2*u.kpc)
    assert coo3.separation_3d(coo4).kpc == 1.0

    # The next example fails because `coo1` and `coo2` don't have distances
    with raises(ValueError):
        assert coo1.separation_3d(coo2).kpc == 1.0

    # repr/str also shows info, with frame and data
    # assert repr(fk5) == ''


def test_transform_api():
    from astropy.coordinates.representation import UnitSphericalRepresentation
    from astropy.coordinates.builtin_frames import ICRS, FK5
    from astropy.coordinates.baseframe import frame_transform_graph, BaseCoordinateFrame
    from astropy.coordinates.transformations import DynamicMatrixTransform

    # <------------------------Transformations------------------------------------->
    # Transformation functionality is the key to the whole scheme: it
    # transforms low-level classes from one frame to another.

    # (used below but defined above in the API)
    fk5 = FK5(ra=8*u.hour, dec=5*u.deg)

    # If no data (or `None`) is given, the class acts as a specifier of a
    # frame, but without any stored data.
    J2001 = time.Time('J2001')
    fk5_J2001_frame = FK5(equinox=J2001)

    # if they do not have data, the string instead is the frame specification
    assert repr(fk5_J2001_frame) == "<FK5 Frame (equinox=J2001.000)>"

    # Note that, although a frame object is immutable and can't have data
    # added, it can be used to create a new object that does have data by
    # giving the `realize_frame` method a representation:
    srep = UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg)
    fk5_j2001_with_data = fk5_J2001_frame.realize_frame(srep)
    assert fk5_j2001_with_data.data is not None
    # Now `fk5_j2001_with_data` is in the same frame as `fk5_J2001_frame`, but
    # it is an actual low-level coordinate, rather than a frame without data.

    # These frames are primarily useful for specifying what a coordinate should
    # be transformed *into*, as they are used by the `transform_to` method
    # E.g., this snippet precesses the point to the new equinox
    newfk5 = fk5.transform_to(fk5_J2001_frame)
    assert newfk5.equinox == J2001

    # classes can also be given to `transform_to`, which then uses the defaults
    # for the frame information:
    samefk5 = fk5.transform_to(FK5)
    # `fk5` was initialized using default `obstime` and `equinox`, so:
    assert_allclose(samefk5.ra, fk5.ra, atol=1e-10*u.deg)
    assert_allclose(samefk5.dec, fk5.dec, atol=1e-10*u.deg)

    # transforming to a new frame necessarily loses framespec information if
    # that information is not applicable to the new frame. This means
    # transforms are not always round-trippable:
    fk5_2 = FK5(ra=8*u.hour, dec=5*u.deg, equinox=J2001)
    ic_trans = fk5_2.transform_to(ICRS)

    # `ic_trans` does not have an `equinox`, so now when we transform back to
    # FK5, it's a *different* RA and Dec
    fk5_trans = ic_trans.transform_to(FK5)
    assert not allclose(fk5_2.ra, fk5_trans.ra, rtol=0, atol=1e-10*u.deg)

    # But if you explicitly give the right equinox, all is fine
    fk5_trans_2 = fk5_2.transform_to(FK5(equinox=J2001))
    assert_allclose(fk5_2.ra, fk5_trans_2.ra, rtol=0, atol=1e-10*u.deg)

    # Trying to transform a frame with no data is of course an error:
    with raises(ValueError):
        FK5(equinox=J2001).transform_to(ICRS)

    # To actually define a new transformation, the same scheme as in the
    # 0.2/0.3 coordinates framework can be re-used - a graph of transform
    # functions connecting various coordinate classes together. The main
    # changes are:
    # 1) The transform functions now get the frame object they are transforming
    #    the current data into.
    # 2) Frames with additional information need to have a way to transform
    #    between objects of the same class, but with different framespecinfo
    #    values.

    # An example transform function:
    class SomeNewSystem(BaseCoordinateFrame):
        pass

    @frame_transform_graph.transform(DynamicMatrixTransform, SomeNewSystem, FK5)
    def new_to_fk5(newobj, fk5frame):
        ot = newobj.obstime
        eq = fk5frame.equinox
        # ... build a *cartesian* transform matrix using `eq` that transforms
        # from the `newobj` frame as observed at `ot` to FK5 at equinox `eq`
        matrix = np.eye(3)
        return matrix

    # Other options for transform functions include one that simply returns the
    # new coordinate object, and one that returns a cartesian matrix but does
    # *not* require `newobj` or `fk5frame` - this allows optimization of the
    # transform.


def test_highlevel_api():
    J2001 = time.Time('J2001')

    # <--------------------------"High-level" class-------------------------------->
    # The "high-level" class is intended to wrap the lower-level classes in
    # such a way that they can be round-tripped, as well as providing a variety
    # of convenience functionality. This document is not intended to show *all*
    # of the possible high-level functionality, but rather how the high-level
    # classes are initialized and interact with the low-level classes

    # this creates an object that contains an `ICRS` low-level class,
    # initialized identically to the first ICRS example further up.
    sc = coords.SkyCoord(coords.SphericalRepresentation(lon=8 * u.hour,
                                                        lat=5 * u.deg,
                                                        distance=1 * u.kpc),
                         frame='icrs')

    # Other representations and `system` keywords delegate to the appropriate
    # low-level class. The already-existing registry for user-defined
    # coordinates will be used by `SkyCoordinate` to figure out what the
    # various values of the `system` keyword actually mean.
    sc = coords.SkyCoord(ra=8 * u.hour, dec=5 * u.deg, frame='icrs')
    sc = coords.SkyCoord(l=120 * u.deg, b=5 * u.deg, frame='galactic')

    # High-level classes can also be initialized directly from low-level objects
    sc = coords.SkyCoord(coords.ICRS(ra=8 * u.hour, dec=5 * u.deg))

    # The next example raises an error because the high-level class must always
    # have position data.
    with pytest.raises(ValueError):
        sc = coords.SkyCoord(coords.FK5(equinox=J2001))  # raises ValueError

    # similarly, the low-level object can always be accessed

    # this is how it's supposed to look, but sometimes the numbers get rounded
    # in funny ways
    # assert repr(sc.frame) == '<ICRS Coordinate: ra=120.0 deg, dec=5.0 deg>'
    rscf = repr(sc.frame)
    assert rscf.startswith('<ICRS Coordinate: (ra, dec) in deg')

    # and the string representation will be inherited from the low-level class.

    # same deal, should look like this, but different architectures / Python
    # versions may round the numbers differently
    # assert repr(sc) == '<SkyCoord (ICRS): ra=120.0 deg, dec=5.0 deg>'
    rsc = repr(sc)
    assert rsc.startswith('<SkyCoord (ICRS): (ra, dec) in deg')

    # Supports a variety of possible complex string formats
    sc = coords.SkyCoord('8h00m00s +5d00m00.0s', frame='icrs')

    # In the next example, the unit is only needed b/c units are ambiguous. In
    # general, we *never* accept ambiguity
    sc = coords.SkyCoord('8:00:00 +5:00:00.0', unit=(u.hour, u.deg),
                         frame='icrs')

    # The next one would yield length-2 array coordinates, because of the comma
    sc = coords.SkyCoord(['8h 5d', '2°2′3″ 0.3rad'], frame='icrs')

    # It should also interpret common designation styles as a coordinate
    # NOT YET
    # sc = coords.SkyCoord('SDSS J123456.89-012345.6', frame='icrs')

    # but it should also be possible to provide formats for outputting to
    # strings, similar to `Time`. This can be added right away or at a later
    # date.

    # transformation is done the same as for low-level classes, which it
    # delegates to
    sc_fk5_j2001 = sc.transform_to(coords.FK5(equinox=J2001))
    assert sc_fk5_j2001.equinox == J2001

    # The key difference is that the high-level class remembers frame
    # information necessary for round-tripping, unlike the low-level classes:
    sc1 = coords.SkyCoord(ra=8 * u.hour, dec=5 * u.deg, equinox=J2001,
                          frame='fk5')
    sc2 = sc1.transform_to('icrs')

    # The next assertion succeeds, but it doesn't mean anything for ICRS, as
    # ICRS isn't defined in terms of an equinox
    assert sc2.equinox == J2001

    # But it *is* necessary once we transform to FK5
    sc3 = sc2.transform_to('fk5')
    assert sc3.equinox == J2001
    assert_allclose(sc1.ra, sc3.ra)

    # `SkyCoord` will also include the attribute-style access that is in the
    # v0.2/0.3 coordinate objects. This will *not* be in the low-level classes
    sc = coords.SkyCoord(ra=8 * u.hour, dec=5 * u.deg, frame='icrs')
    scgal = sc.galactic
    assert str(scgal).startswith('<SkyCoord (Galactic): (l, b)')

    # the existing `from_name` and `match_to_catalog_*` methods will be moved
    # to the high-level class as convenience functionality.

    # in remote-data test below!
    # m31icrs = coords.SkyCoord.from_name('M31', frame='icrs')
    # assert str(m31icrs) == '<SkyCoord (ICRS) RA=10.68471 deg, Dec=41.26875 deg>'

    if HAS_SCIPY:
        cat1 = coords.SkyCoord(ra=[1, 2]*u.hr, dec=[3, 4.01]*u.deg,
                               distance=[5, 6]*u.kpc, frame='icrs')
        cat2 = coords.SkyCoord(ra=[1, 2, 2.01]*u.hr, dec=[3, 4, 5]*u.deg,
                               distance=[5, 200, 6]*u.kpc, frame='icrs')
        idx1, sep2d1, dist3d1 = cat1.match_to_catalog_sky(cat2)
        idx2, sep2d2, dist3d2 = cat1.match_to_catalog_3d(cat2)

        assert np.any(idx1 != idx2)

    # additional convenience functionality for the future should be added as
    # methods on `SkyCoord`, *not* the low-level classes.
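# Editor's note: an illustrative sketch, not from the original suite. It
# unpacks what the three match_to_catalog_sky return values above mean, using
# the same catalogs as test_highlevel_api.
def test_matching_outputs_sketch():
    if not HAS_SCIPY:
        pytest.skip('requires scipy')
    cat1 = coords.SkyCoord(ra=[1, 2]*u.hr, dec=[3, 4.01]*u.deg,
                           distance=[5, 6]*u.kpc, frame='icrs')
    cat2 = coords.SkyCoord(ra=[1, 2, 2.01]*u.hr, dec=[3, 4, 5]*u.deg,
                           distance=[5, 200, 6]*u.kpc, frame='icrs')
    # idx holds, for each cat1 entry, the index of its nearest on-sky
    # neighbour in cat2; sep2d/dist3d are the corresponding separations.
    idx, sep2d, dist3d = cat1.match_to_catalog_sky(cat2)
    assert idx.shape == (2,)
    assert sep2d.unit.physical_type == 'angle'
    assert dist3d.unit.physical_type == 'length'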
@pytest.mark.remote_data def test_highlevel_api_remote(): m31icrs = coords.SkyCoord.from_name('M31', frame='icrs') m31str = str(m31icrs) assert m31str.startswith('<SkyCoord (ICRS): (ra, dec) in deg\n (') assert m31str.endswith(')>') assert '10.68' in m31str assert '41.26' in m31str # The above is essentially a replacement of the below, but tweaked so that # small/moderate changes in what `from_name` returns don't cause the tests # to fail # assert str(m31icrs) == '<SkyCoord (ICRS): (ra, dec) in deg\n (10.6847083, 41.26875)>' m31fk4 = coords.SkyCoord.from_name('M31', frame='fk4') assert m31icrs.frame != m31fk4.frame assert np.abs(m31icrs.ra - m31fk4.ra) > .5*u.deg
57e24537cccc844a4aa19359116536403faf676cb6e43bd26271e1fe4f2f3728
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Tests for putting velocity differentials into SkyCoord objects. Note: the skyoffset velocity tests are in a different file, in test_skyoffset_transformations.py """ import pytest import numpy as np from astropy import units as u from astropy.tests.helper import assert_quantity_allclose from astropy.coordinates import (SkyCoord, ICRS, SphericalRepresentation, SphericalDifferential, SphericalCosLatDifferential, CartesianRepresentation, CartesianDifferential, Galactic, PrecessedGeocentric) try: import scipy HAS_SCIPY = True except ImportError: HAS_SCIPY = False def test_creation_frameobjs(): i = ICRS(1*u.deg, 2*u.deg, pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr) sc = SkyCoord(i) for attrnm in ['ra', 'dec', 'pm_ra_cosdec', 'pm_dec']: assert_quantity_allclose(getattr(i, attrnm), getattr(sc, attrnm)) sc_nod = SkyCoord(ICRS(1*u.deg, 2*u.deg)) for attrnm in ['ra', 'dec']: assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_nod, attrnm)) def test_creation_attrs(): sc1 = SkyCoord(1*u.deg, 2*u.deg, pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr, frame='fk5') assert_quantity_allclose(sc1.ra, 1*u.deg) assert_quantity_allclose(sc1.dec, 2*u.deg) assert_quantity_allclose(sc1.pm_ra_cosdec, .2*u.arcsec/u.kyr) assert_quantity_allclose(sc1.pm_dec, .1*u.arcsec/u.kyr) sc2 = SkyCoord(1*u.deg, 2*u.deg, pm_ra=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr, differential_type=SphericalDifferential) assert_quantity_allclose(sc2.ra, 1*u.deg) assert_quantity_allclose(sc2.dec, 2*u.deg) assert_quantity_allclose(sc2.pm_ra, .2*u.arcsec/u.kyr) assert_quantity_allclose(sc2.pm_dec, .1*u.arcsec/u.kyr) sc3 = SkyCoord('1:2:3 4:5:6', pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr, unit=(u.hour, u.deg)) assert_quantity_allclose(sc3.ra, 1*u.hourangle + 2*u.arcmin*15 + 3*u.arcsec*15) assert_quantity_allclose(sc3.dec, 4*u.deg + 5*u.arcmin + 6*u.arcsec) # might as well check with sillier units? 
assert_quantity_allclose(sc3.pm_ra_cosdec, 1.2776637006616473e-07 * u.arcmin / u.fortnight) assert_quantity_allclose(sc3.pm_dec, 6.388318503308237e-08 * u.arcmin / u.fortnight) def test_creation_copy_basic(): i = ICRS(1*u.deg, 2*u.deg, pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr) sc = SkyCoord(i) sc_cpy = SkyCoord(sc) for attrnm in ['ra', 'dec', 'pm_ra_cosdec', 'pm_dec']: assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_cpy, attrnm)) def test_creation_copy_rediff(): sc = SkyCoord(1*u.deg, 2*u.deg, pm_ra=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr, differential_type=SphericalDifferential) sc_cpy = SkyCoord(sc) for attrnm in ['ra', 'dec', 'pm_ra', 'pm_dec']: assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_cpy, attrnm)) sc_newdiff = SkyCoord(sc, differential_type=SphericalCosLatDifferential) reprepr = sc.represent_as(SphericalRepresentation, SphericalCosLatDifferential) assert_quantity_allclose(sc_newdiff.pm_ra_cosdec, reprepr.differentials['s'].d_lon_coslat) def test_creation_cartesian(): rep = CartesianRepresentation([10, 0., 0.]*u.pc) dif = CartesianDifferential([0, 100, 0.]*u.pc/u.Myr) rep = rep.with_differentials(dif) c = SkyCoord(rep) sdif = dif.represent_as(SphericalCosLatDifferential, rep) assert_quantity_allclose(c.pm_ra_cosdec, sdif.d_lon_coslat) def test_useful_error_missing(): sc_nod = SkyCoord(ICRS(1*u.deg, 2*u.deg)) try: sc_nod.l except AttributeError as e: # this is double-checking the *normal* behavior msg_l = e.args[0] try: sc_nod.pm_dec except Exception as e: msg_pm_dec = e.args[0] assert "has no attribute" in msg_l assert "has no associated differentials" in msg_pm_dec # ----------------------Operations on SkyCoords w/ velocities------------------- # define some fixtures to get baseline coordinates to try operations with @pytest.fixture(scope="module", params=[(False, False), (True, False), (False, True), (True, True)]) def sc(request): incldist, inclrv = request.param args = [1*u.deg, 2*u.deg] kwargs = dict(pm_dec=1*u.mas/u.yr, pm_ra_cosdec=2*u.mas/u.yr) if incldist: kwargs['distance'] = 213.4*u.pc if inclrv: kwargs['radial_velocity'] = 61*u.km/u.s return SkyCoord(*args, **kwargs) @pytest.fixture(scope="module") def scmany(): return SkyCoord(ICRS(ra=[1]*100*u.deg, dec=[2]*100*u.deg, pm_ra_cosdec=np.random.randn(100)*u.mas/u.yr, pm_dec=np.random.randn(100)*u.mas/u.yr,)) @pytest.fixture(scope="module") def sc_for_sep(): return SkyCoord(1*u.deg, 2*u.deg, pm_dec=1*u.mas/u.yr, pm_ra_cosdec=2*u.mas/u.yr) def test_separation(sc, sc_for_sep): sc.separation(sc_for_sep) def test_accessors(sc, scmany): sc.data.differentials['s'] sph = sc.spherical gal = sc.galactic if (sc.data.get_name().startswith('unit') and not sc.data.differentials['s'].get_name().startswith('unit')): # this xfail can be eliminated when issue #7028 is resolved pytest.xfail('.velocity fails if there is an RV but not distance') sc.velocity assert isinstance(sph, SphericalRepresentation) assert gal.data.differentials is not None scmany[0] sph = scmany.spherical gal = scmany.galactic assert isinstance(sph, SphericalRepresentation) assert gal.data.differentials is not None def test_transforms(sc): trans = sc.transform_to('galactic') assert isinstance(trans.frame, Galactic) def test_transforms_diff(sc): # note that arguably this *should* fail for the no-distance cases: 3D # information is necessary to truly solve this, hence the xfail if not sc.distance.unit.is_equivalent(u.m): pytest.xfail('Should fail for no-distance cases') else: trans = sc.transform_to(PrecessedGeocentric(equinox='B1975')) assert 
isinstance(trans.frame, PrecessedGeocentric) @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching(sc, scmany): # just check that it works and yields something idx, d2d, d3d = sc.match_to_catalog_sky(scmany) def test_position_angle(sc, sc_for_sep): sc.position_angle(sc_for_sep) def test_constellations(sc): const = sc.get_constellation() assert const == 'Pisces' def test_separation_3d_with_differentials(): c1 = SkyCoord(ra=138*u.deg, dec=-17*u.deg, distance=100*u.pc, pm_ra_cosdec=5*u.mas/u.yr, pm_dec=-7*u.mas/u.yr, radial_velocity=160*u.km/u.s) c2 = SkyCoord(ra=138*u.deg, dec=-17*u.deg, distance=105*u.pc, pm_ra_cosdec=15*u.mas/u.yr, pm_dec=-74*u.mas/u.yr, radial_velocity=-60*u.km/u.s) sep = c1.separation_3d(c2) assert_quantity_allclose(sep, 5*u.pc)
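# Editor's note: an illustrative sketch, not from the original suite. A
# coordinate built with only a radial velocity (one of the cases the ``sc``
# fixture above covers) still exposes that component as an attribute.
def test_radial_velocity_only_sketch():
    c = SkyCoord(1*u.deg, 2*u.deg, radial_velocity=10*u.km/u.s)
    assert_quantity_allclose(c.radial_velocity, 10*u.km/u.s)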
fb1056a97735e83d00ab492c07da3c7a5d776ac2a89404845bc84cd5cabaebe1
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from astropy import units as u from astropy.utils import NumpyRNGContext def randomly_sample_sphere(ntosample, randomseed=12345): """ Generates a set of spherical coordinates uniformly distributed over the sphere in a way that gives the same answer for the same seed. Also generates a random distance vector on [0, 1] (no units) This simply returns (lon, lat, r) instead of a representation to avoid failures due to the representation module. """ with NumpyRNGContext(randomseed): lat = np.arcsin(np.random.rand(ntosample)*2-1) lon = np.random.rand(ntosample)*np.pi*2 r = np.random.rand(ntosample) return lon*u.rad, lat*u.rad, r
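# Editor's note: an illustrative usage sketch, not part of the original
# module, spelling out the reproducibility promise in the docstring above.
def _example_reproducibility():
    lon1, lat1, r1 = randomly_sample_sphere(100, randomseed=42)
    lon2, lat2, r2 = randomly_sample_sphere(100, randomseed=42)
    # the same seed always yields the same sample
    assert np.all(lon1 == lon2) and np.all(lat1 == lat2) and np.all(r1 == r2)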
07b28d6c8c77ebc43e00b56bace51c4f06f5fdd4ced8a2d7ba35b15803101d3d
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import functools

import pytest
import numpy as np

from astropy import units as u
from astropy.coordinates import (PhysicsSphericalRepresentation,
                                 CartesianRepresentation,
                                 CylindricalRepresentation,
                                 SphericalRepresentation,
                                 UnitSphericalRepresentation,
                                 SphericalDifferential,
                                 CartesianDifferential,
                                 UnitSphericalDifferential,
                                 SphericalCosLatDifferential,
                                 UnitSphericalCosLatDifferential,
                                 PhysicsSphericalDifferential,
                                 CylindricalDifferential,
                                 RadialRepresentation,
                                 RadialDifferential,
                                 Longitude, Latitude)
from astropy.coordinates.representation import DIFFERENTIAL_CLASSES
from astropy.coordinates.angle_utilities import angular_separation
from astropy.tests.helper import assert_quantity_allclose, quantity_allclose


def assert_representation_allclose(actual, desired, rtol=1.e-7, atol=None,
                                   **kwargs):
    actual_xyz = actual.to_cartesian().get_xyz(xyz_axis=-1)
    desired_xyz = desired.to_cartesian().get_xyz(xyz_axis=-1)
    actual_xyz, desired_xyz = np.broadcast_arrays(actual_xyz, desired_xyz,
                                                  subok=True)
    assert_quantity_allclose(actual_xyz, desired_xyz, rtol, atol, **kwargs)


def assert_differential_allclose(actual, desired, rtol=1.e-7, **kwargs):
    assert actual.components == desired.components
    for component in actual.components:
        actual_c = getattr(actual, component)
        atol = 1.e-10 * actual_c.unit
        assert_quantity_allclose(actual_c, getattr(desired, component),
                                 rtol, atol, **kwargs)


def representation_equal(first, second):
    return functools.reduce(np.logical_and,
                            (getattr(first, component) ==
                             getattr(second, component)
                             for component in first.components))


class TestArithmetic():

    def setup(self):
        # Choose some specific coordinates, for which ``sum`` and ``dot``
        # work out nicely.
        self.lon = Longitude(np.arange(0, 12.1, 2), u.hourangle)
        self.lat = Latitude(np.arange(-90, 91, 30), u.deg)
        self.distance = [5., 12., 4., 2., 4., 12., 5.] * u.kpc
        self.spherical = SphericalRepresentation(self.lon, self.lat,
                                                 self.distance)
        self.unit_spherical = self.spherical.represent_as(
            UnitSphericalRepresentation)
        self.cartesian = self.spherical.to_cartesian()

    def test_norm_spherical(self):
        norm_s = self.spherical.norm()
        assert isinstance(norm_s, u.Quantity)
        # Just to be sure, test against getting object arrays.
        assert norm_s.dtype.kind == 'f'
        assert np.all(norm_s == self.distance)

    @pytest.mark.parametrize('representation',
                             (PhysicsSphericalRepresentation,
                              CartesianRepresentation,
                              CylindricalRepresentation))
    def test_norm(self, representation):
        in_rep = self.spherical.represent_as(representation)
        norm_rep = in_rep.norm()
        assert isinstance(norm_rep, u.Quantity)
        assert_quantity_allclose(norm_rep, self.distance)

    def test_norm_unitspherical(self):
        norm_rep = self.unit_spherical.norm()
        assert norm_rep.unit == u.dimensionless_unscaled
        assert np.all(norm_rep == 1.
* u.dimensionless_unscaled) @pytest.mark.parametrize('representation', (SphericalRepresentation, PhysicsSphericalRepresentation, CartesianRepresentation, CylindricalRepresentation, UnitSphericalRepresentation)) def test_neg_pos(self, representation): in_rep = self.cartesian.represent_as(representation) pos_rep = +in_rep assert type(pos_rep) is type(in_rep) assert pos_rep is not in_rep assert np.all(representation_equal(pos_rep, in_rep)) neg_rep = -in_rep assert type(neg_rep) is type(in_rep) assert np.all(neg_rep.norm() == in_rep.norm()) in_rep_xyz = in_rep.to_cartesian().xyz assert_quantity_allclose(neg_rep.to_cartesian().xyz, -in_rep_xyz, atol=1.e-10*in_rep_xyz.unit) def test_mul_div_spherical(self): s0 = self.spherical / (1. * u.Myr) assert isinstance(s0, SphericalRepresentation) assert s0.distance.dtype.kind == 'f' assert np.all(s0.lon == self.spherical.lon) assert np.all(s0.lat == self.spherical.lat) assert np.all(s0.distance == self.distance / (1. * u.Myr)) s1 = (1./u.Myr) * self.spherical assert isinstance(s1, SphericalRepresentation) assert np.all(representation_equal(s1, s0)) s2 = self.spherical * np.array([[1.], [2.]]) assert isinstance(s2, SphericalRepresentation) assert s2.shape == (2, self.spherical.shape[0]) assert np.all(s2.lon == self.spherical.lon) assert np.all(s2.lat == self.spherical.lat) assert np.all(s2.distance == self.spherical.distance * np.array([[1.], [2.]])) s3 = np.array([[1.], [2.]]) * self.spherical assert isinstance(s3, SphericalRepresentation) assert np.all(representation_equal(s3, s2)) s4 = -self.spherical assert isinstance(s4, SphericalRepresentation) assert quantity_allclose(s4.to_cartesian().xyz, -self.spherical.to_cartesian().xyz, atol=1e-15*self.spherical.distance.unit) assert np.all(s4.distance == self.spherical.distance) s5 = +self.spherical assert s5 is not self.spherical assert np.all(representation_equal(s5, self.spherical)) @pytest.mark.parametrize('representation', (PhysicsSphericalRepresentation, CartesianRepresentation, CylindricalRepresentation)) def test_mul_div(self, representation): in_rep = self.spherical.represent_as(representation) r1 = in_rep / (1. 
* u.Myr) assert isinstance(r1, representation) for component in in_rep.components: in_rep_comp = getattr(in_rep, component) r1_comp = getattr(r1, component) if in_rep_comp.unit == self.distance.unit: assert np.all(r1_comp == in_rep_comp / (1.*u.Myr)) else: assert np.all(r1_comp == in_rep_comp) r2 = np.array([[1.], [2.]]) * in_rep assert isinstance(r2, representation) assert r2.shape == (2, in_rep.shape[0]) assert_quantity_allclose(r2.norm(), self.distance * np.array([[1.], [2.]])) r3 = -in_rep assert np.all(representation_equal(r3, in_rep * -1.)) with pytest.raises(TypeError): in_rep * in_rep with pytest.raises(TypeError): dict() * in_rep def test_mul_div_unit_spherical(self): s1 = self.unit_spherical * self.distance assert isinstance(s1, SphericalRepresentation) assert np.all(s1.lon == self.unit_spherical.lon) assert np.all(s1.lat == self.unit_spherical.lat) assert np.all(s1.distance == self.spherical.distance) s2 = self.unit_spherical / u.s assert isinstance(s2, SphericalRepresentation) assert np.all(s2.lon == self.unit_spherical.lon) assert np.all(s2.lat == self.unit_spherical.lat) assert np.all(s2.distance == 1./u.s) u3 = -self.unit_spherical assert isinstance(u3, UnitSphericalRepresentation) assert_quantity_allclose(u3.lon, self.unit_spherical.lon + 180.*u.deg) assert np.all(u3.lat == -self.unit_spherical.lat) assert_quantity_allclose(u3.to_cartesian().xyz, -self.unit_spherical.to_cartesian().xyz, atol=1.e-10*u.dimensionless_unscaled) u4 = +self.unit_spherical assert isinstance(u4, UnitSphericalRepresentation) assert u4 is not self.unit_spherical assert np.all(representation_equal(u4, self.unit_spherical)) def test_add_sub_cartesian(self): c1 = self.cartesian + self.cartesian assert isinstance(c1, CartesianRepresentation) assert c1.x.dtype.kind == 'f' assert np.all(representation_equal(c1, 2. * self.cartesian)) with pytest.raises(TypeError): self.cartesian + 10.*u.m with pytest.raises(u.UnitsError): self.cartesian + (self.cartesian / u.s) c2 = self.cartesian - self.cartesian assert isinstance(c2, CartesianRepresentation) assert np.all(representation_equal( c2, CartesianRepresentation(0.*u.m, 0.*u.m, 0.*u.m))) c3 = self.cartesian - self.cartesian / 2. assert isinstance(c3, CartesianRepresentation) assert np.all(representation_equal(c3, self.cartesian / 2.)) @pytest.mark.parametrize('representation', (PhysicsSphericalRepresentation, SphericalRepresentation, CylindricalRepresentation)) def test_add_sub(self, representation): in_rep = self.cartesian.represent_as(representation) r1 = in_rep + in_rep assert isinstance(r1, representation) expected = 2. * in_rep for component in in_rep.components: assert_quantity_allclose(getattr(r1, component), getattr(expected, component)) with pytest.raises(TypeError): 10.*u.m + in_rep with pytest.raises(u.UnitsError): in_rep + (in_rep / u.s) r2 = in_rep - in_rep assert isinstance(r2, representation) assert np.all(representation_equal( r2.to_cartesian(), CartesianRepresentation(0.*u.m, 0.*u.m, 0.*u.m))) r3 = in_rep - in_rep / 2. assert isinstance(r3, representation) expected = in_rep / 2. assert_representation_allclose(r3, expected) def test_add_sub_unit_spherical(self): s1 = self.unit_spherical + self.unit_spherical assert isinstance(s1, SphericalRepresentation) expected = 2. 
* self.unit_spherical for component in s1.components: assert_quantity_allclose(getattr(s1, component), getattr(expected, component)) with pytest.raises(TypeError): 10.*u.m - self.unit_spherical with pytest.raises(u.UnitsError): self.unit_spherical + (self.unit_spherical / u.s) s2 = self.unit_spherical - self.unit_spherical / 2. assert isinstance(s2, SphericalRepresentation) expected = self.unit_spherical / 2. for component in s2.components: assert_quantity_allclose(getattr(s2, component), getattr(expected, component)) @pytest.mark.parametrize('representation', (CartesianRepresentation, PhysicsSphericalRepresentation, SphericalRepresentation, CylindricalRepresentation)) def test_sum_mean(self, representation): in_rep = self.spherical.represent_as(representation) r_sum = in_rep.sum() assert isinstance(r_sum, representation) expected = SphericalRepresentation( 90. * u.deg, 0. * u.deg, 14. * u.kpc).represent_as(representation) for component in expected.components: exp_component = getattr(expected, component) assert_quantity_allclose(getattr(r_sum, component), exp_component, atol=1e-10*exp_component.unit) r_mean = in_rep.mean() assert isinstance(r_mean, representation) expected = expected / len(in_rep) for component in expected.components: exp_component = getattr(expected, component) assert_quantity_allclose(getattr(r_mean, component), exp_component, atol=1e-10*exp_component.unit) def test_sum_mean_unit_spherical(self): s_sum = self.unit_spherical.sum() assert isinstance(s_sum, SphericalRepresentation) expected = SphericalRepresentation( 90. * u.deg, 0. * u.deg, 3. * u.dimensionless_unscaled) for component in expected.components: exp_component = getattr(expected, component) assert_quantity_allclose(getattr(s_sum, component), exp_component, atol=1e-10*exp_component.unit) s_mean = self.unit_spherical.mean() assert isinstance(s_mean, SphericalRepresentation) expected = expected / len(self.unit_spherical) for component in expected.components: exp_component = getattr(expected, component) assert_quantity_allclose(getattr(s_mean, component), exp_component, atol=1e-10*exp_component.unit) @pytest.mark.parametrize('representation', (CartesianRepresentation, PhysicsSphericalRepresentation, SphericalRepresentation, CylindricalRepresentation)) def test_dot(self, representation): in_rep = self.cartesian.represent_as(representation) r_dot_r = in_rep.dot(in_rep) assert isinstance(r_dot_r, u.Quantity) assert r_dot_r.shape == in_rep.shape assert_quantity_allclose(np.sqrt(r_dot_r), self.distance) r_dot_r_rev = in_rep.dot(in_rep[::-1]) assert isinstance(r_dot_r_rev, u.Quantity) assert r_dot_r_rev.shape == in_rep.shape expected = [-25., -126., 2., 4., 2., -126., -25.] * u.kpc**2 assert_quantity_allclose(r_dot_r_rev, expected) for axis in 'xyz': project = CartesianRepresentation(*( (1. if axis == _axis else 0.) * u.dimensionless_unscaled for _axis in 'xyz')) assert_quantity_allclose(in_rep.dot(project), getattr(self.cartesian, axis), atol=1.*u.upc) with pytest.raises(TypeError): in_rep.dot(self.cartesian.xyz) def test_dot_unit_spherical(self): u_dot_u = self.unit_spherical.dot(self.unit_spherical) assert isinstance(u_dot_u, u.Quantity) assert u_dot_u.shape == self.unit_spherical.shape assert_quantity_allclose(u_dot_u, 1.*u.dimensionless_unscaled) cartesian = self.unit_spherical.to_cartesian() for axis in 'xyz': project = CartesianRepresentation(*( (1. if axis == _axis else 0.) 
* u.dimensionless_unscaled for _axis in 'xyz')) assert_quantity_allclose(self.unit_spherical.dot(project), getattr(cartesian, axis), atol=1.e-10) @pytest.mark.parametrize('representation', (CartesianRepresentation, PhysicsSphericalRepresentation, SphericalRepresentation, CylindricalRepresentation)) def test_cross(self, representation): in_rep = self.cartesian.represent_as(representation) r_cross_r = in_rep.cross(in_rep) assert isinstance(r_cross_r, representation) assert_quantity_allclose(r_cross_r.norm(), 0.*u.kpc**2, atol=1.*u.mpc**2) r_cross_r_rev = in_rep.cross(in_rep[::-1]) sep = angular_separation(self.lon, self.lat, self.lon[::-1], self.lat[::-1]) expected = self.distance * self.distance[::-1] * np.sin(sep) assert_quantity_allclose(r_cross_r_rev.norm(), expected, atol=1.*u.mpc**2) unit_vectors = CartesianRepresentation( [1., 0., 0.]*u.one, [0., 1., 0.]*u.one, [0., 0., 1.]*u.one)[:, np.newaxis] r_cross_uv = in_rep.cross(unit_vectors) assert r_cross_uv.shape == (3, 7) assert_quantity_allclose(r_cross_uv.dot(unit_vectors), 0.*u.kpc, atol=1.*u.upc) assert_quantity_allclose(r_cross_uv.dot(in_rep), 0.*u.kpc**2, atol=1.*u.mpc**2) zeros = np.zeros(len(in_rep)) * u.kpc expected = CartesianRepresentation( u.Quantity((zeros, -self.cartesian.z, self.cartesian.y)), u.Quantity((self.cartesian.z, zeros, -self.cartesian.x)), u.Quantity((-self.cartesian.y, self.cartesian.x, zeros))) # Comparison with spherical is hard since some distances are zero, # implying the angles are undefined. r_cross_uv_cartesian = r_cross_uv.to_cartesian() assert_representation_allclose(r_cross_uv_cartesian, expected, atol=1.*u.upc) # A final check, with the side benefit of ensuring __div__ and norm # work on multi-D representations. r_cross_uv_by_distance = r_cross_uv / self.distance uv_sph = unit_vectors.represent_as(UnitSphericalRepresentation) sep = angular_separation(self.lon, self.lat, uv_sph.lon, uv_sph.lat) assert_quantity_allclose(r_cross_uv_by_distance.norm(), np.sin(sep), atol=1e-9) with pytest.raises(TypeError): in_rep.cross(self.cartesian.xyz) def test_cross_unit_spherical(self): u_cross_u = self.unit_spherical.cross(self.unit_spherical) assert isinstance(u_cross_u, SphericalRepresentation) assert_quantity_allclose(u_cross_u.norm(), 0.*u.one, atol=1.e-10*u.one) u_cross_u_rev = self.unit_spherical.cross(self.unit_spherical[::-1]) assert isinstance(u_cross_u_rev, SphericalRepresentation) sep = angular_separation(self.lon, self.lat, self.lon[::-1], self.lat[::-1]) expected = np.sin(sep) assert_quantity_allclose(u_cross_u_rev.norm(), expected, atol=1.e-10*u.one) class TestUnitVectorsAndScales(): @staticmethod def check_unit_vectors(e): for v in e.values(): assert type(v) is CartesianRepresentation assert_quantity_allclose(v.norm(), 1. * u.one) return e @staticmethod def check_scale_factors(sf, rep): unit = rep.norm().unit for c, f in sf.items(): assert type(f) is u.Quantity assert (f.unit * getattr(rep, c).unit).is_equivalent(unit) def test_spherical(self): s = SphericalRepresentation(lon=[0., 6., 21.] * u.hourangle, lat=[0., -30., 85.] 
* u.deg, distance=[1, 2, 3] * u.kpc) e = s.unit_vectors() self.check_unit_vectors(e) sf = s.scale_factors() self.check_scale_factors(sf, s) s_lon = s + s.distance * 1e-5 * np.cos(s.lat) * e['lon'] assert_quantity_allclose(s_lon.lon, s.lon + 1e-5*u.rad, atol=1e-10*u.rad) assert_quantity_allclose(s_lon.lat, s.lat, atol=1e-10*u.rad) assert_quantity_allclose(s_lon.distance, s.distance) s_lon2 = s + 1e-5 * u.radian * sf['lon'] * e['lon'] assert_representation_allclose(s_lon2, s_lon) s_lat = s + s.distance * 1e-5 * e['lat'] assert_quantity_allclose(s_lat.lon, s.lon) assert_quantity_allclose(s_lat.lat, s.lat + 1e-5*u.rad, atol=1e-10*u.rad) assert_quantity_allclose(s_lon.distance, s.distance) s_lat2 = s + 1.e-5 * u.radian * sf['lat'] * e['lat'] assert_representation_allclose(s_lat2, s_lat) s_distance = s + 1. * u.pc * e['distance'] assert_quantity_allclose(s_distance.lon, s.lon, atol=1e-10*u.rad) assert_quantity_allclose(s_distance.lat, s.lat, atol=1e-10*u.rad) assert_quantity_allclose(s_distance.distance, s.distance + 1.*u.pc) s_distance2 = s + 1. * u.pc * sf['distance'] * e['distance'] assert_representation_allclose(s_distance2, s_distance) def test_unit_spherical(self): s = UnitSphericalRepresentation(lon=[0., 6., 21.] * u.hourangle, lat=[0., -30., 85.] * u.deg) e = s.unit_vectors() self.check_unit_vectors(e) sf = s.scale_factors() self.check_scale_factors(sf, s) s_lon = s + 1e-5 * np.cos(s.lat) * e['lon'] assert_quantity_allclose(s_lon.lon, s.lon + 1e-5*u.rad, atol=1e-10*u.rad) assert_quantity_allclose(s_lon.lat, s.lat, atol=1e-10*u.rad) s_lon2 = s + 1e-5 * u.radian * sf['lon'] * e['lon'] assert_representation_allclose(s_lon2, s_lon) s_lat = s + 1e-5 * e['lat'] assert_quantity_allclose(s_lat.lon, s.lon) assert_quantity_allclose(s_lat.lat, s.lat + 1e-5*u.rad, atol=1e-10*u.rad) s_lat2 = s + 1.e-5 * u.radian * sf['lat'] * e['lat'] assert_representation_allclose(s_lat2, s_lat) def test_radial(self): r = RadialRepresentation(10.*u.kpc) with pytest.raises(NotImplementedError): r.unit_vectors() sf = r.scale_factors() assert np.all(sf['distance'] == 1.*u.one) assert np.all(r.norm() == r.distance) with pytest.raises(TypeError): r + r def test_physical_spherical(self): s = PhysicsSphericalRepresentation(phi=[0., 6., 21.] * u.hourangle, theta=[90., 120., 5.] * u.deg, r=[1, 2, 3] * u.kpc) e = s.unit_vectors() self.check_unit_vectors(e) sf = s.scale_factors() self.check_scale_factors(sf, s) s_phi = s + s.r * 1e-5 * np.sin(s.theta) * e['phi'] assert_quantity_allclose(s_phi.phi, s.phi + 1e-5*u.rad, atol=1e-10*u.rad) assert_quantity_allclose(s_phi.theta, s.theta, atol=1e-10*u.rad) assert_quantity_allclose(s_phi.r, s.r) s_phi2 = s + 1e-5 * u.radian * sf['phi'] * e['phi'] assert_representation_allclose(s_phi2, s_phi) s_theta = s + s.r * 1e-5 * e['theta'] assert_quantity_allclose(s_theta.phi, s.phi) assert_quantity_allclose(s_theta.theta, s.theta + 1e-5*u.rad, atol=1e-10*u.rad) assert_quantity_allclose(s_theta.r, s.r) s_theta2 = s + 1.e-5 * u.radian * sf['theta'] * e['theta'] assert_representation_allclose(s_theta2, s_theta) s_r = s + 1. * u.pc * e['r'] assert_quantity_allclose(s_r.phi, s.phi, atol=1e-10*u.rad) assert_quantity_allclose(s_r.theta, s.theta, atol=1e-10*u.rad) assert_quantity_allclose(s_r.r, s.r + 1.*u.pc) s_r2 = s + 1. * u.pc * sf['r'] * e['r'] assert_representation_allclose(s_r2, s_r) def test_cartesian(self): s = CartesianRepresentation(x=[1, 2, 3] * u.pc, y=[2, 3, 4] * u.Mpc, z=[3, 4, 5] * u.kpc) e = s.unit_vectors() sf = s.scale_factors() for v, expected in zip(e.values(), ([1., 0., 0.] 
* u.one, [0., 1., 0.] * u.one, [0., 0., 1.] * u.one)): assert np.all(v.get_xyz(xyz_axis=-1) == expected) for f in sf.values(): assert np.all(f == 1.*u.one) def test_cylindrical(self): s = CylindricalRepresentation(rho=[1, 2, 3] * u.pc, phi=[0., 90., -45.] * u.deg, z=[3, 4, 5] * u.kpc) e = s.unit_vectors() self.check_unit_vectors(e) sf = s.scale_factors() self.check_scale_factors(sf, s) s_rho = s + 1. * u.pc * e['rho'] assert_quantity_allclose(s_rho.rho, s.rho + 1.*u.pc) assert_quantity_allclose(s_rho.phi, s.phi) assert_quantity_allclose(s_rho.z, s.z) s_rho2 = s + 1. * u.pc * sf['rho'] * e['rho'] assert_representation_allclose(s_rho2, s_rho) s_phi = s + s.rho * 1e-5 * e['phi'] assert_quantity_allclose(s_phi.rho, s.rho) assert_quantity_allclose(s_phi.phi, s.phi + 1e-5*u.rad) assert_quantity_allclose(s_phi.z, s.z) s_phi2 = s + 1e-5 * u.radian * sf['phi'] * e['phi'] assert_representation_allclose(s_phi2, s_phi) s_z = s + 1. * u.pc * e['z'] assert_quantity_allclose(s_z.rho, s.rho) assert_quantity_allclose(s_z.phi, s.phi, atol=1e-10*u.rad) assert_quantity_allclose(s_z.z, s.z + 1.*u.pc) s_z2 = s + 1. * u.pc * sf['z'] * e['z'] assert_representation_allclose(s_z2, s_z) @pytest.mark.parametrize('omit_coslat', [False, True], scope='class') class TestSphericalDifferential(): # these test cases are subclassed for SphericalCosLatDifferential, # hence some tests depend on omit_coslat. def _setup(self, omit_coslat): if omit_coslat: self.SD_cls = SphericalCosLatDifferential else: self.SD_cls = SphericalDifferential s = SphericalRepresentation(lon=[0., 6., 21.] * u.hourangle, lat=[0., -30., 85.] * u.deg, distance=[1, 2, 3] * u.kpc) self.s = s self.e = s.unit_vectors() self.sf = s.scale_factors(omit_coslat=omit_coslat) def test_name_coslat(self, omit_coslat): self._setup(omit_coslat) if omit_coslat: assert self.SD_cls is SphericalCosLatDifferential assert self.SD_cls.get_name() == 'sphericalcoslat' else: assert self.SD_cls is SphericalDifferential assert self.SD_cls.get_name() == 'spherical' assert self.SD_cls.get_name() in DIFFERENTIAL_CLASSES def test_simple_differentials(self, omit_coslat): self._setup(omit_coslat) s, e, sf = self.s, self.e, self.sf o_lon = self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc) o_lonc = o_lon.to_cartesian(base=s) o_lon2 = self.SD_cls.from_cartesian(o_lonc, base=s) assert_differential_allclose(o_lon, o_lon2) # simple check by hand for first element. # lat[0] is 0, so cos(lat) term doesn't matter. assert_quantity_allclose(o_lonc[0].xyz, [0., np.pi/180./3600., 0.]*u.kpc) # check all using unit vectors and scale factors. 
s_lon = s + 1.*u.arcsec * sf['lon'] * e['lon'] assert_representation_allclose(o_lonc, s_lon - s, atol=1*u.npc) s_lon2 = s + o_lon assert_representation_allclose(s_lon2, s_lon, atol=1*u.npc) o_lat = self.SD_cls(0.*u.arcsec, 1.*u.arcsec, 0.*u.kpc) o_latc = o_lat.to_cartesian(base=s) assert_quantity_allclose(o_latc[0].xyz, [0., 0., np.pi/180./3600.]*u.kpc, atol=1.*u.npc) s_lat = s + 1.*u.arcsec * sf['lat'] * e['lat'] assert_representation_allclose(o_latc, s_lat - s, atol=1*u.npc) s_lat2 = s + o_lat assert_representation_allclose(s_lat2, s_lat, atol=1*u.npc) o_distance = self.SD_cls(0.*u.arcsec, 0.*u.arcsec, 1.*u.mpc) o_distancec = o_distance.to_cartesian(base=s) assert_quantity_allclose(o_distancec[0].xyz, [1e-6, 0., 0.]*u.kpc, atol=1.*u.npc) s_distance = s + 1.*u.mpc * sf['distance'] * e['distance'] assert_representation_allclose(o_distancec, s_distance - s, atol=1*u.npc) s_distance2 = s + o_distance assert_representation_allclose(s_distance2, s_distance) def test_differential_arithmetic(self, omit_coslat): self._setup(omit_coslat) s = self.s o_lon = self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc) o_lon_by_2 = o_lon / 2. assert_representation_allclose(o_lon_by_2.to_cartesian(s) * 2., o_lon.to_cartesian(s), atol=1e-10*u.kpc) assert_representation_allclose(s + o_lon, s + 2 * o_lon_by_2, atol=1e-10*u.kpc) o_lon_rec = o_lon_by_2 + o_lon_by_2 assert_representation_allclose(s + o_lon, s + o_lon_rec, atol=1e-10*u.kpc) o_lon_0 = o_lon - o_lon for c in o_lon_0.components: assert np.all(getattr(o_lon_0, c) == 0.) o_lon2 = self.SD_cls(1*u.mas/u.yr, 0*u.mas/u.yr, 0*u.km/u.s) assert_quantity_allclose(o_lon2.norm(s)[0], 4.74*u.km/u.s, atol=0.01*u.km/u.s) assert_representation_allclose(o_lon2.to_cartesian(s) * 1000.*u.yr, o_lon.to_cartesian(s), atol=1e-10*u.kpc) s_off = s + o_lon s_off2 = s + o_lon2 * 1000.*u.yr assert_representation_allclose(s_off, s_off2, atol=1e-10*u.kpc) factor = 1e5 * u.radian/u.arcsec if not omit_coslat: factor = factor / np.cos(s.lat) s_off_big = s + o_lon * factor assert_representation_allclose( s_off_big, SphericalRepresentation(s.lon + 90.*u.deg, 0.*u.deg, 1e5*s.distance), atol=5.*u.kpc) o_lon3c = CartesianRepresentation(0., 4.74047, 0., unit=u.km/u.s) o_lon3 = self.SD_cls.from_cartesian(o_lon3c, base=s) expected0 = self.SD_cls(1.*u.mas/u.yr, 0.*u.mas/u.yr, 0.*u.km/u.s) assert_differential_allclose(o_lon3[0], expected0) s_off_big2 = s + o_lon3 * 1e5 * u.yr * u.radian/u.mas assert_representation_allclose( s_off_big2, SphericalRepresentation(90.*u.deg, 0.*u.deg, 1e5*u.kpc), atol=5.*u.kpc) with pytest.raises(TypeError): o_lon - s with pytest.raises(TypeError): s.to_cartesian() + o_lon def test_differential_init_errors(self, omit_coslat): self._setup(omit_coslat) s = self.s with pytest.raises(u.UnitsError): self.SD_cls(1.*u.arcsec, 0., 0.) 
with pytest.raises(TypeError): self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc, False, False) with pytest.raises(TypeError): self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc, copy=False, d_lat=0.*u.arcsec) with pytest.raises(TypeError): self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc, copy=False, flying='circus') with pytest.raises(ValueError): self.SD_cls(np.ones(2)*u.arcsec, np.zeros(3)*u.arcsec, np.zeros(2)*u.kpc) with pytest.raises(u.UnitsError): self.SD_cls(1.*u.arcsec, 1.*u.s, 0.*u.kpc) with pytest.raises(u.UnitsError): self.SD_cls(1.*u.kpc, 1.*u.arcsec, 0.*u.kpc) o = self.SD_cls(1.*u.arcsec, 1.*u.arcsec, 0.*u.km/u.s) with pytest.raises(u.UnitsError): o.to_cartesian(s) with pytest.raises(AttributeError): o.d_lat = 0.*u.arcsec with pytest.raises(AttributeError): del o.d_lat o = self.SD_cls(1.*u.arcsec, 1.*u.arcsec, 0.*u.km) with pytest.raises(TypeError): o.to_cartesian() c = CartesianRepresentation(10., 0., 0., unit=u.km) with pytest.raises(TypeError): self.SD_cls.to_cartesian(c) with pytest.raises(TypeError): self.SD_cls.from_cartesian(c) with pytest.raises(TypeError): self.SD_cls.from_cartesian(c, SphericalRepresentation) with pytest.raises(TypeError): self.SD_cls.from_cartesian(c, c) @pytest.mark.parametrize('omit_coslat', [False, True], scope='class') class TestUnitSphericalDifferential(): def _setup(self, omit_coslat): if omit_coslat: self.USD_cls = UnitSphericalCosLatDifferential else: self.USD_cls = UnitSphericalDifferential s = UnitSphericalRepresentation(lon=[0., 6., 21.] * u.hourangle, lat=[0., -30., 85.] * u.deg) self.s = s self.e = s.unit_vectors() self.sf = s.scale_factors(omit_coslat=omit_coslat) def test_name_coslat(self, omit_coslat): self._setup(omit_coslat) if omit_coslat: assert self.USD_cls is UnitSphericalCosLatDifferential assert self.USD_cls.get_name() == 'unitsphericalcoslat' else: assert self.USD_cls is UnitSphericalDifferential assert self.USD_cls.get_name() == 'unitspherical' assert self.USD_cls.get_name() in DIFFERENTIAL_CLASSES def test_simple_differentials(self, omit_coslat): self._setup(omit_coslat) s, e, sf = self.s, self.e, self.sf o_lon = self.USD_cls(1.*u.arcsec, 0.*u.arcsec) o_lonc = o_lon.to_cartesian(base=s) o_lon2 = self.USD_cls.from_cartesian(o_lonc, base=s) assert_differential_allclose(o_lon, o_lon2) # simple check by hand for first element # (lat[0]=0, so works for both normal and CosLat differential) assert_quantity_allclose(o_lonc[0].xyz, [0., np.pi/180./3600., 0.]*u.one) # check all using unit vectors and scale factors. s_lon = s + 1.*u.arcsec * sf['lon'] * e['lon'] assert type(s_lon) is SphericalRepresentation assert_representation_allclose(o_lonc, s_lon - s, atol=1e-10*u.one) s_lon2 = s + o_lon assert_representation_allclose(s_lon2, s_lon, atol=1e-10*u.one) o_lat = self.USD_cls(0.*u.arcsec, 1.*u.arcsec) o_latc = o_lat.to_cartesian(base=s) assert_quantity_allclose(o_latc[0].xyz, [0., 0., np.pi/180./3600.]*u.one, atol=1e-10*u.one) s_lat = s + 1.*u.arcsec * sf['lat'] * e['lat'] assert type(s_lat) is SphericalRepresentation assert_representation_allclose(o_latc, s_lat - s, atol=1e-10*u.one) s_lat2 = s + o_lat assert_representation_allclose(s_lat2, s_lat, atol=1e-10*u.one) def test_differential_arithmetic(self, omit_coslat): self._setup(omit_coslat) s = self.s o_lon = self.USD_cls(1.*u.arcsec, 0.*u.arcsec) o_lon_by_2 = o_lon / 2. 
assert type(o_lon_by_2) is self.USD_cls assert_representation_allclose(o_lon_by_2.to_cartesian(s) * 2., o_lon.to_cartesian(s), atol=1e-10*u.one) s_lon = s + o_lon s_lon2 = s + 2 * o_lon_by_2 assert type(s_lon) is SphericalRepresentation assert_representation_allclose(s_lon, s_lon2, atol=1e-10*u.one) o_lon_rec = o_lon_by_2 + o_lon_by_2 assert type(o_lon_rec) is self.USD_cls assert representation_equal(o_lon, o_lon_rec) assert_representation_allclose(s + o_lon, s + o_lon_rec, atol=1e-10*u.one) o_lon_0 = o_lon - o_lon assert type(o_lon_0) is self.USD_cls for c in o_lon_0.components: assert np.all(getattr(o_lon_0, c) == 0.) o_lon2 = self.USD_cls(1.*u.mas/u.yr, 0.*u.mas/u.yr) kks = u.km/u.kpc/u.s assert_quantity_allclose(o_lon2.norm(s)[0], 4.74047*kks, atol=1e-4*kks) assert_representation_allclose(o_lon2.to_cartesian(s) * 1000.*u.yr, o_lon.to_cartesian(s), atol=1e-10*u.one) s_off = s + o_lon s_off2 = s + o_lon2 * 1000.*u.yr assert_representation_allclose(s_off, s_off2, atol=1e-10*u.one) factor = 1e5 * u.radian/u.arcsec if not omit_coslat: factor = factor / np.cos(s.lat) s_off_big = s + o_lon * factor assert_representation_allclose( s_off_big, SphericalRepresentation(s.lon + 90.*u.deg, 0.*u.deg, 1e5), atol=5.*u.one) o_lon3c = CartesianRepresentation(0., 4.74047, 0., unit=kks) # This looses information!! o_lon3 = self.USD_cls.from_cartesian(o_lon3c, base=s) expected0 = self.USD_cls(1.*u.mas/u.yr, 0.*u.mas/u.yr) assert_differential_allclose(o_lon3[0], expected0) # Part of motion kept. part_kept = s.cross(CartesianRepresentation(0, 1, 0, unit=u.one)).norm() assert_quantity_allclose(o_lon3.norm(s), 4.74047*part_kept*kks, atol=1e-10*kks) # (lat[0]=0, so works for both normal and CosLat differential) s_off_big2 = s + o_lon3 * 1e5 * u.yr * u.radian/u.mas expected0 = SphericalRepresentation(90.*u.deg, 0.*u.deg, 1e5*u.one) assert_representation_allclose(s_off_big2[0], expected0, atol=5.*u.one) def test_differential_init_errors(self, omit_coslat): self._setup(omit_coslat) with pytest.raises(u.UnitsError): self.USD_cls(0.*u.deg, 10.*u.deg/u.yr) class TestRadialDifferential(): def setup(self): s = SphericalRepresentation(lon=[0., 6., 21.] * u.hourangle, lat=[0., -30., 85.] * u.deg, distance=[1, 2, 3] * u.kpc) self.s = s self.r = s.represent_as(RadialRepresentation) self.e = s.unit_vectors() self.sf = s.scale_factors() def test_name(self): assert RadialDifferential.get_name() == 'radial' assert RadialDifferential.get_name() in DIFFERENTIAL_CLASSES def test_simple_differentials(self): r, s, e, sf = self.r, self.s, self.e, self.sf o_distance = RadialDifferential(1.*u.mpc) # Can be applied to RadialRepresentation, though not most useful. r_distance = r + o_distance assert_quantity_allclose(r_distance.distance, r.distance + o_distance.d_distance) r_distance2 = o_distance + r assert_quantity_allclose(r_distance2.distance, r.distance + o_distance.d_distance) # More sense to apply it relative to spherical representation. 
o_distancec = o_distance.to_cartesian(base=s) assert_quantity_allclose(o_distancec[0].xyz, [1e-6, 0., 0.]*u.kpc, atol=1.*u.npc) o_recover = RadialDifferential.from_cartesian(o_distancec, base=s) assert_quantity_allclose(o_recover.d_distance, o_distance.d_distance) s_distance = s + 1.*u.mpc * sf['distance'] * e['distance'] assert_representation_allclose(o_distancec, s_distance - s, atol=1*u.npc) s_distance2 = s + o_distance assert_representation_allclose(s_distance2, s_distance) class TestPhysicsSphericalDifferential(): """Test copied from SphericalDifferential, so less extensive.""" def setup(self): s = PhysicsSphericalRepresentation(phi=[0., 90., 315.] * u.deg, theta=[90., 120., 5.] * u.deg, r=[1, 2, 3] * u.kpc) self.s = s self.e = s.unit_vectors() self.sf = s.scale_factors() def test_name(self): assert PhysicsSphericalDifferential.get_name() == 'physicsspherical' assert PhysicsSphericalDifferential.get_name() in DIFFERENTIAL_CLASSES def test_simple_differentials(self): s, e, sf = self.s, self.e, self.sf o_phi = PhysicsSphericalDifferential(1*u.arcsec, 0*u.arcsec, 0*u.kpc) o_phic = o_phi.to_cartesian(base=s) o_phi2 = PhysicsSphericalDifferential.from_cartesian(o_phic, base=s) assert_quantity_allclose(o_phi.d_phi, o_phi2.d_phi, atol=1.*u.narcsec) assert_quantity_allclose(o_phi.d_theta, o_phi2.d_theta, atol=1.*u.narcsec) assert_quantity_allclose(o_phi.d_r, o_phi2.d_r, atol=1.*u.npc) # simple check by hand for first element. assert_quantity_allclose(o_phic[0].xyz, [0., np.pi/180./3600., 0.]*u.kpc, atol=1.*u.npc) # check all using unit vectors and scale factors. s_phi = s + 1.*u.arcsec * sf['phi'] * e['phi'] assert_representation_allclose(o_phic, s_phi - s, atol=1e-10*u.kpc) o_theta = PhysicsSphericalDifferential(0*u.arcsec, 1*u.arcsec, 0*u.kpc) o_thetac = o_theta.to_cartesian(base=s) assert_quantity_allclose(o_thetac[0].xyz, [0., 0., -np.pi/180./3600.]*u.kpc, atol=1.*u.npc) s_theta = s + 1.*u.arcsec * sf['theta'] * e['theta'] assert_representation_allclose(o_thetac, s_theta - s, atol=1e-10*u.kpc) s_theta2 = s + o_theta assert_representation_allclose(s_theta2, s_theta, atol=1e-10*u.kpc) o_r = PhysicsSphericalDifferential(0*u.arcsec, 0*u.arcsec, 1*u.mpc) o_rc = o_r.to_cartesian(base=s) assert_quantity_allclose(o_rc[0].xyz, [1e-6, 0., 0.]*u.kpc, atol=1.*u.npc) s_r = s + 1.*u.mpc * sf['r'] * e['r'] assert_representation_allclose(o_rc, s_r - s, atol=1e-10*u.kpc) s_r2 = s + o_r assert_representation_allclose(s_r2, s_r) def test_differential_init_errors(self): with pytest.raises(u.UnitsError): PhysicsSphericalDifferential(1.*u.arcsec, 0., 0.) class TestCylindricalDifferential(): """Test copied from SphericalDifferential, so less extensive.""" def setup(self): s = CylindricalRepresentation(rho=[1, 2, 3] * u.kpc, phi=[0., 90., 315.] 
* u.deg, z=[3, 2, 1] * u.kpc) self.s = s self.e = s.unit_vectors() self.sf = s.scale_factors() def test_name(self): assert CylindricalDifferential.get_name() == 'cylindrical' assert CylindricalDifferential.get_name() in DIFFERENTIAL_CLASSES def test_simple_differentials(self): s, e, sf = self.s, self.e, self.sf o_rho = CylindricalDifferential(1.*u.mpc, 0.*u.arcsec, 0.*u.kpc) o_rhoc = o_rho.to_cartesian(base=s) assert_quantity_allclose(o_rhoc[0].xyz, [1.e-6, 0., 0.]*u.kpc) s_rho = s + 1.*u.mpc * sf['rho'] * e['rho'] assert_representation_allclose(o_rhoc, s_rho - s, atol=1e-10*u.kpc) s_rho2 = s + o_rho assert_representation_allclose(s_rho2, s_rho) o_phi = CylindricalDifferential(0.*u.kpc, 1.*u.arcsec, 0.*u.kpc) o_phic = o_phi.to_cartesian(base=s) o_phi2 = CylindricalDifferential.from_cartesian(o_phic, base=s) assert_quantity_allclose(o_phi.d_rho, o_phi2.d_rho, atol=1.*u.npc) assert_quantity_allclose(o_phi.d_phi, o_phi2.d_phi, atol=1.*u.narcsec) assert_quantity_allclose(o_phi.d_z, o_phi2.d_z, atol=1.*u.npc) # simple check by hand for first element. assert_quantity_allclose(o_phic[0].xyz, [0., np.pi/180./3600., 0.]*u.kpc) # check all using unit vectors and scale factors. s_phi = s + 1.*u.arcsec * sf['phi'] * e['phi'] assert_representation_allclose(o_phic, s_phi - s, atol=1e-10*u.kpc) o_z = CylindricalDifferential(0.*u.kpc, 0.*u.arcsec, 1.*u.mpc) o_zc = o_z.to_cartesian(base=s) assert_quantity_allclose(o_zc[0].xyz, [0., 0., 1.e-6]*u.kpc) s_z = s + 1.*u.mpc * sf['z'] * e['z'] assert_representation_allclose(o_zc, s_z - s, atol=1e-10*u.kpc) s_z2 = s + o_z assert_representation_allclose(s_z2, s_z) def test_differential_init_errors(self): with pytest.raises(u.UnitsError): CylindricalDifferential(1.*u.pc, 1.*u.arcsec, 3.*u.km/u.s) class TestCartesianDifferential(): """Test copied from SphericalDifferential, so less extensive.""" def setup(self): s = CartesianRepresentation(x=[1, 2, 3] * u.kpc, y=[2, 3, 1] * u.kpc, z=[3, 1, 2] * u.kpc) self.s = s self.e = s.unit_vectors() self.sf = s.scale_factors() def test_name(self): assert CartesianDifferential.get_name() == 'cartesian' assert CartesianDifferential.get_name() in DIFFERENTIAL_CLASSES def test_simple_differentials(self): s, e, sf = self.s, self.e, self.sf for d, differential in ( # test different inits while we're at it. ('x', CartesianDifferential(1.*u.pc, 0.*u.pc, 0.*u.pc)), ('y', CartesianDifferential([0., 1., 0.], unit=u.pc)), ('z', CartesianDifferential(np.array([[0., 0., 1.]]) * u.pc, xyz_axis=1))): o_c = differential.to_cartesian(base=s) o_c2 = differential.to_cartesian() assert np.all(representation_equal(o_c, o_c2)) assert all(np.all(getattr(differential, 'd_'+c) == getattr(o_c, c)) for c in ('x', 'y', 'z')) differential2 = CartesianDifferential.from_cartesian(o_c) assert np.all(representation_equal(differential2, differential)) differential3 = CartesianDifferential.from_cartesian(o_c, base=o_c) assert np.all(representation_equal(differential3, differential)) s_off = s + 1.*u.pc * sf[d] * e[d] assert_representation_allclose(o_c, s_off - s, atol=1e-10*u.kpc) s_off2 = s + differential assert_representation_allclose(s_off2, s_off) def test_init_failures(self): with pytest.raises(ValueError): CartesianDifferential(1.*u.kpc/u.s, 2.*u.kpc) with pytest.raises(u.UnitsError): CartesianDifferential(1.*u.kpc/u.s, 2.*u.kpc, 3.*u.kpc) with pytest.raises(ValueError): CartesianDifferential(1.*u.kpc, 2.*u.kpc, 3.*u.kpc, xyz_axis=1) class TestDifferentialConversion(): def setup(self): self.s = SphericalRepresentation(lon=[0., 6., 21.] 
* u.hourangle, lat=[0., -30., 85.] * u.deg, distance=[1, 2, 3] * u.kpc) @pytest.mark.parametrize('sd_cls', [SphericalDifferential, SphericalCosLatDifferential]) def test_represent_as_own_class(self, sd_cls): so = sd_cls(1.*u.deg, 2.*u.deg, 0.1*u.kpc) so2 = so.represent_as(sd_cls) assert so2 is so def test_represent_other_coslat(self): s = self.s coslat = np.cos(s.lat) so = SphericalDifferential(1.*u.deg, 2.*u.deg, 0.1*u.kpc) so_coslat = so.represent_as(SphericalCosLatDifferential, base=s) assert_quantity_allclose(so.d_lon * coslat, so_coslat.d_lon_coslat) so2 = so_coslat.represent_as(SphericalDifferential, base=s) assert np.all(representation_equal(so2, so)) so3 = SphericalDifferential.from_representation(so_coslat, base=s) assert np.all(representation_equal(so3, so)) so_coslat2 = SphericalCosLatDifferential.from_representation(so, base=s) assert np.all(representation_equal(so_coslat2, so_coslat)) # Also test UnitSpherical us = s.represent_as(UnitSphericalRepresentation) uo = so.represent_as(UnitSphericalDifferential) uo_coslat = so.represent_as(UnitSphericalCosLatDifferential, base=s) assert_quantity_allclose(uo.d_lon * coslat, uo_coslat.d_lon_coslat) uo2 = uo_coslat.represent_as(UnitSphericalDifferential, base=us) assert np.all(representation_equal(uo2, uo)) uo3 = UnitSphericalDifferential.from_representation(uo_coslat, base=us) assert np.all(representation_equal(uo3, uo)) uo_coslat2 = UnitSphericalCosLatDifferential.from_representation( uo, base=us) assert np.all(representation_equal(uo_coslat2, uo_coslat)) uo_coslat3 = uo.represent_as(UnitSphericalCosLatDifferential, base=us) assert np.all(representation_equal(uo_coslat3, uo_coslat)) @pytest.mark.parametrize('sd_cls', [SphericalDifferential, SphericalCosLatDifferential]) @pytest.mark.parametrize('r_cls', (SphericalRepresentation, UnitSphericalRepresentation, PhysicsSphericalRepresentation, CylindricalRepresentation)) def test_represent_regular_class(self, sd_cls, r_cls): so = sd_cls(1.*u.deg, 2.*u.deg, 0.1*u.kpc) r = so.represent_as(r_cls, base=self.s) c = so.to_cartesian(self.s) r_check = c.represent_as(r_cls) assert np.all(representation_equal(r, r_check)) so2 = sd_cls.from_representation(r, base=self.s) so3 = sd_cls.from_cartesian(r.to_cartesian(), self.s) assert np.all(representation_equal(so2, so3)) @pytest.mark.parametrize('sd_cls', [SphericalDifferential, SphericalCosLatDifferential]) def test_convert_physics(self, sd_cls): # Conversion needs no base for SphericalDifferential, but does # need one (to get the latitude) for SphericalCosLatDifferential. 
if sd_cls is SphericalDifferential: usd_cls = UnitSphericalDifferential base_s = base_u = base_p = None else: usd_cls = UnitSphericalCosLatDifferential base_s = self.s[1] base_u = base_s.represent_as(UnitSphericalRepresentation) base_p = base_s.represent_as(PhysicsSphericalRepresentation) so = sd_cls(1.*u.deg, 2.*u.deg, 0.1*u.kpc) po = so.represent_as(PhysicsSphericalDifferential, base=base_s) so2 = sd_cls.from_representation(po, base=base_s) assert_differential_allclose(so, so2) po2 = PhysicsSphericalDifferential.from_representation(so, base=base_p) assert_differential_allclose(po, po2) so3 = po.represent_as(sd_cls, base=base_p) assert_differential_allclose(so, so3) s = self.s p = s.represent_as(PhysicsSphericalRepresentation) cso = so.to_cartesian(s[1]) cpo = po.to_cartesian(p[1]) assert_representation_allclose(cso, cpo) assert_representation_allclose(s[1] + so, p[1] + po) po2 = so.represent_as(PhysicsSphericalDifferential, base=None if base_s is None else s) assert_representation_allclose(s + so, p + po2) suo = usd_cls.from_representation(so) puo = usd_cls.from_representation(po, base=base_u) assert_differential_allclose(suo, puo) suo2 = so.represent_as(usd_cls) puo2 = po.represent_as(usd_cls, base=base_p) assert_differential_allclose(suo2, puo2) assert_differential_allclose(puo, puo2) sro = RadialDifferential.from_representation(so) pro = RadialDifferential.from_representation(po) assert representation_equal(sro, pro) sro2 = so.represent_as(RadialDifferential) pro2 = po.represent_as(RadialDifferential) assert representation_equal(sro2, pro2) assert representation_equal(pro, pro2) @pytest.mark.parametrize( ('sd_cls', 'usd_cls'), [(SphericalDifferential, UnitSphericalDifferential), (SphericalCosLatDifferential, UnitSphericalCosLatDifferential)]) def test_convert_unit_spherical_radial(self, sd_cls, usd_cls): s = self.s us = s.represent_as(UnitSphericalRepresentation) rs = s.represent_as(RadialRepresentation) assert_representation_allclose(rs * us, s) uo = usd_cls(2.*u.deg, 1.*u.deg) so = uo.represent_as(sd_cls, base=s) assert_quantity_allclose(so.d_distance, 0.*u.kpc, atol=1.*u.npc) uo2 = so.represent_as(usd_cls) assert_representation_allclose(uo.to_cartesian(us), uo2.to_cartesian(us)) so1 = sd_cls(2.*u.deg, 1.*u.deg, 5.*u.pc) uo_r = so1.represent_as(usd_cls) ro_r = so1.represent_as(RadialDifferential) assert np.all(representation_equal(uo_r, uo)) assert np.all(representation_equal(ro_r, RadialDifferential(5.*u.pc))) @pytest.mark.parametrize('sd_cls', [SphericalDifferential, SphericalCosLatDifferential]) def test_convert_cylindrial(self, sd_cls): s = self.s so = sd_cls(1.*u.deg, 2.*u.deg, 0.1*u.kpc) cyo = so.represent_as(CylindricalDifferential, base=s) cy = s.represent_as(CylindricalRepresentation) so1 = cyo.represent_as(sd_cls, base=cy) assert_representation_allclose(so.to_cartesian(s), so1.to_cartesian(s)) cyo2 = CylindricalDifferential.from_representation(so, base=cy) assert_representation_allclose(cyo2.to_cartesian(base=cy), cyo.to_cartesian(base=cy)) so2 = sd_cls.from_representation(cyo2, base=s) assert_representation_allclose(so.to_cartesian(s), so2.to_cartesian(s)) @pytest.mark.parametrize('sd_cls', [SphericalDifferential, SphericalCosLatDifferential]) def test_combinations(self, sd_cls): if sd_cls is SphericalDifferential: uo = UnitSphericalDifferential(2.*u.deg, 1.*u.deg) uo_d_lon = uo.d_lon else: uo = UnitSphericalCosLatDifferential(2.*u.deg, 1.*u.deg) uo_d_lon = uo.d_lon_coslat ro = RadialDifferential(1.*u.mpc) so1 = uo + ro so1c = sd_cls(uo_d_lon, uo.d_lat, ro.d_distance) 
assert np.all(representation_equal(so1, so1c)) so2 = uo - ro so2c = sd_cls(uo_d_lon, uo.d_lat, -ro.d_distance) assert np.all(representation_equal(so2, so2c)) so3 = so2 + ro so3c = sd_cls(uo_d_lon, uo.d_lat, 0.*u.kpc) assert np.all(representation_equal(so3, so3c)) so4 = so1 + ro so4c = sd_cls(uo_d_lon, uo.d_lat, 2*ro.d_distance) assert np.all(representation_equal(so4, so4c)) so5 = so1 - uo so5c = sd_cls(0*u.deg, 0.*u.deg, ro.d_distance) assert np.all(representation_equal(so5, so5c)) assert_representation_allclose(self.s + (uo+ro), self.s+so1) @pytest.mark.parametrize('rep,dif', [ [CartesianRepresentation([1, 2, 3]*u.kpc), CartesianDifferential([.1, .2, .3]*u.km/u.s)], [SphericalRepresentation(90*u.deg, 0.*u.deg, 14.*u.kpc), SphericalDifferential(1.*u.deg, 2.*u.deg, 0.1*u.kpc)] ]) def test_arithmetic_with_differentials_fail(rep, dif): rep = rep.with_differentials(dif) with pytest.raises(TypeError): rep + rep with pytest.raises(TypeError): rep - rep with pytest.raises(TypeError): rep * rep with pytest.raises(TypeError): rep / rep with pytest.raises(TypeError): 10. * rep with pytest.raises(TypeError): rep / 10. with pytest.raises(TypeError): -rep
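# Editor's addition: a minimal, self-contained sketch (not part of the original
# test module) of the representation arithmetic exercised by the tests above:
# addition, scaling by a Quantity, and norm(). The values are arbitrary.
if __name__ == '__main__':
    import astropy.units as u
    from astropy.coordinates import CartesianRepresentation

    c = CartesianRepresentation(1. * u.kpc, 2. * u.kpc, 3. * u.kpc)
    doubled = c + c                    # component-wise addition
    assert u.allclose(doubled.norm(), 2. * c.norm())
    rate = c / (10. * u.Myr)           # dividing by a Quantity rescales the unit
    print(rate.x.unit)                 # kpc / Myr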
2be805ad35f8f1f4b12935fe2fb0e1273022ec47e20acbbc824988112141a228
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import warnings

import pytest
import numpy as np
from numpy import testing as npt

from astropy import units as u
from astropy.time import Time
from astropy.coordinates.builtin_frames import ICRS, AltAz
from astropy.coordinates.builtin_frames.utils import get_jd12
from astropy.coordinates import EarthLocation
from astropy.coordinates import SkyCoord
from astropy.tests.helper import catch_warnings
from astropy import _erfa as erfa
from astropy.utils import iers
from .utils import randomly_sample_sphere


# These fixtures are used in test_iau_fullstack
@pytest.fixture(scope="function")
def fullstack_icrs():
    ra, dec, _ = randomly_sample_sphere(1000)
    return ICRS(ra=ra, dec=dec)


@pytest.fixture(scope="function")
def fullstack_fiducial_altaz(fullstack_icrs):
    altazframe = AltAz(location=EarthLocation(lat=0*u.deg, lon=0*u.deg,
                                              height=0*u.m),
                       obstime=Time('J2000'))
    with warnings.catch_warnings():  # Ignore remote_data warning
        warnings.simplefilter('ignore')
        result = fullstack_icrs.transform_to(altazframe)
    return result


@pytest.fixture(scope="function", params=['J2000.1', 'J2010'])
def fullstack_times(request):
    return Time(request.param)


@pytest.fixture(scope="function", params=[(0, 0, 0), (23, 0, 0), (-70, 0, 0),
                                          (0, 100, 0), (23, 0, 3000)])
def fullstack_locations(request):
    # use all three tuple elements as (lat, lon, height)
    return EarthLocation(lat=request.param[0]*u.deg, lon=request.param[1]*u.deg,
                         height=request.param[2]*u.m)


@pytest.fixture(scope="function",
                params=[(0*u.bar, 0*u.deg_C, 0, 1*u.micron),
                        (1*u.bar, 0*u.deg_C, 0*u.one, 1*u.micron),
                        (1*u.bar, 10*u.deg_C, 0, 1*u.micron),
                        (1*u.bar, 0*u.deg_C, 50*u.percent, 1*u.micron),
                        (1*u.bar, 0*u.deg_C, 0, 21*u.cm)])
def fullstack_obsconditions(request):
    return request.param


def _erfa_check(ira, idec, astrom):
    """
    This function does the same thing the astropy layer is supposed to do, but
    all in erfa
    """
    cra, cdec = erfa.atciq(ira, idec, 0, 0, 0, 0, astrom)
    az, zen, ha, odec, ora = erfa.atioq(cra, cdec, astrom)
    alt = np.pi/2 - zen

    cra2, cdec2 = erfa.atoiq('A', az, zen, astrom)
    ira2, idec2 = erfa.aticq(cra2, cdec2, astrom)

    dct = locals()
    del dct['astrom']
    return dct


@pytest.mark.remote_data
def test_iau_fullstack(fullstack_icrs, fullstack_fiducial_altaz,
                       fullstack_times, fullstack_locations,
                       fullstack_obsconditions):
    """
    Test the full transform from ICRS <-> AltAz
    """
    # create the altaz frame
    altazframe = AltAz(obstime=fullstack_times, location=fullstack_locations,
                       pressure=fullstack_obsconditions[0],
                       temperature=fullstack_obsconditions[1],
                       relative_humidity=fullstack_obsconditions[2],
                       obswl=fullstack_obsconditions[3])
    aacoo = fullstack_icrs.transform_to(altazframe)

    # compare aacoo to the fiducial AltAz - should always be different
    assert np.all(np.abs(aacoo.alt - fullstack_fiducial_altaz.alt) > 50*u.milliarcsecond)
    assert np.all(np.abs(aacoo.az - fullstack_fiducial_altaz.az) > 50*u.milliarcsecond)

    # if the refraction correction is included, we *only* do the comparisons
    # where altitude > 5 degrees.  The SOFA guides imply that below 5 is where
    # accuracy gets more problematic, and testing reveals that alt < ~0
    # gives garbage round-tripping, and < 10 can give ~1 arcsec uncertainty
    if fullstack_obsconditions[0].value == 0:
        # but if there is no refraction correction, check everything
        msk = slice(None)
        tol = 5*u.microarcsecond
    else:
        msk = aacoo.alt > 5*u.deg
        # most of them aren't this bad, but some of those at low alt are offset
        # this much.  For alt > 10, this is always better than 100 mas
        tol = 750*u.milliarcsecond

    # now make sure the full stack round-tripping works
    icrs2 = aacoo.transform_to(ICRS)
    adras = np.abs(fullstack_icrs.ra - icrs2.ra)[msk]
    addecs = np.abs(fullstack_icrs.dec - icrs2.dec)[msk]
    assert np.all(adras < tol), 'largest RA change is {0} mas, > {1}'.format(
        np.max(adras.arcsec*1000), tol)
    assert np.all(addecs < tol), 'largest Dec change is {0} mas, > {1}'.format(
        np.max(addecs.arcsec*1000), tol)

    # check that we're consistent with the ERFA alt/az result
    xp, yp = u.Quantity(iers.IERS_Auto.open().pm_xy(fullstack_times)).to_value(u.radian)
    lon = fullstack_locations.geodetic[0].to_value(u.radian)
    lat = fullstack_locations.geodetic[1].to_value(u.radian)
    height = fullstack_locations.geodetic[2].to_value(u.m)
    jd1, jd2 = get_jd12(fullstack_times, 'utc')
    pressure = fullstack_obsconditions[0].to_value(u.hPa)
    temperature = fullstack_obsconditions[1].to_value(u.deg_C)
    # Relative humidity can be a quantity or a number.
    relative_humidity = u.Quantity(fullstack_obsconditions[2], u.one).value
    obswl = fullstack_obsconditions[3].to_value(u.micron)
    astrom, eo = erfa.apco13(jd1, jd2, fullstack_times.delta_ut1_utc,
                             lon, lat, height,
                             xp, yp, pressure, temperature, relative_humidity,
                             obswl)
    erfadct = _erfa_check(fullstack_icrs.ra.rad, fullstack_icrs.dec.rad, astrom)
    npt.assert_allclose(erfadct['alt'], aacoo.alt.radian, atol=1e-7)
    npt.assert_allclose(erfadct['az'], aacoo.az.radian, atol=1e-7)


@pytest.mark.remote_data
def test_fiducial_roundtrip(fullstack_icrs, fullstack_fiducial_altaz):
    """
    Test that the ICRS <-> AltAz transform round-trips through the fiducial
    frame
    """
    aacoo = fullstack_icrs.transform_to(fullstack_fiducial_altaz)

    # make sure the round-tripping works
    icrs2 = aacoo.transform_to(ICRS)
    npt.assert_allclose(fullstack_icrs.ra.deg, icrs2.ra.deg)
    npt.assert_allclose(fullstack_icrs.dec.deg, icrs2.dec.deg)


def test_future_altaz():
    """
    While this does test the full stack, it is mostly meant to check that a
    warning is raised when attempting to get to AltAz in the future (beyond
    IERS tables)
    """
    from astropy.utils.exceptions import AstropyWarning

    # this is an ugly hack to get the warning to show up even if it has already
    # appeared
    from astropy.coordinates.builtin_frames import utils
    if hasattr(utils, '__warningregistry__'):
        utils.__warningregistry__.clear()

    with catch_warnings() as found_warnings:
        location = EarthLocation(lat=0*u.deg, lon=0*u.deg)
        t = Time('J2161')

        SkyCoord(1*u.deg, 2*u.deg).transform_to(AltAz(location=location,
                                                      obstime=t))

    # check that these message(s) appear among any other warnings.  If tests
    # are run with --remote-data then the IERS table will be an instance of
    # IERS_Auto which is assured of being "fresh".  In this case getting times
    # outside the range of the table does not raise an exception.  Only if
    # using IERS_B (which happens without --remote-data, i.e. for all CI
    # testing) do we expect another warning.
    messages_to_find = ["Tried to get polar motions for times after IERS data is valid."]
    if isinstance(iers.IERS_Auto.iers_table, iers.IERS_B):
        messages_to_find.append("(some) times are outside of range covered by IERS table.")

    messages_found = [False for _ in messages_to_find]
    for w in found_warnings:
        if issubclass(w.category, AstropyWarning):
            for i, message_to_find in enumerate(messages_to_find):
                if message_to_find in str(w.message):
                    messages_found[i] = True
    assert all(messages_found)
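# Editor's addition: a compact, public-API sketch of the ICRS <-> AltAz round
# trip that the full-stack test above exercises. Location, time, and the input
# coordinate are arbitrary illustrative values; IERS data must be reachable,
# just as for the tests themselves.
if __name__ == '__main__':
    import astropy.units as u
    from astropy.time import Time
    from astropy.coordinates import AltAz, EarthLocation, SkyCoord

    location = EarthLocation(lat=52*u.deg, lon=-1*u.deg, height=100*u.m)
    frame = AltAz(obstime=Time('2012-01-01 00:00:00'), location=location)
    icrs = SkyCoord(ra=10*u.deg, dec=20*u.deg)
    altaz = icrs.transform_to(frame)
    roundtrip = altaz.transform_to('icrs')
    # the round trip should recover the input to well below an arcsecond
    assert roundtrip.separation(icrs) < 1*u.arcsec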
16ddb2911b2f5af710e9a883354939736e3cae8208dbb44ff16b2b4d3aaa0210
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test replacements for ERFA functions atciqz and aticq."""
from itertools import product

import pytest

from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy import _erfa as erfa
from .utils import randomly_sample_sphere
from astropy.coordinates.builtin_frames.utils import get_jd12, atciqz, aticq

times = [Time("2014-06-25T00:00"), Time(["2014-06-25T00:00", "2014-09-24"])]
ra, dec, _ = randomly_sample_sphere(2)
positions = ((ra[0], dec[0]), (ra, dec))
spacetimes = product(times, positions)


@pytest.mark.parametrize('st', spacetimes)
def test_atciqz_aticq(st):
    """Check replacements against erfa versions for consistency."""
    t, pos = st
    jd1, jd2 = get_jd12(t, 'tdb')
    astrom, _ = erfa.apci13(jd1, jd2)

    ra, dec = pos
    ra = ra.value
    dec = dec.value
    assert_allclose(erfa.atciqz(ra, dec, astrom), atciqz(ra, dec, astrom))
    assert_allclose(erfa.aticq(ra, dec, astrom), aticq(ra, dec, astrom))
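# Editor's addition: an illustrative single-point version of the consistency
# check above (not part of the original module). It uses the same private
# helpers as the test; the RA/Dec values are arbitrary.
if __name__ == '__main__':
    from astropy.time import Time
    from astropy import _erfa as erfa
    from astropy.coordinates.builtin_frames.utils import get_jd12, atciqz

    jd1, jd2 = get_jd12(Time('2014-06-25T00:00'), 'tdb')
    astrom, _ = erfa.apci13(jd1, jd2)
    ra, dec = 1.0, 0.5  # ICRS coordinates in radians
    print(erfa.atciqz(ra, dec, astrom))  # erfa result (CIRS RA/Dec)
    print(atciqz(ra, dec, astrom))       # astropy re-implementation; should agree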
bee8bf24f6a55e9ac8a952336c8baf99bb18b1755c816b83059428b49a87c4f0
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import numpy as np
from numpy.testing import assert_allclose, assert_array_equal

from astropy import units as u
from astropy.coordinates.matrix_utilities import rotation_matrix, angle_axis


def test_rotation_matrix():
    assert_array_equal(rotation_matrix(0*u.deg, 'x'), np.eye(3))

    assert_allclose(rotation_matrix(90*u.deg, 'y'),
                    [[0, 0, -1], [0, 1, 0], [1, 0, 0]], atol=1e-12)

    assert_allclose(rotation_matrix(-90*u.deg, 'z'),
                    [[0, -1, 0], [1, 0, 0], [0, 0, 1]], atol=1e-12)

    assert_allclose(rotation_matrix(45*u.deg, 'x'),
                    rotation_matrix(45*u.deg, [1, 0, 0]))
    assert_allclose(rotation_matrix(125*u.deg, 'y'),
                    rotation_matrix(125*u.deg, [0, 1, 0]))
    assert_allclose(rotation_matrix(-30*u.deg, 'z'),
                    rotation_matrix(-30*u.deg, [0, 0, 1]))

    assert_allclose(np.dot(rotation_matrix(180*u.deg, [1, 1, 0]), [1, 0, 0]),
                    [0, 1, 0], atol=1e-12)

    # make sure it also works for very small angles
    assert_allclose(rotation_matrix(0.000001*u.deg, 'x'),
                    rotation_matrix(0.000001*u.deg, [1, 0, 0]))


def test_angle_axis():
    m1 = rotation_matrix(35*u.deg, 'x')
    an1, ax1 = angle_axis(m1)

    # use abs() so that deviations in either direction are caught
    assert np.abs(an1 - 35*u.deg) < 1e-10*u.deg
    assert_allclose(ax1, [1, 0, 0])

    m2 = rotation_matrix(-89*u.deg, [1, 1, 0])
    an2, ax2 = angle_axis(m2)

    assert np.abs(an2 - 89*u.deg) < 1e-10*u.deg
    assert_allclose(ax2, [-2**-0.5, -2**-0.5, 0])
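# Editor's addition: a quick illustration (not part of the original module) of
# the rotation_matrix / angle_axis pair tested above.
if __name__ == '__main__':
    import numpy as np
    import astropy.units as u
    from astropy.coordinates.matrix_utilities import rotation_matrix, angle_axis

    m = rotation_matrix(90*u.deg, 'y')
    print(np.dot(m, [1, 0, 0]))   # ~[0, 0, 1]: x rotates onto z about the y axis
    angle, axis = angle_axis(m)
    print(angle.to(u.deg), axis)  # recovers ~90 deg about [0, 1, 0]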
57898674f611ec7769552755720385adc9af687e1f45f7120354c74e0d563fee
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Regression tests for coordinates-related bugs that don't have an obvious other place to live """ import io import copy import pytest import numpy as np from astropy import units as u from astropy.coordinates import (AltAz, EarthLocation, SkyCoord, get_sun, ICRS, GeocentricMeanEcliptic, Longitude, Latitude, GCRS, HCRS, CIRS, get_moon, FK4, FK4NoETerms, BaseCoordinateFrame, ITRS, QuantityAttribute, UnitSphericalRepresentation, SphericalRepresentation, CartesianRepresentation, FunctionTransform) from astropy.coordinates.sites import get_builtin_sites from astropy.time import Time from astropy.utils import iers from astropy.table import Table from astropy.tests.helper import assert_quantity_allclose, catch_warnings from .test_matching import HAS_SCIPY, OLDER_SCIPY from astropy.units import allclose as quantity_allclose try: import yaml # pylint: disable=W0611 HAS_YAML = True except ImportError: HAS_YAML = False def test_regression_5085(): """ PR #5085 was put in place to fix the following issue. Issue: https://github.com/astropy/astropy/issues/5069 At root was the transformation of Ecliptic coordinates with non-scalar times. """ # Note: for regression test, we need to be sure that we use UTC for the # epoch, even though more properly that should be TT; but the "expected" # values were calculated using that. j2000 = Time('J2000', scale='utc') times = Time(["2015-08-28 03:30", "2015-09-05 10:30", "2015-09-15 18:35"]) latitudes = Latitude([3.9807075, -5.00733806, 1.69539491]*u.deg) longitudes = Longitude([311.79678613, 72.86626741, 199.58698226]*u.deg) distances = u.Quantity([0.00243266, 0.0025424, 0.00271296]*u.au) coo = GeocentricMeanEcliptic(lat=latitudes, lon=longitudes, distance=distances, obstime=times, equinox=times) # expected result ras = Longitude([310.50095400, 314.67109920, 319.56507428]*u.deg) decs = Latitude([-18.25190443, -17.1556676, -15.71616522]*u.deg) distances = u.Quantity([1.78309901, 1.710874, 1.61326649]*u.au) expected_result = GCRS(ra=ras, dec=decs, distance=distances, obstime=j2000).cartesian.xyz actual_result = coo.transform_to(GCRS(obstime=j2000)).cartesian.xyz assert_quantity_allclose(expected_result, actual_result) @pytest.mark.remote_data def test_regression_3920(): """ Issue: https://github.com/astropy/astropy/issues/3920 """ loc = EarthLocation.from_geodetic(0*u.deg, 0*u.deg, 0) time = Time('2010-1-1') aa = AltAz(location=loc, obstime=time) sc = SkyCoord(10*u.deg, 3*u.deg) assert sc.transform_to(aa).shape == tuple() # That part makes sense: the input is a scalar so the output is too sc2 = SkyCoord(10*u.deg, 3*u.deg, 1*u.AU) assert sc2.transform_to(aa).shape == tuple() # in 3920 that assert fails, because the shape is (1,) # check that the same behavior occurs even if transform is from low-level classes icoo = ICRS(sc.data) icoo2 = ICRS(sc2.data) assert icoo.transform_to(aa).shape == tuple() assert icoo2.transform_to(aa).shape == tuple() @pytest.mark.remote_data def test_regression_3938(): """ Issue: https://github.com/astropy/astropy/issues/3938 """ # Set up list of targets - we don't use `from_name` here to avoid # remote_data requirements, but it does the same thing # vega = SkyCoord.from_name('Vega') vega = SkyCoord(279.23473479*u.deg, 38.78368896*u.deg) # capella = SkyCoord.from_name('Capella') capella = SkyCoord(79.17232794*u.deg, 45.99799147*u.deg) # sirius = SkyCoord.from_name('Sirius') sirius = SkyCoord(101.28715533*u.deg, -16.71611586*u.deg) targets = [vega, 
capella, sirius] # Feed list of targets into SkyCoord combined_coords = SkyCoord(targets) # Set up AltAz frame time = Time('2012-01-01 00:00:00') location = EarthLocation('10d', '45d', 0) aa = AltAz(location=location, obstime=time) combined_coords.transform_to(aa) # in 3938 the above yields ``UnitConversionError: '' (dimensionless) and 'pc' (length) are not convertible`` def test_regression_3998(): """ Issue: https://github.com/astropy/astropy/issues/3998 """ time = Time('2012-01-01 00:00:00') assert time.isscalar sun = get_sun(time) assert sun.isscalar # in 3998, the above yields False - `sun` is a length-1 vector assert sun.obstime is time @pytest.mark.remote_data def test_regression_4033(): """ Issue: https://github.com/astropy/astropy/issues/4033 """ # alb = SkyCoord.from_name('Albireo') alb = SkyCoord(292.68033548*u.deg, 27.95968007*u.deg) alb_wdist = SkyCoord(alb, distance=133*u.pc) # de = SkyCoord.from_name('Deneb') de = SkyCoord(310.35797975*u.deg, 45.28033881*u.deg) de_wdist = SkyCoord(de, distance=802*u.pc) aa = AltAz(location=EarthLocation(lat=45*u.deg, lon=0*u.deg), obstime='2010-1-1') deaa = de.transform_to(aa) albaa = alb.transform_to(aa) alb_wdistaa = alb_wdist.transform_to(aa) de_wdistaa = de_wdist.transform_to(aa) # these work fine sepnod = deaa.separation(albaa) sepwd = deaa.separation(alb_wdistaa) assert_quantity_allclose(sepnod, 22.2862*u.deg, rtol=1e-6) assert_quantity_allclose(sepwd, 22.2862*u.deg, rtol=1e-6) # parallax should be present when distance added assert np.abs(sepnod - sepwd) > 1*u.marcsec # in 4033, the following fail with a recursion error assert_quantity_allclose(de_wdistaa.separation(alb_wdistaa), 22.2862*u.deg, rtol=1e-3) assert_quantity_allclose(alb_wdistaa.separation(deaa), 22.2862*u.deg, rtol=1e-3) @pytest.mark.skipif(not HAS_SCIPY, reason='No Scipy') @pytest.mark.skipif(OLDER_SCIPY, reason='Scipy too old') def test_regression_4082(): """ Issue: https://github.com/astropy/astropy/issues/4082 """ from astropy.coordinates import search_around_sky, search_around_3d cat = SkyCoord([10.076, 10.00455], [18.54746, 18.54896], unit='deg') search_around_sky(cat[0:1], cat, seplimit=u.arcsec * 60, storekdtree=False) # in the issue, this raises a TypeError # also check 3d for good measure, although it's not really affected by this bug directly cat3d = SkyCoord([10.076, 10.00455]*u.deg, [18.54746, 18.54896]*u.deg, distance=[0.1, 1.5]*u.kpc) search_around_3d(cat3d[0:1], cat3d, 1*u.kpc, storekdtree=False) def test_regression_4210(): """ Issue: https://github.com/astropy/astropy/issues/4210 Related PR with actual change: https://github.com/astropy/astropy/pull/4211 """ crd = SkyCoord(0*u.deg, 0*u.deg, distance=1*u.AU) ecl = crd.geocentricmeanecliptic # bug was that "lambda", which at the time was the name of the geocentric # ecliptic longitude, is a reserved keyword. So this just makes sure the # new name is are all valid ecl.lon # and for good measure, check the other ecliptic systems are all the same # names for their attributes from astropy.coordinates.builtin_frames import ecliptic for frame_name in ecliptic.__all__: eclcls = getattr(ecliptic, frame_name) eclobj = eclcls(1*u.deg, 2*u.deg, 3*u.AU) eclobj.lat eclobj.lon eclobj.distance def test_regression_futuretimes_4302(): """ Checks that an error is not raised for future times not covered by IERS tables (at least in a simple transform like CIRS->ITRS that simply requires the UTC<->UT1 conversion). 
Relevant comment: https://github.com/astropy/astropy/pull/4302#discussion_r44836531 """ from astropy.utils.exceptions import AstropyWarning # this is an ugly hack to get the warning to show up even if it has already # appeared from astropy.coordinates.builtin_frames import utils if hasattr(utils, '__warningregistry__'): utils.__warningregistry__.clear() with catch_warnings() as found_warnings: future_time = Time('2511-5-1') c = CIRS(1*u.deg, 2*u.deg, obstime=future_time) c.transform_to(ITRS(obstime=future_time)) if not isinstance(iers.IERS_Auto.iers_table, iers.IERS_Auto): saw_iers_warnings = False for w in found_warnings: if issubclass(w.category, AstropyWarning): if '(some) times are outside of range covered by IERS table' in str(w.message): saw_iers_warnings = True break assert saw_iers_warnings, 'Never saw IERS warning' def test_regression_4996(): # this part is the actual regression test deltat = np.linspace(-12, 12, 1000)*u.hour times = Time('2012-7-13 00:00:00') + deltat suncoo = get_sun(times) assert suncoo.shape == (len(times),) # and this is an additional test to make sure more complex arrays work times2 = Time('2012-7-13 00:00:00') + deltat.reshape(10, 20, 5) suncoo2 = get_sun(times2) assert suncoo2.shape == times2.shape # this is intentionally not allclose - they should be *exactly* the same assert np.all(suncoo.ra.ravel() == suncoo2.ra.ravel()) def test_regression_4293(): """Really just an extra test on FK4 no e, after finding that the units were not always taken correctly. This test is against explicitly doing the transformations on pp170 of Explanatory Supplement to the Astronomical Almanac (Seidelmann, 2005). See https://github.com/astropy/astropy/pull/4293#issuecomment-234973086 """ # Check all over sky, but avoiding poles (note that FK4 did not ignore # e terms within 10∘ of the poles... see p170 of explan.supp.). ra, dec = np.meshgrid(np.arange(0, 359, 45), np.arange(-80, 81, 40)) fk4 = FK4(ra.ravel() * u.deg, dec.ravel() * u.deg) Dc = -0.065838*u.arcsec Dd = +0.335299*u.arcsec # Dc * tan(obliquity), as given on p.170 Dctano = -0.028553*u.arcsec fk4noe_dec = (fk4.dec - (Dd*np.cos(fk4.ra) - Dc*np.sin(fk4.ra))*np.sin(fk4.dec) - Dctano*np.cos(fk4.dec)) fk4noe_ra = fk4.ra - (Dc*np.cos(fk4.ra) + Dd*np.sin(fk4.ra)) / np.cos(fk4.dec) fk4noe = fk4.transform_to(FK4NoETerms) # Tolerance here just set to how well the coordinates match, which is much # better than the claimed accuracy of <1 mas for this first-order in # v_earth/c approximation. # Interestingly, if one divides by np.cos(fk4noe_dec) in the ra correction, # the match becomes good to 2 μas. 
assert_quantity_allclose(fk4noe.ra, fk4noe_ra, atol=11.*u.uas, rtol=0) assert_quantity_allclose(fk4noe.dec, fk4noe_dec, atol=3.*u.uas, rtol=0) @pytest.mark.remote_data def test_regression_4926(): times = Time('2010-01-1') + np.arange(20)*u.day green = get_builtin_sites()['greenwich'] # this is the regression test moon = get_moon(times, green) # this is an additional test to make sure the GCRS->ICRS transform works for complex shapes moon.transform_to(ICRS()) # and some others to increase coverage of transforms moon.transform_to(HCRS(obstime="J2000")) moon.transform_to(HCRS(obstime=times)) def test_regression_5209(): "check that distances are not lost on SkyCoord init" time = Time('2015-01-01') moon = get_moon(time) new_coord = SkyCoord([moon]) assert_quantity_allclose(new_coord[0].distance, moon.distance) @pytest.mark.remote_data def test_regression_5133(): N = 1000 np.random.seed(12345) lon = np.random.uniform(-10, 10, N) * u.deg lat = np.random.uniform(50, 52, N) * u.deg alt = np.random.uniform(0, 10., N) * u.km time = Time('2010-1-1') objects = EarthLocation.from_geodetic(lon, lat, height=alt) itrs_coo = objects.get_itrs(time) homes = [EarthLocation.from_geodetic(lon=-1 * u.deg, lat=52 * u.deg, height=h) for h in (0, 1000, 10000)*u.km] altaz_frames = [AltAz(obstime=time, location=h) for h in homes] altaz_coos = [itrs_coo.transform_to(f) for f in altaz_frames] # they should all be different for coo in altaz_coos[1:]: assert not quantity_allclose(coo.az, coo.az[0]) assert not quantity_allclose(coo.alt, coo.alt[0]) @pytest.mark.remote_data def test_itrs_vals_5133(): time = Time('2010-1-1') el = EarthLocation.from_geodetic(lon=20*u.deg, lat=45*u.deg, height=0*u.km) lons = [20, 30, 20]*u.deg lats = [44, 45, 45]*u.deg alts = [0, 0, 10]*u.km coos = [EarthLocation.from_geodetic(lon, lat, height=alt).get_itrs(time) for lon, lat, alt in zip(lons, lats, alts)] aaf = AltAz(obstime=time, location=el) aacs = [coo.transform_to(aaf) for coo in coos] assert all([coo.isscalar for coo in aacs]) # the ~1 arcsec tolerance is b/c aberration makes it not exact assert_quantity_allclose(aacs[0].az, 180*u.deg, atol=1*u.arcsec) assert aacs[0].alt < 0*u.deg assert aacs[0].distance > 50*u.km # it should *not* actually be 90 degrees, b/c constant latitude is not # straight east anywhere except the equator... 
but should be close-ish assert_quantity_allclose(aacs[1].az, 90*u.deg, atol=5*u.deg) assert aacs[1].alt < 0*u.deg assert aacs[1].distance > 50*u.km assert_quantity_allclose(aacs[2].alt, 90*u.deg, atol=1*u.arcsec) assert_quantity_allclose(aacs[2].distance, 10*u.km) @pytest.mark.remote_data def test_regression_simple_5133(): t = Time('J2010') obj = EarthLocation(-1*u.deg, 52*u.deg, height=[100., 0.]*u.km) home = EarthLocation(-1*u.deg, 52*u.deg, height=10.*u.km) aa = obj.get_itrs(t).transform_to(AltAz(obstime=t, location=home)) # az is more-or-less undefined for straight up or down assert_quantity_allclose(aa.alt, [90, -90]*u.deg, rtol=1e-5) assert_quantity_allclose(aa.distance, [90, 10]*u.km) def test_regression_5743(): sc = SkyCoord([5, 10], [20, 30], unit=u.deg, obstime=['2017-01-01T00:00', '2017-01-01T00:10']) assert sc[0].obstime.shape == tuple() @pytest.mark.remote_data def test_regression_5889_5890(): # ensure we can represent all Representations and transform to ND frames greenwich = EarthLocation( *u.Quantity([3980608.90246817, -102.47522911, 4966861.27310067], unit=u.m)) times = Time("2017-03-20T12:00:00") + np.linspace(-2, 2, 3)*u.hour moon = get_moon(times, location=greenwich) targets = SkyCoord([350.7*u.deg, 260.7*u.deg], [18.4*u.deg, 22.4*u.deg]) targs2d = targets[:, np.newaxis] targs2d.transform_to(moon) def test_regression_6236(): # sunpy changes its representation upon initialisation of a frame, # including via `realize_frame`. Ensure this works. class MyFrame(BaseCoordinateFrame): default_representation = CartesianRepresentation my_attr = QuantityAttribute(default=0, unit=u.m) class MySpecialFrame(MyFrame): def __init__(self, *args, **kwargs): _rep_kwarg = kwargs.get('representation_type', None) super().__init__(*args, **kwargs) if not _rep_kwarg: self.representation_type = self.default_representation self._data = self.data.represent_as(self.representation_type) rep1 = UnitSphericalRepresentation([0., 1]*u.deg, [2., 3.]*u.deg) rep2 = SphericalRepresentation([10., 11]*u.deg, [12., 13.]*u.deg, [14., 15.]*u.kpc) mf1 = MyFrame(rep1, my_attr=1.*u.km) mf2 = mf1.realize_frame(rep2) # Normally, data is stored as is, but the representation gets set to a # default, even if a different representation instance was passed in. # realize_frame should do the same. Just in case, check attrs are passed. assert mf1.data is rep1 assert mf2.data is rep2 assert mf1.representation_type is CartesianRepresentation assert mf2.representation_type is CartesianRepresentation assert mf2.my_attr == mf1.my_attr # It should be independent of whether I set the reprensentation explicitly mf3 = MyFrame(rep1, my_attr=1.*u.km, representation_type='unitspherical') mf4 = mf3.realize_frame(rep2) assert mf3.data is rep1 assert mf4.data is rep2 assert mf3.representation_type is UnitSphericalRepresentation assert mf4.representation_type is CartesianRepresentation assert mf4.my_attr == mf3.my_attr # This should be enough to help sunpy, but just to be sure, a test # even closer to what is done there, i.e., transform the representation. msf1 = MySpecialFrame(rep1, my_attr=1.*u.km) msf2 = msf1.realize_frame(rep2) assert msf1.data is not rep1 # Gets transformed to Cartesian. assert msf2.data is not rep2 assert type(msf1.data) is CartesianRepresentation assert type(msf2.data) is CartesianRepresentation assert msf1.representation_type is CartesianRepresentation assert msf2.representation_type is CartesianRepresentation assert msf2.my_attr == msf1.my_attr # And finally a test where the input is not transformed. 
msf3 = MySpecialFrame(rep1, my_attr=1.*u.km, representation_type='unitspherical') msf4 = msf3.realize_frame(rep2) assert msf3.data is rep1 assert msf4.data is not rep2 assert msf3.representation_type is UnitSphericalRepresentation assert msf4.representation_type is CartesianRepresentation assert msf4.my_attr == msf3.my_attr @pytest.mark.skipif(not HAS_SCIPY, reason='No Scipy') @pytest.mark.skipif(OLDER_SCIPY, reason='Scipy too old') def test_regression_6347(): sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg) sc2 = SkyCoord([1.1, 2.1]*u.deg, [3.1, 4.1]*u.deg) sc0 = sc1[:0] idx1_10, idx2_10, d2d_10, d3d_10 = sc1.search_around_sky(sc2, 10*u.arcmin) idx1_1, idx2_1, d2d_1, d3d_1 = sc1.search_around_sky(sc2, 1*u.arcmin) idx1_0, idx2_0, d2d_0, d3d_0 = sc0.search_around_sky(sc2, 10*u.arcmin) assert len(d2d_10) == 2 assert len(d2d_0) == 0 assert type(d2d_0) is type(d2d_10) assert len(d2d_1) == 0 assert type(d2d_1) is type(d2d_10) @pytest.mark.skipif(not HAS_SCIPY, reason='No Scipy') @pytest.mark.skipif(OLDER_SCIPY, reason='Scipy too old') def test_regression_6347_3d(): sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, [5, 6]*u.kpc) sc2 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, [5.1, 6.1]*u.kpc) sc0 = sc1[:0] idx1_10, idx2_10, d2d_10, d3d_10 = sc1.search_around_3d(sc2, 500*u.pc) idx1_1, idx2_1, d2d_1, d3d_1 = sc1.search_around_3d(sc2, 50*u.pc) idx1_0, idx2_0, d2d_0, d3d_0 = sc0.search_around_3d(sc2, 500*u.pc) assert len(d2d_10) > 0 assert len(d2d_0) == 0 assert type(d2d_0) is type(d2d_10) assert len(d2d_1) == 0 assert type(d2d_1) is type(d2d_10) def test_regression_6300(): """Check that importing old frame attribute names from astropy.coordinates still works. See comments at end of #6300 """ from astropy.utils.exceptions import AstropyDeprecationWarning from astropy.coordinates import CartesianRepresentation from astropy.coordinates import (TimeFrameAttribute, QuantityFrameAttribute, CartesianRepresentationFrameAttribute) with catch_warnings() as found_warnings: attr = TimeFrameAttribute(default=Time("J2000")) for w in found_warnings: if issubclass(w.category, AstropyDeprecationWarning): break else: assert False, "Deprecation warning not raised" with catch_warnings() as found_warnings: attr = QuantityFrameAttribute(default=5*u.km) for w in found_warnings: if issubclass(w.category, AstropyDeprecationWarning): break else: assert False, "Deprecation warning not raised" with catch_warnings() as found_warnings: attr = CartesianRepresentationFrameAttribute( default=CartesianRepresentation([5,6,7]*u.kpc)) for w in found_warnings: if issubclass(w.category, AstropyDeprecationWarning): break else: assert False, "Deprecation warning not raised" @pytest.mark.remote_data def test_gcrs_itrs_cartesian_repr(): # issue 6436: transformation failed if coordinate representation was # Cartesian gcrs = GCRS(CartesianRepresentation((859.07256, -4137.20368, 5295.56871), unit='km'), representation_type='cartesian') gcrs.transform_to(ITRS) @pytest.mark.skipif('not HAS_YAML') def test_regression_6446(): # this succeeds even before 6446: sc1 = SkyCoord([1, 2], [3, 4], unit='deg') t1 = Table([sc1]) sio1 = io.StringIO() t1.write(sio1, format='ascii.ecsv') # but this fails due to the 6446 bug c1 = SkyCoord(1, 3, unit='deg') c2 = SkyCoord(2, 4, unit='deg') sc2 = SkyCoord([c1, c2]) t2 = Table([sc2]) sio2 = io.StringIO() t2.write(sio2, format='ascii.ecsv') assert sio1.getvalue() == sio2.getvalue() def test_regression_6448(): """ This tests the more narrow problem reported in 6446 that 6448 is meant to fix. 
    `test_regression_6446` also covers this, but this test is provided
    so that this is still tested even if YAML isn't installed.
    """
    sc1 = SkyCoord([1, 2], [3, 4], unit='deg')
    # this should always succeed even prior to 6448
    assert sc1.galcen_v_sun is None

    c1 = SkyCoord(1, 3, unit='deg')
    c2 = SkyCoord(2, 4, unit='deg')
    sc2 = SkyCoord([c1, c2])
    # without 6448 this fails
    assert sc2.galcen_v_sun is None


def test_regression_6597():
    frame_name = 'galactic'
    c1 = SkyCoord(1, 3, unit='deg', frame=frame_name)
    c2 = SkyCoord(2, 4, unit='deg', frame=frame_name)
    sc1 = SkyCoord([c1, c2])

    assert sc1.frame.name == frame_name


def test_regression_6597_2():
    """
    This tests the more subtle flaw that #6597 indirectly uncovered: that even
    in the case that the frames are ra/dec, they still might be the wrong
    *kind*
    """
    frame = FK4(equinox='J1949')
    c1 = SkyCoord(1, 3, unit='deg', frame=frame)
    c2 = SkyCoord(2, 4, unit='deg', frame=frame)
    sc1 = SkyCoord([c1, c2])

    assert sc1.frame.name == frame.name


@pytest.mark.remote_data
def test_regression_6697():
    """
    Test for regression of a bug in get_gcrs_posvel that introduced errors at
    the 1 m/s level.

    Comparison data is derived from calculation in PINT
    https://github.com/nanograv/PINT/blob/master/pint/erfautils.py
    """
    pint_vels = CartesianRepresentation(*(348.63632871, -212.31704928,
                                          -0.60154936), unit=u.m/u.s)
    location = EarthLocation(*(5327448.9957829, -1718665.73869569,
                               3051566.90295403), unit=u.m)
    t = Time(2458036.161966612, format='jd')
    obsgeopos, obsgeovel = location.get_gcrs_posvel(t)
    delta = (obsgeovel - pint_vels).norm()
    assert delta < 1*u.cm/u.s


def test_regression_8138():
    sc = SkyCoord(1*u.deg, 2*u.deg)
    newframe = GCRS()
    sc2 = sc.transform_to(newframe)
    assert newframe.is_equivalent_frame(sc2.frame)


def test_regression_8276():
    from astropy.coordinates import baseframe

    with pytest.raises(TypeError) as excinfo:
        class MyFrame(BaseCoordinateFrame):
            a = QuantityAttribute(unit=u.m)
        # note that the remainder of this with clause does not get executed
        # because an exception is raised here. A future PR is planned to
        # allow the default to be left off, after which the rest of this
        # test will get executed, so it is being left in place. See
        # https://github.com/astropy/astropy/pull/8300 for more info

        # we save the transform graph so that it doesn't accidentally mess
        # with other tests
        old_transform_graph = baseframe.frame_transform_graph
        try:
            baseframe.frame_transform_graph = copy.copy(
                baseframe.frame_transform_graph)

            # as reported in 8276, this fails right here because registering
            # the transform tries to create a frame attribute
            @baseframe.frame_transform_graph.transform(FunctionTransform,
                                                       MyFrame, AltAz)
            def trans(my_frame_coord, altaz_frame):
                pass

            # should also be able to *create* the Frame at this point
            MyFrame()
        finally:
            baseframe.frame_transform_graph = old_transform_graph

    assert "missing 1 required positional argument: 'default'" in str(excinfo.value)
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from numpy import testing as npt from astropy.tests.helper import assert_quantity_allclose as assert_allclose from astropy import units as u from astropy.utils import minversion from astropy.coordinates import matching """ These are the tests for coordinate matching. Note that this requires scipy. """ try: import scipy HAS_SCIPY = True except ImportError: HAS_SCIPY = False if HAS_SCIPY and minversion(scipy, '0.12.0', inclusive=False): OLDER_SCIPY = False else: OLDER_SCIPY = True @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_function(): from astropy.coordinates import ICRS from astropy.coordinates.matching import match_coordinates_3d # this only uses match_coordinates_3d because that's the actual implementation cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx, [3, 1]) npt.assert_array_almost_equal(d2d.degree, [0, 0.1]) assert d3d.value[0] == 0 idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, nthneighbor=2) assert np.all(idx == 2) npt.assert_array_almost_equal(d2d.degree, [1, 0.9]) npt.assert_array_less(d3d.value, 0.02) @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_function_3d_and_sky(): from astropy.coordinates import ICRS from astropy.coordinates.matching import match_coordinates_3d, match_coordinates_sky cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx, [2, 3]) assert_allclose(d2d, [1, 1.9] * u.deg) assert np.abs(d3d[0].to_value(u.kpc) - np.radians(1)) < 1e-6 assert np.abs(d3d[1].to_value(u.kpc) - 5*np.radians(1.9)) < 1e-5 idx, d2d, d3d = match_coordinates_sky(cmatch, ccatalog) npt.assert_array_equal(idx, [3, 1]) assert_allclose(d2d, [0, 0.1] * u.deg) assert_allclose(d3d, [4, 4.0000019] * u.kpc) @pytest.mark.parametrize('functocheck, args, defaultkdtname, bothsaved', [(matching.match_coordinates_3d, [], 'kdtree_3d', False), (matching.match_coordinates_sky, [], 'kdtree_sky', False), (matching.search_around_3d, [1*u.kpc], 'kdtree_3d', True), (matching.search_around_sky, [1*u.deg], 'kdtree_sky', False) ]) @pytest.mark.skipif(str('not HAS_SCIPY')) def test_kdtree_storage(functocheck, args, defaultkdtname, bothsaved): from astropy.coordinates import ICRS def make_scs(): cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 2]*u.kpc) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 2, 3, 4]*u.kpc) return cmatch, ccatalog cmatch, ccatalog = make_scs() functocheck(cmatch, ccatalog, *args, storekdtree=False) assert 'kdtree' not in ccatalog.cache assert defaultkdtname not in ccatalog.cache cmatch, ccatalog = make_scs() functocheck(cmatch, ccatalog, *args) assert defaultkdtname in ccatalog.cache assert 'kdtree' not in ccatalog.cache cmatch, ccatalog = make_scs() functocheck(cmatch, ccatalog, *args, storekdtree=True) assert 'kdtree' in ccatalog.cache assert defaultkdtname not in ccatalog.cache cmatch, ccatalog = make_scs() assert 'tislit_cheese' not in ccatalog.cache functocheck(cmatch, ccatalog, *args, storekdtree='tislit_cheese') assert 'tislit_cheese' in ccatalog.cache assert defaultkdtname not in ccatalog.cache assert 'kdtree' not in ccatalog.cache if bothsaved: assert 
'tislit_cheese' in cmatch.cache assert defaultkdtname not in cmatch.cache assert 'kdtree' not in cmatch.cache else: assert 'tislit_cheese' not in cmatch.cache # now a bit of a hacky trick to make sure it at least tries to *use* it ccatalog.cache['tislit_cheese'] = 1 cmatch.cache['tislit_cheese'] = 1 with pytest.raises(TypeError) as e: functocheck(cmatch, ccatalog, *args, storekdtree='tislit_cheese') assert 'KD' in e.value.args[0] @pytest.mark.skipif(str('not HAS_SCIPY')) def test_python_kdtree(monkeypatch): from astropy.coordinates import ICRS cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 2]*u.kpc) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 2, 3, 4]*u.kpc) monkeypatch.delattr("scipy.spatial.cKDTree") with pytest.warns(UserWarning, match='C-based KD tree not found'): matching.match_coordinates_sky(cmatch, ccatalog) @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_method(): from astropy.coordinates import ICRS, SkyCoord from astropy.utils import NumpyRNGContext from astropy.coordinates.matching import match_coordinates_3d, match_coordinates_sky with NumpyRNGContext(987654321): cmatch = ICRS(np.random.rand(20) * 360.*u.degree, (np.random.rand(20) * 180. - 90.)*u.degree) ccatalog = ICRS(np.random.rand(100) * 360. * u.degree, (np.random.rand(100) * 180. - 90.)*u.degree) idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_3d(ccatalog) idx2, d2d2, d3d2 = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx1, idx2) assert_allclose(d2d1, d2d2) assert_allclose(d3d1, d3d2) # should be the same as above because there's no distance, but just make sure this method works idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_sky(ccatalog) idx2, d2d2, d3d2 = match_coordinates_sky(cmatch, ccatalog) npt.assert_array_equal(idx1, idx2) assert_allclose(d2d1, d2d2) assert_allclose(d3d1, d3d2) assert len(idx1) == len(d2d1) == len(d3d1) == 20 @pytest.mark.skipif(str('not HAS_SCIPY')) @pytest.mark.skipif(str('OLDER_SCIPY')) def test_search_around(): from astropy.coordinates import ICRS, SkyCoord from astropy.coordinates.matching import search_around_sky, search_around_3d coo1 = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc) coo2 = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc) idx1_1deg, idx2_1deg, d2d_1deg, d3d_1deg = search_around_sky(coo1, coo2, 1.01*u.deg) idx1_0p05deg, idx2_0p05deg, d2d_0p05deg, d3d_0p05deg = search_around_sky(coo1, coo2, 0.05*u.deg) assert list(zip(idx1_1deg, idx2_1deg)) == [(0, 2), (0, 3), (1, 1), (1, 2)] assert d2d_1deg[0] == 1.0*u.deg assert_allclose(d2d_1deg, [1, 0, .1, .9]*u.deg) assert list(zip(idx1_0p05deg, idx2_0p05deg)) == [(0, 3)] idx1_1kpc, idx2_1kpc, d2d_1kpc, d3d_1kpc = search_around_3d(coo1, coo2, 1*u.kpc) idx1_sm, idx2_sm, d2d_sm, d3d_sm = search_around_3d(coo1, coo2, 0.05*u.kpc) assert list(zip(idx1_1kpc, idx2_1kpc)) == [(0, 0), (0, 1), (0, 2), (1, 3)] assert list(zip(idx1_sm, idx2_sm)) == [(0, 1), (0, 2)] assert_allclose(d2d_sm, [2, 1]*u.deg) # Test for the non-matches, #4877 coo1 = ICRS([4.1, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc) idx1, idx2, d2d, d3d = search_around_sky(coo1, coo2, 1*u.arcsec) assert idx1.size == idx2.size == d2d.size == d3d.size == 0 assert idx1.dtype == idx2.dtype == np.int assert d2d.unit == u.deg assert d3d.unit == u.kpc idx1, idx2, d2d, d3d = search_around_3d(coo1, coo2, 1*u.m) assert idx1.size == idx2.size == d2d.size == d3d.size == 0 assert idx1.dtype == idx2.dtype == np.int assert d2d.unit == u.deg assert 
d3d.unit == u.kpc # Test when one or both of the coordinate arrays is empty, #4875 empty = ICRS(ra=[] * u.degree, dec=[] * u.degree, distance=[] * u.kpc) idx1, idx2, d2d, d3d = search_around_sky(empty, coo2, 1*u.arcsec) assert idx1.size == idx2.size == d2d.size == d3d.size == 0 assert idx1.dtype == idx2.dtype == np.int assert d2d.unit == u.deg assert d3d.unit == u.kpc idx1, idx2, d2d, d3d = search_around_sky(coo1, empty, 1*u.arcsec) assert idx1.size == idx2.size == d2d.size == d3d.size == 0 assert idx1.dtype == idx2.dtype == np.int assert d2d.unit == u.deg assert d3d.unit == u.kpc empty = ICRS(ra=[] * u.degree, dec=[] * u.degree, distance=[] * u.kpc) idx1, idx2, d2d, d3d = search_around_sky(empty, empty[:], 1*u.arcsec) assert idx1.size == idx2.size == d2d.size == d3d.size == 0 assert idx1.dtype == idx2.dtype == np.int assert d2d.unit == u.deg assert d3d.unit == u.kpc idx1, idx2, d2d, d3d = search_around_3d(empty, coo2, 1*u.m) assert idx1.size == idx2.size == d2d.size == d3d.size == 0 assert idx1.dtype == idx2.dtype == np.int assert d2d.unit == u.deg assert d3d.unit == u.kpc idx1, idx2, d2d, d3d = search_around_3d(coo1, empty, 1*u.m) assert idx1.size == idx2.size == d2d.size == d3d.size == 0 assert idx1.dtype == idx2.dtype == np.int assert d2d.unit == u.deg assert d3d.unit == u.kpc idx1, idx2, d2d, d3d = search_around_3d(empty, empty[:], 1*u.m) assert idx1.size == idx2.size == d2d.size == d3d.size == 0 assert idx1.dtype == idx2.dtype == np.int assert d2d.unit == u.deg assert d3d.unit == u.kpc # Test that input without distance units results in a # 'dimensionless_unscaled' unit cempty = SkyCoord(ra=[], dec=[], unit=u.deg) idx1, idx2, d2d, d3d = search_around_3d(cempty, cempty[:], 1*u.m) assert d2d.unit == u.deg assert d3d.unit == u.dimensionless_unscaled idx1, idx2, d2d, d3d = search_around_sky(cempty, cempty[:], 1*u.m) assert d2d.unit == u.deg assert d3d.unit == u.dimensionless_unscaled @pytest.mark.skipif(str('not HAS_SCIPY')) @pytest.mark.skipif(str('OLDER_SCIPY')) def test_search_around_scalar(): from astropy.coordinates import SkyCoord, Angle cat = SkyCoord([1, 2, 3], [-30, 45, 8], unit="deg") target = SkyCoord('1.1 -30.1', unit="deg") with pytest.raises(ValueError) as excinfo: cat.search_around_sky(target, Angle('2d')) # make sure the error message is *specific* to search_around_sky rather than # generic as reported in #3359 assert 'search_around_sky' in str(excinfo.value) with pytest.raises(ValueError) as excinfo: cat.search_around_3d(target, Angle('2d')) assert 'search_around_3d' in str(excinfo.value) @pytest.mark.skipif(str('not HAS_SCIPY')) @pytest.mark.skipif(str('OLDER_SCIPY')) def test_match_catalog_empty(): from astropy.coordinates import SkyCoord sc1 = SkyCoord(1, 2, unit="deg") cat0 = SkyCoord([], [], unit="deg") cat1 = SkyCoord([1.1], [2.1], unit="deg") cat2 = SkyCoord([1.1, 3], [2.1, 5], unit="deg") sc1.match_to_catalog_sky(cat2) sc1.match_to_catalog_3d(cat2) sc1.match_to_catalog_sky(cat1) sc1.match_to_catalog_3d(cat1) with pytest.raises(ValueError) as excinfo: sc1.match_to_catalog_sky(cat1[0]) assert 'catalog' in str(excinfo.value) with pytest.raises(ValueError) as excinfo: sc1.match_to_catalog_3d(cat1[0]) assert 'catalog' in str(excinfo.value) with pytest.raises(ValueError) as excinfo: sc1.match_to_catalog_sky(cat0) assert 'catalog' in str(excinfo.value) with pytest.raises(ValueError) as excinfo: sc1.match_to_catalog_3d(cat0) assert 'catalog' in str(excinfo.value)
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest
import numpy as np
from numpy import testing as npt

from astropy import units as u
from astropy.time import Time
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.coordinates import (Angle, ICRS, FK4, FK5, Galactic, SkyCoord,
                                 CartesianRepresentation)
from astropy.coordinates.angle_utilities import dms_to_degrees, hms_to_hours


def test_angle_arrays():
    """
    Test array values with Angle objects.
    """
    # Tests incomplete
    a1 = Angle([0, 45, 90, 180, 270, 360, 720.], unit=u.degree)
    npt.assert_almost_equal([0., 45., 90., 180., 270., 360., 720.], a1.value)

    a2 = Angle(np.array([-90, -45, 0, 45, 90, 180, 270, 360]),
               unit=u.degree)
    npt.assert_almost_equal([-90, -45, 0, 45, 90, 180, 270, 360], a2.value)

    a3 = Angle(["12 degrees", "3 hours", "5 deg", "4rad"])
    npt.assert_almost_equal([12., 45., 5., 229.18311805], a3.value)
    assert a3.unit == u.degree

    a4 = Angle(["12 degrees", "3 hours", "5 deg", "4rad"], u.radian)
    npt.assert_almost_equal(a4.degree, a3.value)
    assert a4.unit == u.radian

    a5 = Angle([0, 45, 90, 180, 270, 360], unit=u.degree)
    a6 = a5.sum()
    npt.assert_almost_equal(a6.value, 945.0)
    assert a6.unit is u.degree

    with pytest.raises(TypeError):
        # Arrays where the elements are Angle objects are not supported -- it's
        # really tricky to do correctly, if at all, due to the possibility of
        # nesting.
        a7 = Angle([a1, a2, a3], unit=u.degree)

    a8 = Angle(["04:02:02", "03:02:01", "06:02:01"], unit=u.degree)
    npt.assert_almost_equal(a8.value, [4.03388889, 3.03361111, 6.03361111])

    a9 = Angle(np.array(["04:02:02", "03:02:01", "06:02:01"]), unit=u.degree)
    npt.assert_almost_equal(a9.value, a8.value)

    with pytest.raises(u.UnitsError):
        a10 = Angle(["04:02:02", "03:02:01", "06:02:01"])


def test_dms():
    a1 = Angle([0, 45.5, -45.5], unit=u.degree)
    d, m, s = a1.dms
    npt.assert_almost_equal(d, [0, 45, -45])
    npt.assert_almost_equal(m, [0, 30, -30])
    npt.assert_almost_equal(s, [0, 0, -0])

    dms = a1.dms
    degrees = dms_to_degrees(*dms)
    npt.assert_almost_equal(a1.degree, degrees)

    a2 = Angle(dms, unit=u.degree)
    npt.assert_almost_equal(a2.radian, a1.radian)


def test_hms():
    a1 = Angle([0, 11.5, -11.5], unit=u.hour)
    h, m, s = a1.hms
    npt.assert_almost_equal(h, [0, 11, -11])
    npt.assert_almost_equal(m, [0, 30, -30])
    npt.assert_almost_equal(s, [0, 0, -0])

    hms = a1.hms
    hours = hms_to_hours(*hms)
    npt.assert_almost_equal(a1.hour, hours)

    a2 = Angle(hms, unit=u.hour)
    npt.assert_almost_equal(a2.radian, a1.radian)


def test_array_coordinates_creation():
    """
    Test creating coordinates from arrays.
    """
    c = ICRS(np.array([1, 2])*u.deg, np.array([3, 4])*u.deg)
    assert not c.ra.isscalar

    with pytest.raises(ValueError):
        c = ICRS(np.array([1, 2])*u.deg, np.array([3, 4, 5])*u.deg)
    with pytest.raises(ValueError):
        c = ICRS(np.array([1, 2, 4, 5])*u.deg,
                 np.array([[3, 4], [5, 6]])*u.deg)

    # make sure cartesian initialization also works
    cart = CartesianRepresentation(x=[1., 2.]*u.kpc, y=[3., 4.]*u.kpc,
                                   z=[5., 6.]*u.kpc)
    c = ICRS(cart)

    # also ensure strings can be arrays
    c = SkyCoord(['1d0m0s', '2h02m00.3s'], ['3d', '4d'])

    # but invalid strings cannot
    with pytest.raises(ValueError):
        c = SkyCoord(Angle(['10m0s', '2h02m00.3s']), Angle(['3d', '4d']))
    with pytest.raises(ValueError):
        c = SkyCoord(Angle(['1d0m0s', '2h02m00.3s']), Angle(['3x', '4d']))


def test_array_coordinates_distances():
    """
    Test creating coordinates from arrays and distances.
""" # correct way ICRS(ra=np.array([1, 2])*u.deg, dec=np.array([3, 4])*u.deg, distance=[.1, .2] * u.kpc) with pytest.raises(ValueError): # scalar distance and mismatched array coordinates ICRS(ra=np.array([1, 2, 3])*u.deg, dec=np.array([[3, 4], [5, 6]])*u.deg, distance=2. * u.kpc) with pytest.raises(ValueError): # more distance values than coordinates ICRS(ra=np.array([1, 2])*u.deg, dec=np.array([3, 4])*u.deg, distance=[.1, .2, 3.] * u.kpc) @pytest.mark.parametrize(('arrshape', 'distance'), [((2, ), None), ((4, 2, 5), None), ((4, 2, 5), 2 * u.kpc)]) def test_array_coordinates_transformations(arrshape, distance): """ Test transformation on coordinates with array content (first length-2 1D, then a 3D array) """ # M31 coordinates from test_transformations raarr = np.ones(arrshape) * 10.6847929 decarr = np.ones(arrshape) * 41.2690650 if distance is not None: distance = np.ones(arrshape) * distance print(raarr, decarr, distance) c = ICRS(ra=raarr*u.deg, dec=decarr*u.deg, distance=distance) g = c.transform_to(Galactic) assert g.l.shape == arrshape npt.assert_array_almost_equal(g.l.degree, 121.17440967) npt.assert_array_almost_equal(g.b.degree, -21.57299631) if distance is not None: assert g.distance.unit == c.distance.unit # now make sure round-tripping works through FK5 c2 = c.transform_to(FK5).transform_to(ICRS) npt.assert_array_almost_equal(c.ra.radian, c2.ra.radian) npt.assert_array_almost_equal(c.dec.radian, c2.dec.radian) assert c2.ra.shape == arrshape if distance is not None: assert c2.distance.unit == c.distance.unit # also make sure it's possible to get to FK4, which uses a direct transform function. fk4 = c.transform_to(FK4) npt.assert_array_almost_equal(fk4.ra.degree, 10.0004, decimal=4) npt.assert_array_almost_equal(fk4.dec.degree, 40.9953, decimal=4) assert fk4.ra.shape == arrshape if distance is not None: assert fk4.distance.unit == c.distance.unit # now check the reverse transforms run cfk4 = fk4.transform_to(ICRS) assert cfk4.ra.shape == arrshape def test_array_precession(): """ Ensures that FK5 coordinates as arrays precess their equinoxes """ j2000 = Time('J2000') j1975 = Time('J1975') fk5 = FK5([1, 1.1]*u.radian, [0.5, 0.6]*u.radian) assert fk5.equinox.jyear == j2000.jyear fk5_2 = fk5.transform_to(FK5(equinox=j1975)) assert fk5_2.equinox.jyear == j1975.jyear npt.assert_array_less(0.05, np.abs(fk5.ra.degree - fk5_2.ra.degree)) npt.assert_array_less(0.05, np.abs(fk5.dec.degree - fk5_2.dec.degree)) def test_array_separation(): c1 = ICRS([0, 0]*u.deg, [0, 0]*u.deg) c2 = ICRS([1, 2]*u.deg, [0, 0]*u.deg) npt.assert_array_almost_equal(c1.separation(c2).degree, [1, 2]) c3 = ICRS([0, 3.]*u.deg, [0., 0]*u.deg, distance=[1, 1.] * u.kpc) c4 = ICRS([1, 1.]*u.deg, [0., 0]*u.deg, distance=[1, 1.] 
* u.kpc) # the 3-1 separation should be twice the 0-1 separation, but not *exactly* the same sep = c3.separation_3d(c4) sepdiff = sep[1] - (2 * sep[0]) assert abs(sepdiff.value) < 1e-5 assert sepdiff != 0 def test_array_indexing(): ra = np.linspace(0, 360, 10) dec = np.linspace(-90, 90, 10) j1975 = Time(1975, format='jyear') c1 = FK5(ra*u.deg, dec*u.deg, equinox=j1975) c2 = c1[4] assert c2.ra.degree == 160 assert c2.dec.degree == -10 c3 = c1[2:5] assert_allclose(c3.ra, [80, 120, 160] * u.deg) assert_allclose(c3.dec, [-50, -30, -10] * u.deg) c4 = c1[np.array([2, 5, 8])] assert_allclose(c4.ra, [80, 200, 320] * u.deg) assert_allclose(c4.dec, [-50, 10, 70] * u.deg) # now make sure the equinox is preserved assert c2.equinox == c1.equinox assert c3.equinox == c1.equinox assert c4.equinox == c1.equinox def test_array_len(): input_length = [1, 5] for length in input_length: ra = np.linspace(0, 360, length) dec = np.linspace(0, 90, length) c = ICRS(ra*u.deg, dec*u.deg) assert len(c) == length assert c.shape == (length,) with pytest.raises(TypeError): c = ICRS(0*u.deg, 0*u.deg) len(c) assert c.shape == tuple() def test_array_eq(): c1 = ICRS([1, 2]*u.deg, [3, 4]*u.deg) c2 = ICRS([1, 2]*u.deg, [3, 5]*u.deg) c3 = ICRS([1, 3]*u.deg, [3, 4]*u.deg) c4 = ICRS([1, 2]*u.deg, [3, 4.2]*u.deg) assert c1 == c1 assert c1 != c2 assert c1 != c3 assert c1 != c4
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from copy import deepcopy from collections import OrderedDict import pytest import numpy as np from numpy.testing import assert_allclose from astropy import units as u from astropy.tests.helper import (assert_quantity_allclose as assert_allclose_quantity, catch_warnings) from astropy.utils import isiterable from astropy.utils.compat import NUMPY_LT_1_14 from astropy.utils.exceptions import AstropyDeprecationWarning from astropy.coordinates.angles import Longitude, Latitude, Angle from astropy.coordinates.distances import Distance from astropy.coordinates.representation import (REPRESENTATION_CLASSES, DIFFERENTIAL_CLASSES, BaseRepresentation, SphericalRepresentation, UnitSphericalRepresentation, SphericalCosLatDifferential, CartesianRepresentation, CylindricalRepresentation, PhysicsSphericalRepresentation, CartesianDifferential, SphericalDifferential, _combine_xyz) # Preserve the original REPRESENTATION_CLASSES dict so that importing # the test file doesn't add a persistent test subclass (LogDRepresentation) def setup_function(func): func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES) def teardown_function(func): REPRESENTATION_CLASSES.clear() REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG) class TestSphericalRepresentation: def test_name(self): assert SphericalRepresentation.get_name() == 'spherical' assert SphericalRepresentation.get_name() in REPRESENTATION_CLASSES def test_empty_init(self): with pytest.raises(TypeError) as exc: s = SphericalRepresentation() def test_init_quantity(self): s3 = SphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc) assert s3.lon == 8. * u.hourangle assert s3.lat == 5. * u.deg assert s3.distance == 10 * u.kpc assert isinstance(s3.lon, Longitude) assert isinstance(s3.lat, Latitude) assert isinstance(s3.distance, Distance) def test_init_lonlat(self): s2 = SphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg), Distance(10, u.kpc)) assert s2.lon == 8. * u.hourangle assert s2.lat == 5. * u.deg assert s2.distance == 10. * u.kpc assert isinstance(s2.lon, Longitude) assert isinstance(s2.lat, Latitude) assert isinstance(s2.distance, Distance) # also test that wrap_angle is preserved s3 = SphericalRepresentation(Longitude(-90, u.degree, wrap_angle=180*u.degree), Latitude(-45, u.degree), Distance(1., u.Rsun)) assert s3.lon == -90. 
* u.degree assert s3.lon.wrap_angle == 180 * u.degree def test_init_array(self): s1 = SphericalRepresentation(lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg, distance=[1, 2] * u.kpc) assert_allclose(s1.lon.degree, [120, 135]) assert_allclose(s1.lat.degree, [5, 6]) assert_allclose(s1.distance.kpc, [1, 2]) assert isinstance(s1.lon, Longitude) assert isinstance(s1.lat, Latitude) assert isinstance(s1.distance, Distance) def test_init_array_nocopy(self): lon = Longitude([8, 9] * u.hourangle) lat = Latitude([5, 6] * u.deg) distance = Distance([1, 2] * u.kpc) s1 = SphericalRepresentation(lon=lon, lat=lat, distance=distance, copy=False) lon[:] = [1, 2] * u.rad lat[:] = [3, 4] * u.arcmin distance[:] = [8, 9] * u.Mpc assert_allclose_quantity(lon, s1.lon) assert_allclose_quantity(lat, s1.lat) assert_allclose_quantity(distance, s1.distance) def test_init_float32_array(self): """Regression test against #2983""" lon = Longitude(np.float32([1., 2.]), u.degree) lat = Latitude(np.float32([3., 4.]), u.degree) s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False) assert s1.lon.dtype == np.float32 assert s1.lat.dtype == np.float32 assert s1._values['lon'].dtype == np.float32 assert s1._values['lat'].dtype == np.float32 def test_reprobj(self): s1 = SphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc) s2 = SphericalRepresentation.from_representation(s1) assert_allclose_quantity(s2.lon, 8. * u.hourangle) assert_allclose_quantity(s2.lat, 5. * u.deg) assert_allclose_quantity(s2.distance, 10 * u.kpc) def test_broadcasting(self): s1 = SphericalRepresentation(lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg, distance=10 * u.kpc) assert_allclose_quantity(s1.lon, [120, 135] * u.degree) assert_allclose_quantity(s1.lat, [5, 6] * u.degree) assert_allclose_quantity(s1.distance, [10, 10] * u.kpc) def test_broadcasting_mismatch(self): with pytest.raises(ValueError) as exc: s1 = SphericalRepresentation(lon=[8, 9, 10] * u.hourangle, lat=[5, 6] * u.deg, distance=[1, 2] * u.kpc) assert exc.value.args[0] == "Input parameters lon, lat, and distance cannot be broadcast" def test_readonly(self): s1 = SphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg, distance=1. * u.kpc) with pytest.raises(AttributeError): s1.lon = 1. * u.deg with pytest.raises(AttributeError): s1.lat = 1. * u.deg with pytest.raises(AttributeError): s1.distance = 1. * u.kpc def test_getitem_len_iterable(self): s = SphericalRepresentation(lon=np.arange(10) * u.deg, lat=-np.arange(10) * u.deg, distance=1 * u.kpc) s_slc = s[2:8:2] assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg) assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg) assert_allclose_quantity(s_slc.distance, [1, 1, 1] * u.kpc) assert len(s) == 10 assert isiterable(s) def test_getitem_len_iterable_scalar(self): s = SphericalRepresentation(lon=1 * u.deg, lat=-2 * u.deg, distance=3 * u.kpc) with pytest.raises(TypeError): s_slc = s[0] with pytest.raises(TypeError): len(s) assert not isiterable(s) def test_nan_distance(self): """ This is a regression test: calling represent_as() and passing in the same class as the object shouldn't round-trip through cartesian. 
""" sph = SphericalRepresentation(1*u.deg, 2*u.deg, np.nan*u.kpc) new_sph = sph.represent_as(SphericalRepresentation) assert_allclose_quantity(new_sph.lon, sph.lon) assert_allclose_quantity(new_sph.lat, sph.lat) dif = SphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr, 3*u.km/u.s) sph = sph.with_differentials(dif) new_sph = sph.represent_as(SphericalRepresentation) assert_allclose_quantity(new_sph.lon, sph.lon) assert_allclose_quantity(new_sph.lat, sph.lat) class TestUnitSphericalRepresentation: def test_name(self): assert UnitSphericalRepresentation.get_name() == 'unitspherical' assert UnitSphericalRepresentation.get_name() in REPRESENTATION_CLASSES def test_empty_init(self): with pytest.raises(TypeError) as exc: s = UnitSphericalRepresentation() def test_init_quantity(self): s3 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg) assert s3.lon == 8. * u.hourangle assert s3.lat == 5. * u.deg assert isinstance(s3.lon, Longitude) assert isinstance(s3.lat, Latitude) def test_init_lonlat(self): s2 = UnitSphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg)) assert s2.lon == 8. * u.hourangle assert s2.lat == 5. * u.deg assert isinstance(s2.lon, Longitude) assert isinstance(s2.lat, Latitude) def test_init_array(self): s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg) assert_allclose(s1.lon.degree, [120, 135]) assert_allclose(s1.lat.degree, [5, 6]) assert isinstance(s1.lon, Longitude) assert isinstance(s1.lat, Latitude) def test_init_array_nocopy(self): lon = Longitude([8, 9] * u.hourangle) lat = Latitude([5, 6] * u.deg) s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False) lon[:] = [1, 2] * u.rad lat[:] = [3, 4] * u.arcmin assert_allclose_quantity(lon, s1.lon) assert_allclose_quantity(lat, s1.lat) def test_reprobj(self): s1 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg) s2 = UnitSphericalRepresentation.from_representation(s1) assert_allclose_quantity(s2.lon, 8. * u.hourangle) assert_allclose_quantity(s2.lat, 5. * u.deg) def test_broadcasting(self): s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg) assert_allclose_quantity(s1.lon, [120, 135] * u.degree) assert_allclose_quantity(s1.lat, [5, 6] * u.degree) def test_broadcasting_mismatch(self): with pytest.raises(ValueError) as exc: s1 = UnitSphericalRepresentation(lon=[8, 9, 10] * u.hourangle, lat=[5, 6] * u.deg) assert exc.value.args[0] == "Input parameters lon and lat cannot be broadcast" def test_readonly(self): s1 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg) with pytest.raises(AttributeError): s1.lon = 1. * u.deg with pytest.raises(AttributeError): s1.lat = 1. * u.deg def test_getitem(self): s = UnitSphericalRepresentation(lon=np.arange(10) * u.deg, lat=-np.arange(10) * u.deg) s_slc = s[2:8:2] assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg) assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg) def test_getitem_scalar(self): s = UnitSphericalRepresentation(lon=1 * u.deg, lat=-2 * u.deg) with pytest.raises(TypeError): s_slc = s[0] class TestPhysicsSphericalRepresentation: def test_name(self): assert PhysicsSphericalRepresentation.get_name() == 'physicsspherical' assert PhysicsSphericalRepresentation.get_name() in REPRESENTATION_CLASSES def test_empty_init(self): with pytest.raises(TypeError) as exc: s = PhysicsSphericalRepresentation() def test_init_quantity(self): s3 = PhysicsSphericalRepresentation(phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc) assert s3.phi == 8. 
* u.hourangle assert s3.theta == 5. * u.deg assert s3.r == 10 * u.kpc assert isinstance(s3.phi, Angle) assert isinstance(s3.theta, Angle) assert isinstance(s3.r, Distance) def test_init_phitheta(self): s2 = PhysicsSphericalRepresentation(Angle(8, u.hour), Angle(5, u.deg), Distance(10, u.kpc)) assert s2.phi == 8. * u.hourangle assert s2.theta == 5. * u.deg assert s2.r == 10. * u.kpc assert isinstance(s2.phi, Angle) assert isinstance(s2.theta, Angle) assert isinstance(s2.r, Distance) def test_init_array(self): s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle, theta=[5, 6] * u.deg, r=[1, 2] * u.kpc) assert_allclose(s1.phi.degree, [120, 135]) assert_allclose(s1.theta.degree, [5, 6]) assert_allclose(s1.r.kpc, [1, 2]) assert isinstance(s1.phi, Angle) assert isinstance(s1.theta, Angle) assert isinstance(s1.r, Distance) def test_init_array_nocopy(self): phi = Angle([8, 9] * u.hourangle) theta = Angle([5, 6] * u.deg) r = Distance([1, 2] * u.kpc) s1 = PhysicsSphericalRepresentation(phi=phi, theta=theta, r=r, copy=False) phi[:] = [1, 2] * u.rad theta[:] = [3, 4] * u.arcmin r[:] = [8, 9] * u.Mpc assert_allclose_quantity(phi, s1.phi) assert_allclose_quantity(theta, s1.theta) assert_allclose_quantity(r, s1.r) def test_reprobj(self): s1 = PhysicsSphericalRepresentation(phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc) s2 = PhysicsSphericalRepresentation.from_representation(s1) assert_allclose_quantity(s2.phi, 8. * u.hourangle) assert_allclose_quantity(s2.theta, 5. * u.deg) assert_allclose_quantity(s2.r, 10 * u.kpc) def test_broadcasting(self): s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle, theta=[5, 6] * u.deg, r=10 * u.kpc) assert_allclose_quantity(s1.phi, [120, 135] * u.degree) assert_allclose_quantity(s1.theta, [5, 6] * u.degree) assert_allclose_quantity(s1.r, [10, 10] * u.kpc) def test_broadcasting_mismatch(self): with pytest.raises(ValueError) as exc: s1 = PhysicsSphericalRepresentation(phi=[8, 9, 10] * u.hourangle, theta=[5, 6] * u.deg, r=[1, 2] * u.kpc) assert exc.value.args[0] == "Input parameters phi, theta, and r cannot be broadcast" def test_readonly(self): s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle, theta=[5, 6] * u.deg, r=[10, 20] * u.kpc) with pytest.raises(AttributeError): s1.phi = 1. * u.deg with pytest.raises(AttributeError): s1.theta = 1. * u.deg with pytest.raises(AttributeError): s1.r = 1. 
* u.kpc def test_getitem(self): s = PhysicsSphericalRepresentation(phi=np.arange(10) * u.deg, theta=np.arange(5, 15) * u.deg, r=1 * u.kpc) s_slc = s[2:8:2] assert_allclose_quantity(s_slc.phi, [2, 4, 6] * u.deg) assert_allclose_quantity(s_slc.theta, [7, 9, 11] * u.deg) assert_allclose_quantity(s_slc.r, [1, 1, 1] * u.kpc) def test_getitem_scalar(self): s = PhysicsSphericalRepresentation(phi=1 * u.deg, theta=2 * u.deg, r=3 * u.kpc) with pytest.raises(TypeError): s_slc = s[0] class TestCartesianRepresentation: def test_name(self): assert CartesianRepresentation.get_name() == 'cartesian' assert CartesianRepresentation.get_name() in REPRESENTATION_CLASSES def test_empty_init(self): with pytest.raises(TypeError) as exc: s = CartesianRepresentation() def test_init_quantity(self): s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc) assert s1.x.unit is u.kpc assert s1.y.unit is u.kpc assert s1.z.unit is u.kpc assert_allclose(s1.x.value, 1) assert_allclose(s1.y.value, 2) assert_allclose(s1.z.value, 3) def test_init_singleunit(self): s1 = CartesianRepresentation(x=1, y=2, z=3, unit=u.kpc) assert s1.x.unit is u.kpc assert s1.y.unit is u.kpc assert s1.z.unit is u.kpc assert_allclose(s1.x.value, 1) assert_allclose(s1.y.value, 2) assert_allclose(s1.z.value, 3) def test_init_array(self): s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc, y=[2, 3, 4] * u.Mpc, z=[3, 4, 5] * u.kpc) assert s1.x.unit is u.pc assert s1.y.unit is u.Mpc assert s1.z.unit is u.kpc assert_allclose(s1.x.value, [1, 2, 3]) assert_allclose(s1.y.value, [2, 3, 4]) assert_allclose(s1.z.value, [3, 4, 5]) def test_init_one_array(self): s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc) assert s1.x.unit is u.pc assert s1.y.unit is u.pc assert s1.z.unit is u.pc assert_allclose(s1.x.value, 1) assert_allclose(s1.y.value, 2) assert_allclose(s1.z.value, 3) r = np.arange(27.).reshape(3, 3, 3) * u.kpc s2 = CartesianRepresentation(r, xyz_axis=0) assert s2.shape == (3, 3) assert s2.x.unit == u.kpc assert np.all(s2.x == r[0]) assert np.all(s2.xyz == r) assert np.all(s2.get_xyz(xyz_axis=0) == r) s3 = CartesianRepresentation(r, xyz_axis=1) assert s3.shape == (3, 3) assert np.all(s3.x == r[:, 0]) assert np.all(s3.y == r[:, 1]) assert np.all(s3.z == r[:, 2]) assert np.all(s3.get_xyz(xyz_axis=1) == r) s4 = CartesianRepresentation(r, xyz_axis=2) assert s4.shape == (3, 3) assert np.all(s4.x == r[:, :, 0]) assert np.all(s4.get_xyz(xyz_axis=2) == r) s5 = CartesianRepresentation(r, unit=u.pc) assert s5.x.unit == u.pc assert np.all(s5.xyz == r) s6 = CartesianRepresentation(r.value, unit=u.pc, xyz_axis=2) assert s6.x.unit == u.pc assert np.all(s6.get_xyz(xyz_axis=2).value == r.value) def test_init_one_array_size_fail(self): with pytest.raises(ValueError) as exc: CartesianRepresentation(x=[1, 2, 3, 4] * u.pc) assert exc.value.args[0].startswith("too many values to unpack") def test_init_xyz_but_more_than_one_array_fail(self): with pytest.raises(ValueError) as exc: CartesianRepresentation(x=[1, 2, 3] * u.pc, y=[2, 3, 4] * u.pc, z=[3, 4, 5] * u.pc, xyz_axis=0) assert 'xyz_axis should only be set' in str(exc) def test_init_one_array_yz_fail(self): with pytest.raises(ValueError) as exc: CartesianRepresentation(x=[1, 2, 3, 4] * u.pc, y=[1, 2] * u.pc) assert exc.value.args[0] == ("x, y, and z are required to instantiate " "CartesianRepresentation") def test_init_array_nocopy(self): x = [8, 9, 10] * u.pc y = [5, 6, 7] * u.Mpc z = [2, 3, 4] * u.kpc s1 = CartesianRepresentation(x=x, y=y, z=z, copy=False) x[:] = [1, 2, 3] * u.kpc y[:] = [9, 9, 8] * u.kpc z[:] = 
[1, 2, 1] * u.kpc assert_allclose_quantity(x, s1.x) assert_allclose_quantity(y, s1.y) assert_allclose_quantity(z, s1.z) def test_xyz_is_view_if_possible(self): xyz = np.arange(1., 10.).reshape(3, 3) s1 = CartesianRepresentation(xyz, unit=u.kpc, copy=False) s1_xyz = s1.xyz assert s1_xyz.value[0, 0] == 1. xyz[0, 0] = 0. assert s1.x[0] == 0. assert s1_xyz.value[0, 0] == 0. # Not possible: we don't check that tuples are from the same array xyz = np.arange(1., 10.).reshape(3, 3) s2 = CartesianRepresentation(*xyz, unit=u.kpc, copy=False) s2_xyz = s2.xyz assert s2_xyz.value[0, 0] == 1. xyz[0, 0] = 0. assert s2.x[0] == 0. assert s2_xyz.value[0, 0] == 1. def test_reprobj(self): s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc) s2 = CartesianRepresentation.from_representation(s1) assert s2.x == 1 * u.kpc assert s2.y == 2 * u.kpc assert s2.z == 3 * u.kpc def test_broadcasting(self): s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=5 * u.kpc) assert s1.x.unit == u.kpc assert s1.y.unit == u.kpc assert s1.z.unit == u.kpc assert_allclose(s1.x.value, [1, 2]) assert_allclose(s1.y.value, [3, 4]) assert_allclose(s1.z.value, [5, 5]) def test_broadcasting_mismatch(self): with pytest.raises(ValueError) as exc: s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6, 7] * u.kpc) assert exc.value.args[0] == "Input parameters x, y, and z cannot be broadcast" def test_readonly(self): s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc) with pytest.raises(AttributeError): s1.x = 1. * u.kpc with pytest.raises(AttributeError): s1.y = 1. * u.kpc with pytest.raises(AttributeError): s1.z = 1. * u.kpc def test_xyz(self): s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc) assert isinstance(s1.xyz, u.Quantity) assert s1.xyz.unit is u.kpc assert_allclose(s1.xyz.value, [1, 2, 3]) def test_unit_mismatch(self): q_len = u.Quantity([1], u.km) q_nonlen = u.Quantity([1], u.kg) with pytest.raises(u.UnitsError) as exc: s1 = CartesianRepresentation(x=q_nonlen, y=q_len, z=q_len) assert exc.value.args[0] == "x, y, and z should have matching physical types" with pytest.raises(u.UnitsError) as exc: s1 = CartesianRepresentation(x=q_len, y=q_nonlen, z=q_len) assert exc.value.args[0] == "x, y, and z should have matching physical types" with pytest.raises(u.UnitsError) as exc: s1 = CartesianRepresentation(x=q_len, y=q_len, z=q_nonlen) assert exc.value.args[0] == "x, y, and z should have matching physical types" def test_unit_non_length(self): s1 = CartesianRepresentation(x=1 * u.kg, y=2 * u.kg, z=3 * u.kg) s2 = CartesianRepresentation(x=1 * u.km / u.s, y=2 * u.km / u.s, z=3 * u.km / u.s) banana = u.def_unit('banana') s3 = CartesianRepresentation(x=1 * banana, y=2 * banana, z=3 * banana) def test_getitem(self): s = CartesianRepresentation(x=np.arange(10) * u.m, y=-np.arange(10) * u.m, z=3 * u.km) s_slc = s[2:8:2] assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m) assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m) assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km) def test_getitem_scalar(self): s = CartesianRepresentation(x=1 * u.m, y=-2 * u.m, z=3 * u.km) with pytest.raises(TypeError): s_slc = s[0] def test_transform(self): s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6] * u.kpc) matrix = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) s2 = s1.transform(matrix) assert_allclose(s2.x.value, [1 * 1 + 2 * 3 + 3 * 5, 1 * 2 + 2 * 4 + 3 * 6]) assert_allclose(s2.y.value, [4 * 1 + 5 * 3 + 6 * 5, 4 * 2 + 5 * 4 + 6 * 6]) 
assert_allclose(s2.z.value, [7 * 1 + 8 * 3 + 9 * 5, 7 * 2 + 8 * 4 + 9 * 6]) assert s2.x.unit is u.kpc assert s2.y.unit is u.kpc assert s2.z.unit is u.kpc class TestCylindricalRepresentation: def test_name(self): assert CylindricalRepresentation.get_name() == 'cylindrical' assert CylindricalRepresentation.get_name() in REPRESENTATION_CLASSES def test_empty_init(self): with pytest.raises(TypeError) as exc: s = CylindricalRepresentation() def test_init_quantity(self): s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc) assert s1.rho.unit is u.kpc assert s1.phi.unit is u.deg assert s1.z.unit is u.kpc assert_allclose(s1.rho.value, 1) assert_allclose(s1.phi.value, 2) assert_allclose(s1.z.value, 3) def test_init_array(self): s1 = CylindricalRepresentation(rho=[1, 2, 3] * u.pc, phi=[2, 3, 4] * u.deg, z=[3, 4, 5] * u.kpc) assert s1.rho.unit is u.pc assert s1.phi.unit is u.deg assert s1.z.unit is u.kpc assert_allclose(s1.rho.value, [1, 2, 3]) assert_allclose(s1.phi.value, [2, 3, 4]) assert_allclose(s1.z.value, [3, 4, 5]) def test_init_array_nocopy(self): rho = [8, 9, 10] * u.pc phi = [5, 6, 7] * u.deg z = [2, 3, 4] * u.kpc s1 = CylindricalRepresentation(rho=rho, phi=phi, z=z, copy=False) rho[:] = [9, 2, 3] * u.kpc phi[:] = [1, 2, 3] * u.arcmin z[:] = [-2, 3, 8] * u.kpc assert_allclose_quantity(rho, s1.rho) assert_allclose_quantity(phi, s1.phi) assert_allclose_quantity(z, s1.z) def test_reprobj(self): s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc) s2 = CylindricalRepresentation.from_representation(s1) assert s2.rho == 1 * u.kpc assert s2.phi == 2 * u.deg assert s2.z == 3 * u.kpc def test_broadcasting(self): s1 = CylindricalRepresentation(rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=5 * u.kpc) assert s1.rho.unit == u.kpc assert s1.phi.unit == u.deg assert s1.z.unit == u.kpc assert_allclose(s1.rho.value, [1, 2]) assert_allclose(s1.phi.value, [3, 4]) assert_allclose(s1.z.value, [5, 5]) def test_broadcasting_mismatch(self): with pytest.raises(ValueError) as exc: s1 = CylindricalRepresentation(rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=[5, 6, 7] * u.kpc) assert exc.value.args[0] == "Input parameters rho, phi, and z cannot be broadcast" def test_readonly(self): s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=20 * u.deg, z=3 * u.kpc) with pytest.raises(AttributeError): s1.rho = 1. * u.kpc with pytest.raises(AttributeError): s1.phi = 20 * u.deg with pytest.raises(AttributeError): s1.z = 1. * u.kpc def unit_mismatch(self): q_len = u.Quantity([1], u.kpc) q_nonlen = u.Quantity([1], u.kg) with pytest.raises(u.UnitsError) as exc: s1 = CylindricalRepresentation(rho=q_nonlen, phi=10 * u.deg, z=q_len) assert exc.value.args[0] == "rho and z should have matching physical types" with pytest.raises(u.UnitsError) as exc: s1 = CylindricalRepresentation(rho=q_len, phi=10 * u.deg, z=q_nonlen) assert exc.value.args[0] == "rho and z should have matching physical types" def test_getitem(self): s = CylindricalRepresentation(rho=np.arange(10) * u.pc, phi=-np.arange(10) * u.deg, z=1 * u.kpc) s_slc = s[2:8:2] assert_allclose_quantity(s_slc.rho, [2, 4, 6] * u.pc) assert_allclose_quantity(s_slc.phi, [-2, -4, -6] * u.deg) assert_allclose_quantity(s_slc.z, [1, 1, 1] * u.kpc) def test_getitem_scalar(self): s = CylindricalRepresentation(rho=1 * u.pc, phi=-2 * u.deg, z=3 * u.kpc) with pytest.raises(TypeError): s_slc = s[0] def test_cartesian_spherical_roundtrip(): s1 = CartesianRepresentation(x=[1, 2000.] * u.kpc, y=[3000., 4.] * u.pc, z=[5., 6000.] 
* u.pc) s2 = SphericalRepresentation.from_representation(s1) s3 = CartesianRepresentation.from_representation(s2) s4 = SphericalRepresentation.from_representation(s3) assert_allclose_quantity(s1.x, s3.x) assert_allclose_quantity(s1.y, s3.y) assert_allclose_quantity(s1.z, s3.z) assert_allclose_quantity(s2.lon, s4.lon) assert_allclose_quantity(s2.lat, s4.lat) assert_allclose_quantity(s2.distance, s4.distance) def test_cartesian_physics_spherical_roundtrip(): s1 = CartesianRepresentation(x=[1, 2000.] * u.kpc, y=[3000., 4.] * u.pc, z=[5., 6000.] * u.pc) s2 = PhysicsSphericalRepresentation.from_representation(s1) s3 = CartesianRepresentation.from_representation(s2) s4 = PhysicsSphericalRepresentation.from_representation(s3) assert_allclose_quantity(s1.x, s3.x) assert_allclose_quantity(s1.y, s3.y) assert_allclose_quantity(s1.z, s3.z) assert_allclose_quantity(s2.phi, s4.phi) assert_allclose_quantity(s2.theta, s4.theta) assert_allclose_quantity(s2.r, s4.r) def test_spherical_physics_spherical_roundtrip(): s1 = SphericalRepresentation(lon=3 * u.deg, lat=4 * u.deg, distance=3 * u.kpc) s2 = PhysicsSphericalRepresentation.from_representation(s1) s3 = SphericalRepresentation.from_representation(s2) s4 = PhysicsSphericalRepresentation.from_representation(s3) assert_allclose_quantity(s1.lon, s3.lon) assert_allclose_quantity(s1.lat, s3.lat) assert_allclose_quantity(s1.distance, s3.distance) assert_allclose_quantity(s2.phi, s4.phi) assert_allclose_quantity(s2.theta, s4.theta) assert_allclose_quantity(s2.r, s4.r) assert_allclose_quantity(s1.lon, s4.phi) assert_allclose_quantity(s1.lat, 90. * u.deg - s4.theta) assert_allclose_quantity(s1.distance, s4.r) def test_cartesian_cylindrical_roundtrip(): s1 = CartesianRepresentation(x=np.array([1., 2000.]) * u.kpc, y=np.array([3000., 4.]) * u.pc, z=np.array([5., 600.]) * u.cm) s2 = CylindricalRepresentation.from_representation(s1) s3 = CartesianRepresentation.from_representation(s2) s4 = CylindricalRepresentation.from_representation(s3) assert_allclose_quantity(s1.x, s3.x) assert_allclose_quantity(s1.y, s3.y) assert_allclose_quantity(s1.z, s3.z) assert_allclose_quantity(s2.rho, s4.rho) assert_allclose_quantity(s2.phi, s4.phi) assert_allclose_quantity(s2.z, s4.z) def test_unit_spherical_roundtrip(): s1 = UnitSphericalRepresentation(lon=[10., 30.] * u.deg, lat=[5., 6.] * u.arcmin) s2 = CartesianRepresentation.from_representation(s1) s3 = SphericalRepresentation.from_representation(s2) s4 = UnitSphericalRepresentation.from_representation(s3) assert_allclose_quantity(s1.lon, s4.lon) assert_allclose_quantity(s1.lat, s4.lat) def test_no_unnecessary_copies(): s1 = UnitSphericalRepresentation(lon=[10., 30.] * u.deg, lat=[5., 6.] * u.arcmin) s2 = s1.represent_as(UnitSphericalRepresentation) assert s2 is s1 assert np.may_share_memory(s1.lon, s2.lon) assert np.may_share_memory(s1.lat, s2.lat) s3 = s1.represent_as(SphericalRepresentation) assert np.may_share_memory(s1.lon, s3.lon) assert np.may_share_memory(s1.lat, s3.lat) s4 = s1.represent_as(CartesianRepresentation) s5 = s4.represent_as(CylindricalRepresentation) assert np.may_share_memory(s5.z, s4.z) def test_representation_repr(): r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc) assert repr(r1) == ('<SphericalRepresentation (lon, lat, distance) in (deg, deg, kpc)\n' ' ({})>').format(' 1., 2.5, 1.' 
if NUMPY_LT_1_14 else '1., 2.5, 1.') r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc) assert repr(r2) == ('<CartesianRepresentation (x, y, z) in kpc\n' ' ({})>').format(' 1., 2., 3.' if NUMPY_LT_1_14 else '1., 2., 3.') r3 = CartesianRepresentation(x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc) if NUMPY_LT_1_14: assert repr(r3) == ('<CartesianRepresentation (x, y, z) in kpc\n' ' [( 1., 4., 9.), ( 2., 4., 10.), ( 3., 4., 11.)]>') else: assert repr(r3) == ('<CartesianRepresentation (x, y, z) in kpc\n' ' [(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)]>') def test_representation_repr_multi_d(): """Regression test for #5889.""" cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit='m') if NUMPY_LT_1_14: assert repr(cr) == ( '<CartesianRepresentation (x, y, z) in m\n' ' [[( 0., 9., 18.), ( 1., 10., 19.), ( 2., 11., 20.)],\n' ' [( 3., 12., 21.), ( 4., 13., 22.), ( 5., 14., 23.)],\n' ' [( 6., 15., 24.), ( 7., 16., 25.), ( 8., 17., 26.)]]>') else: assert repr(cr) == ( '<CartesianRepresentation (x, y, z) in m\n' ' [[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n' ' [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n' ' [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]]>') # This was broken before. if NUMPY_LT_1_14: assert repr(cr.T) == ( '<CartesianRepresentation (x, y, z) in m\n' ' [[( 0., 9., 18.), ( 3., 12., 21.), ( 6., 15., 24.)],\n' ' [( 1., 10., 19.), ( 4., 13., 22.), ( 7., 16., 25.)],\n' ' [( 2., 11., 20.), ( 5., 14., 23.), ( 8., 17., 26.)]]>') else: assert repr(cr.T) == ( '<CartesianRepresentation (x, y, z) in m\n' ' [[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n' ' [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n' ' [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]]>') def test_representation_str(): r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc) assert str(r1) == ('( 1., 2.5, 1.) (deg, deg, kpc)' if NUMPY_LT_1_14 else '(1., 2.5, 1.) (deg, deg, kpc)') r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc) assert str(r2) == ('( 1., 2., 3.) kpc' if NUMPY_LT_1_14 else '(1., 2., 3.) kpc') r3 = CartesianRepresentation(x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc) assert str(r3) == ('[( 1., 4., 9.), ( 2., 4., 10.), ( 3., 4., 11.)] kpc' if NUMPY_LT_1_14 else '[(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)] kpc') def test_representation_str_multi_d(): """Regression test for #5889.""" cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit='m') if NUMPY_LT_1_14: assert str(cr) == ( '[[( 0., 9., 18.), ( 1., 10., 19.), ( 2., 11., 20.)],\n' ' [( 3., 12., 21.), ( 4., 13., 22.), ( 5., 14., 23.)],\n' ' [( 6., 15., 24.), ( 7., 16., 25.), ( 8., 17., 26.)]] m') else: assert str(cr) == ( '[[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n' ' [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n' ' [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]] m') # This was broken before. 
if NUMPY_LT_1_14: assert str(cr.T) == ( '[[( 0., 9., 18.), ( 3., 12., 21.), ( 6., 15., 24.)],\n' ' [( 1., 10., 19.), ( 4., 13., 22.), ( 7., 16., 25.)],\n' ' [( 2., 11., 20.), ( 5., 14., 23.), ( 8., 17., 26.)]] m') else: assert str(cr.T) == ( '[[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n' ' [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n' ' [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]] m') @pytest.mark.remote_data def test_subclass_representation(): from astropy.coordinates.builtin_frames import ICRS class Longitude180(Longitude): def __new__(cls, angle, unit=None, wrap_angle=180 * u.deg, **kwargs): self = super().__new__(cls, angle, unit=unit, wrap_angle=wrap_angle, **kwargs) return self class SphericalWrap180Representation(SphericalRepresentation): attr_classes = OrderedDict([('lon', Longitude180), ('lat', Latitude), ('distance', u.Quantity)]) class ICRSWrap180(ICRS): frame_specific_representation_info = ICRS._frame_specific_representation_info.copy() frame_specific_representation_info[SphericalWrap180Representation] = \ frame_specific_representation_info[SphericalRepresentation] default_representation = SphericalWrap180Representation c = ICRSWrap180(ra=-1 * u.deg, dec=-2 * u.deg, distance=1 * u.m) assert c.ra.value == -1 assert c.ra.unit is u.deg assert c.dec.value == -2 assert c.dec.unit is u.deg def test_minimal_subclass(): # Basically to check what we document works; # see doc/coordinates/representations.rst class LogDRepresentation(BaseRepresentation): attr_classes = OrderedDict([('lon', Longitude), ('lat', Latitude), ('logd', u.Dex)]) def to_cartesian(self): d = self.logd.physical x = d * np.cos(self.lat) * np.cos(self.lon) y = d * np.cos(self.lat) * np.sin(self.lon) z = d * np.sin(self.lat) return CartesianRepresentation(x=x, y=y, z=z, copy=False) @classmethod def from_cartesian(cls, cart): s = np.hypot(cart.x, cart.y) r = np.hypot(s, cart.z) lon = np.arctan2(cart.y, cart.x) lat = np.arctan2(cart.z, s) return cls(lon=lon, lat=lat, logd=u.Dex(r), copy=False) ld1 = LogDRepresentation(90.*u.deg, 0.*u.deg, 1.*u.dex(u.kpc)) ld2 = LogDRepresentation(lon=90.*u.deg, lat=0.*u.deg, logd=1.*u.dex(u.kpc)) assert np.all(ld1.lon == ld2.lon) assert np.all(ld1.lat == ld2.lat) assert np.all(ld1.logd == ld2.logd) c = ld1.to_cartesian() assert_allclose_quantity(c.xyz, [0., 10., 0.] * u.kpc, atol=1.*u.npc) ld3 = LogDRepresentation.from_cartesian(c) assert np.all(ld3.lon == ld2.lon) assert np.all(ld3.lat == ld2.lat) assert np.all(ld3.logd == ld2.logd) s = ld1.represent_as(SphericalRepresentation) assert_allclose_quantity(s.lon, ld1.lon) assert_allclose_quantity(s.distance, 10.*u.kpc) assert_allclose_quantity(s.lat, ld1.lat) with pytest.raises(TypeError): LogDRepresentation(0.*u.deg, 1.*u.deg) with pytest.raises(TypeError): LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), lon=1.*u.deg) with pytest.raises(TypeError): LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), True, False) with pytest.raises(TypeError): LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), foo='bar') with pytest.raises(ValueError): # check we cannot redefine an existing class. 
class LogDRepresentation(BaseRepresentation): attr_classes = OrderedDict([('lon', Longitude), ('lat', Latitude), ('logr', u.Dex)]) def test_combine_xyz(): x, y, z = np.arange(27).reshape(3, 9) * u.kpc xyz = _combine_xyz(x, y, z, xyz_axis=0) assert xyz.shape == (3, 9) assert np.all(xyz[0] == x) assert np.all(xyz[1] == y) assert np.all(xyz[2] == z) x, y, z = np.arange(27).reshape(3, 3, 3) * u.kpc xyz = _combine_xyz(x, y, z, xyz_axis=0) assert xyz.ndim == 3 assert np.all(xyz[0] == x) assert np.all(xyz[1] == y) assert np.all(xyz[2] == z) xyz = _combine_xyz(x, y, z, xyz_axis=1) assert xyz.ndim == 3 assert np.all(xyz[:, 0] == x) assert np.all(xyz[:, 1] == y) assert np.all(xyz[:, 2] == z) xyz = _combine_xyz(x, y, z, xyz_axis=-1) assert xyz.ndim == 3 assert np.all(xyz[..., 0] == x) assert np.all(xyz[..., 1] == y) assert np.all(xyz[..., 2] == z) class TestCartesianRepresentationWithDifferential: def test_init_differential(self): diff = CartesianDifferential(d_x=1 * u.km/u.s, d_y=2 * u.km/u.s, d_z=3 * u.km/u.s) # Check that a single differential gets turned into a 1-item dict. s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials=diff) assert s1.x.unit is u.kpc assert s1.y.unit is u.kpc assert s1.z.unit is u.kpc assert len(s1.differentials) == 1 assert s1.differentials['s'] is diff # can also pass in an explicit dictionary s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials={'s': diff}) assert len(s1.differentials) == 1 assert s1.differentials['s'] is diff # using the wrong key will cause it to fail with pytest.raises(ValueError): s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials={'1 / s2': diff}) # make sure other kwargs are handled properly s1 = CartesianRepresentation(x=1, y=2, z=3, differentials=diff, copy=False, unit=u.kpc) assert len(s1.differentials) == 1 assert s1.differentials['s'] is diff with pytest.raises(TypeError): # invalid type passed to differentials CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials='garmonbozia') # make sure differentials can't accept differentials with pytest.raises(TypeError): CartesianDifferential(d_x=1 * u.km/u.s, d_y=2 * u.km/u.s, d_z=3 * u.km/u.s, differentials=diff) def test_init_differential_compatible(self): # TODO: more extensive checking of this # should fail - representation and differential not compatible diff = SphericalDifferential(d_lon=1 * u.mas/u.yr, d_lat=2 * u.mas/u.yr, d_distance=3 * u.km/u.s) with pytest.raises(TypeError): CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials=diff) # should succeed - representation and differential are compatible diff = SphericalCosLatDifferential(d_lon_coslat=1 * u.mas/u.yr, d_lat=2 * u.mas/u.yr, d_distance=3 * u.km/u.s) r1 = SphericalRepresentation(lon=15*u.deg, lat=21*u.deg, distance=1*u.pc, differentials=diff) def test_init_differential_multiple_equivalent_keys(self): d1 = CartesianDifferential(*[1, 2, 3] * u.km/u.s) d2 = CartesianDifferential(*[4, 5, 6] * u.km/u.s) # verify that the check against expected_unit validates against passing # in two different but equivalent keys with pytest.raises(ValueError): r1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials={'s': d1, 'yr': d2}) def test_init_array_broadcasting(self): arr1 = np.arange(8).reshape(4, 2) * u.km/u.s diff = CartesianDifferential(d_x=arr1, d_y=arr1, d_z=arr1) # shapes aren't compatible arr2 = np.arange(27).reshape(3, 9) * u.kpc with pytest.raises(ValueError): rep = 
CartesianRepresentation(x=arr2, y=arr2, z=arr2, differentials=diff) arr2 = np.arange(8).reshape(4, 2) * u.kpc rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2, differentials=diff) assert rep.x.unit is u.kpc assert rep.y.unit is u.kpc assert rep.z.unit is u.kpc assert len(rep.differentials) == 1 assert rep.differentials['s'] is diff assert rep.xyz.shape == rep.differentials['s'].d_xyz.shape def test_reprobj(self): # should succeed - representation and differential are compatible diff = SphericalCosLatDifferential(d_lon_coslat=1 * u.mas/u.yr, d_lat=2 * u.mas/u.yr, d_distance=3 * u.km/u.s) r1 = SphericalRepresentation(lon=15*u.deg, lat=21*u.deg, distance=1*u.pc, differentials=diff) r2 = CartesianRepresentation.from_representation(r1) assert r2.get_name() == 'cartesian' assert not r2.differentials def test_readonly(self): s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc) with pytest.raises(AttributeError): # attribute is not settable s1.differentials = 'thing' def test_represent_as(self): diff = CartesianDifferential(d_x=1 * u.km/u.s, d_y=2 * u.km/u.s, d_z=3 * u.km/u.s) rep1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials=diff) # Only change the representation, drop the differential new_rep = rep1.represent_as(SphericalRepresentation) assert new_rep.get_name() == 'spherical' assert not new_rep.differentials # dropped # Pass in separate classes for representation, differential new_rep = rep1.represent_as(SphericalRepresentation, SphericalCosLatDifferential) assert new_rep.get_name() == 'spherical' assert new_rep.differentials['s'].get_name() == 'sphericalcoslat' # Pass in a dictionary for the differential classes new_rep = rep1.represent_as(SphericalRepresentation, {'s': SphericalCosLatDifferential}) assert new_rep.get_name() == 'spherical' assert new_rep.differentials['s'].get_name() == 'sphericalcoslat' # make sure represent_as() passes through the differentials for name in REPRESENTATION_CLASSES: if name == 'radial': # TODO: Converting a CartesianDifferential to a # RadialDifferential fails, even on `master` continue new_rep = rep1.represent_as(REPRESENTATION_CLASSES[name], DIFFERENTIAL_CLASSES[name]) assert new_rep.get_name() == name assert len(new_rep.differentials) == 1 assert new_rep.differentials['s'].get_name() == name with pytest.raises(ValueError) as excinfo: rep1.represent_as('name') assert 'use frame object' in str(excinfo.value) def test_getitem(self): d = CartesianDifferential(d_x=np.arange(10) * u.m/u.s, d_y=-np.arange(10) * u.m/u.s, d_z=1. 
* u.m/u.s)
        s = CartesianRepresentation(x=np.arange(10) * u.m,
                                    y=-np.arange(10) * u.m,
                                    z=3 * u.km,
                                    differentials=d)

        s_slc = s[2:8:2]
        s_dif = s_slc.differentials['s']

        assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
        assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
        assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)

        assert_allclose_quantity(s_dif.d_x, [2, 4, 6] * u.m/u.s)
        assert_allclose_quantity(s_dif.d_y, [-2, -4, -6] * u.m/u.s)
        assert_allclose_quantity(s_dif.d_z, [1, 1, 1] * u.m/u.s)

    def test_transform(self):
        d1 = CartesianDifferential(d_x=[1, 2] * u.km/u.s,
                                   d_y=[3, 4] * u.km/u.s,
                                   d_z=[5, 6] * u.km/u.s)
        r1 = CartesianRepresentation(x=[1, 2] * u.kpc,
                                     y=[3, 4] * u.kpc,
                                     z=[5, 6] * u.kpc,
                                     differentials=d1)

        matrix = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])

        r2 = r1.transform(matrix)
        d2 = r2.differentials['s']
        assert_allclose_quantity(d2.d_x, [22., 28]*u.km/u.s)
        assert_allclose_quantity(d2.d_y, [49, 64]*u.km/u.s)
        assert_allclose_quantity(d2.d_z, [76, 100.]*u.km/u.s)

    def test_with_differentials(self):
        # make sure with_differentials correctly creates a new copy with the
        # same differential
        cr = CartesianRepresentation([1, 2, 3]*u.kpc)
        diff = CartesianDifferential([.1, .2, .3]*u.km/u.s)
        cr2 = cr.with_differentials(diff)
        assert cr.differentials != cr2.differentials
        assert cr2.differentials['s'] is diff

        # make sure it works even if a differential is present already
        diff2 = CartesianDifferential([.1, .2, .3]*u.m/u.s)
        cr3 = CartesianRepresentation([1, 2, 3]*u.kpc, differentials=diff)
        cr4 = cr3.with_differentials(diff2)
        assert cr4.differentials['s'] != cr3.differentials['s']
        assert cr4.differentials['s'] == diff2

        # also ensure a *scalar* differential works
        cr5 = cr.with_differentials(diff)
        assert len(cr5.differentials) == 1
        assert cr5.differentials['s'] == diff

        # make sure we don't update the original representation's dict
        d1 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km/u.s)
        d2 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km/u.s**2)
        r1 = CartesianRepresentation(*np.random.random((3, 5)), unit=u.pc,
                                     differentials=d1)
        r2 = r1.with_differentials(d2)
        assert r1.differentials['s'] is r2.differentials['s']
        assert 's2' not in r1.differentials
        assert 's2' in r2.differentials


def test_repr_with_differentials():
    diff = CartesianDifferential([.1, .2, .3]*u.km/u.s)
    cr = CartesianRepresentation([1, 2, 3]*u.kpc, differentials=diff)
    assert "has differentials w.r.t.: 's'" in repr(cr)


def test_to_cartesian():
    """
    Test that to_cartesian drops the differential.
""" sd = SphericalDifferential(d_lat=1*u.deg, d_lon=2*u.deg, d_distance=10*u.m) sr = SphericalRepresentation(lat=1*u.deg, lon=2*u.deg, distance=10*u.m, differentials=sd) cart = sr.to_cartesian() assert cart.get_name() == 'cartesian' assert not cart.differentials def test_recommended_units_deprecation(): sr = SphericalRepresentation(lat=1*u.deg, lon=2*u.deg, distance=10*u.m) with catch_warnings(AstropyDeprecationWarning) as w: sr.recommended_units assert 'recommended_units' in str(w[0].message) with catch_warnings(AstropyDeprecationWarning) as w: class MyClass(SphericalRepresentation): attr_classes = SphericalRepresentation.attr_classes recommended_units = {} assert 'recommended_units' in str(w[0].message) @pytest.fixture def unitphysics(): """ This fixture is used """ had_unit = False if hasattr(PhysicsSphericalRepresentation, '_unit_representation'): orig = PhysicsSphericalRepresentation._unit_representation had_unit = True class UnitPhysicsSphericalRepresentation(BaseRepresentation): attr_classes = OrderedDict([('phi', Angle), ('theta', Angle)]) def __init__(self, phi, theta, differentials=None, copy=True): super().__init__(phi, theta, copy=copy, differentials=differentials) # Wrap/validate phi/theta if copy: self._phi = self._phi.wrap_at(360 * u.deg) else: # necessary because the above version of `wrap_at` has to be a copy self._phi.wrap_at(360 * u.deg, inplace=True) if np.any(self._theta < 0.*u.deg) or np.any(self._theta > 180.*u.deg): raise ValueError('Inclination angle(s) must be within ' '0 deg <= angle <= 180 deg, ' 'got {0}'.format(theta.to(u.degree))) @property def phi(self): return self._phi @property def theta(self): return self._theta def unit_vectors(self): sinphi, cosphi = np.sin(self.phi), np.cos(self.phi) sintheta, costheta = np.sin(self.theta), np.cos(self.theta) return OrderedDict( (('phi', CartesianRepresentation(-sinphi, cosphi, 0., copy=False)), ('theta', CartesianRepresentation(costheta*cosphi, costheta*sinphi, -sintheta, copy=False)))) def scale_factors(self): sintheta = np.sin(self.theta) l = np.broadcast_to(1.*u.one, self.shape, subok=True) return OrderedDict((('phi', sintheta), ('theta', l))) def to_cartesian(self): x = np.sin(self.theta) * np.cos(self.phi) y = np.sin(self.theta) * np.sin(self.phi) z = np.cos(self.theta) return CartesianRepresentation(x=x, y=y, z=z, copy=False) @classmethod def from_cartesian(cls, cart): """ Converts 3D rectangular cartesian coordinates to spherical polar coordinates. 
""" s = np.hypot(cart.x, cart.y) phi = np.arctan2(cart.y, cart.x) theta = np.arctan2(s, cart.z) return cls(phi=phi, theta=theta, copy=False) def norm(self): return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled, copy=False) PhysicsSphericalRepresentation._unit_representation = UnitPhysicsSphericalRepresentation yield UnitPhysicsSphericalRepresentation if had_unit: PhysicsSphericalRepresentation._unit_representation = orig else: del PhysicsSphericalRepresentation._unit_representation # remove from the module-level representations, if present REPRESENTATION_CLASSES.pop(UnitPhysicsSphericalRepresentation.get_name(), None) def test_unitphysics(unitphysics): obj = unitphysics(phi=0*u.deg, theta=10*u.deg) objkw = unitphysics(phi=0*u.deg, theta=10*u.deg) assert objkw.phi == obj.phi assert objkw.theta == obj.theta asphys = obj.represent_as(PhysicsSphericalRepresentation) assert asphys.phi == obj.phi assert asphys.theta == obj.theta assert_allclose_quantity(asphys.r, 1*u.dimensionless_unscaled) assph = obj.represent_as(SphericalRepresentation) assert assph.lon == obj.phi assert assph.lat == 80*u.deg assert_allclose_quantity(assph.distance, 1*u.dimensionless_unscaled) def test_distance_warning(recwarn): SphericalRepresentation(1*u.deg, 2*u.deg, 1*u.kpc) with pytest.raises(ValueError) as excinfo: SphericalRepresentation(1*u.deg, 2*u.deg, -1*u.kpc) assert 'Distance must be >= 0' in str(excinfo.value) # second check is because the "originating" ValueError says the above, # while the representation one includes the below assert 'you must explicitly pass' in str(excinfo.value)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Accuracy tests for GCRS coordinate transformations, primarily to/from AltAz.

"""
import pytest
import numpy as np

from astropy import units as u
from astropy.tests.helper import (assert_quantity_allclose as
                                  assert_allclose)
from astropy.time import Time
from astropy.coordinates import (EarthLocation, get_sun, ICRS, GCRS, CIRS,
                                 ITRS, AltAz, PrecessedGeocentric,
                                 CartesianRepresentation, SkyCoord,
                                 SphericalRepresentation,
                                 UnitSphericalRepresentation, HCRS,
                                 HeliocentricMeanEcliptic)
from astropy._erfa import epv00

from .utils import randomly_sample_sphere
from astropy.coordinates.builtin_frames.utils import get_jd12
from astropy.coordinates import solar_system_ephemeris
from astropy.units import allclose

try:
    import jplephem  # pylint: disable=W0611
except ImportError:
    HAS_JPLEPHEM = False
else:
    HAS_JPLEPHEM = True


def test_icrs_cirs():
    """
    Check a few cases of ICRS<->CIRS for consistency.

    Also includes the CIRS<->CIRS transforms at different times, as those go
    through ICRS
    """
    ra, dec, dist = randomly_sample_sphere(200)
    inod = ICRS(ra=ra, dec=dec)
    iwd = ICRS(ra=ra, dec=dec, distance=dist*u.pc)

    cframe1 = CIRS()
    cirsnod = inod.transform_to(cframe1)  # uses the default time
    # first do a round-tripping test
    inod2 = cirsnod.transform_to(ICRS)
    assert_allclose(inod.ra, inod2.ra)
    assert_allclose(inod.dec, inod2.dec)

    # now check that a different time yields different answers
    cframe2 = CIRS(obstime=Time('J2005'))
    cirsnod2 = inod.transform_to(cframe2)
    assert not allclose(cirsnod.ra, cirsnod2.ra, rtol=1e-8)
    assert not allclose(cirsnod.dec, cirsnod2.dec, rtol=1e-8)

    # parallax effects should be included, so with and w/o distance should be different
    cirswd = iwd.transform_to(cframe1)
    assert not allclose(cirswd.ra, cirsnod.ra, rtol=1e-8)
    assert not allclose(cirswd.dec, cirsnod.dec, rtol=1e-8)
    # and the distance should transform at least somehow
    assert not allclose(cirswd.distance, iwd.distance, rtol=1e-8)

    # now check that the cirs self-transform works as expected
    cirsnod3 = cirsnod.transform_to(cframe1)  # should be a no-op
    assert_allclose(cirsnod.ra, cirsnod3.ra)
    assert_allclose(cirsnod.dec, cirsnod3.dec)

    cirsnod4 = cirsnod.transform_to(cframe2)  # should be different
    assert not allclose(cirsnod4.ra, cirsnod.ra, rtol=1e-8)
    assert not allclose(cirsnod4.dec, cirsnod.dec, rtol=1e-8)

    cirsnod5 = cirsnod4.transform_to(cframe1)  # should be back to the same
    assert_allclose(cirsnod.ra, cirsnod5.ra)
    assert_allclose(cirsnod.dec, cirsnod5.dec)


ra, dec, dist = randomly_sample_sphere(200)
icrs_coords = [ICRS(ra=ra, dec=dec), ICRS(ra=ra, dec=dec, distance=dist*u.pc)]
gcrs_frames = [GCRS(), GCRS(obstime=Time('J2005'))]


@pytest.mark.parametrize('icoo', icrs_coords)
def test_icrs_gcrs(icoo):
    """
    Check ICRS<->GCRS for consistency
    """
    gcrscoo = icoo.transform_to(gcrs_frames[0])  # uses the default time
    # first do a round-tripping test
    icoo2 = gcrscoo.transform_to(ICRS)
    assert_allclose(icoo.distance, icoo2.distance)
    assert_allclose(icoo.ra, icoo2.ra)
    assert_allclose(icoo.dec, icoo2.dec)
    assert isinstance(icoo2.data, icoo.data.__class__)

    # now check that a different time yields different answers
    gcrscoo2 = icoo.transform_to(gcrs_frames[1])
    assert not allclose(gcrscoo.ra, gcrscoo2.ra, rtol=1e-8, atol=1e-10*u.deg)
    assert not allclose(gcrscoo.dec, gcrscoo2.dec, rtol=1e-8, atol=1e-10*u.deg)

    # now check that the gcrs self-transform works as expected
    gcrscoo3 = gcrscoo.transform_to(gcrs_frames[0])  # should be a no-op
    assert_allclose(gcrscoo.ra, gcrscoo3.ra)
assert_allclose(gcrscoo.dec, gcrscoo3.dec) gcrscoo4 = gcrscoo.transform_to(gcrs_frames[1]) # should be different assert not allclose(gcrscoo4.ra, gcrscoo.ra, rtol=1e-8, atol=1e-10*u.deg) assert not allclose(gcrscoo4.dec, gcrscoo.dec, rtol=1e-8, atol=1e-10*u.deg) gcrscoo5 = gcrscoo4.transform_to(gcrs_frames[0]) # should be back to the same assert_allclose(gcrscoo.ra, gcrscoo5.ra, rtol=1e-8, atol=1e-10*u.deg) assert_allclose(gcrscoo.dec, gcrscoo5.dec, rtol=1e-8, atol=1e-10*u.deg) # also make sure that a GCRS with a different geoloc/geovel gets a different answer # roughly a moon-like frame gframe3 = GCRS(obsgeoloc=[385000., 0, 0]*u.km, obsgeovel=[1, 0, 0]*u.km/u.s) gcrscoo6 = icoo.transform_to(gframe3) # should be different assert not allclose(gcrscoo.ra, gcrscoo6.ra, rtol=1e-8, atol=1e-10*u.deg) assert not allclose(gcrscoo.dec, gcrscoo6.dec, rtol=1e-8, atol=1e-10*u.deg) icooviag3 = gcrscoo6.transform_to(ICRS) # and now back to the original assert_allclose(icoo.ra, icooviag3.ra) assert_allclose(icoo.dec, icooviag3.dec) @pytest.mark.parametrize('gframe', gcrs_frames) def test_icrs_gcrs_dist_diff(gframe): """ Check that with and without distance give different ICRS<->GCRS answers """ gcrsnod = icrs_coords[0].transform_to(gframe) gcrswd = icrs_coords[1].transform_to(gframe) # parallax effects should be included, so with and w/o distance should be different assert not allclose(gcrswd.ra, gcrsnod.ra, rtol=1e-8, atol=1e-10*u.deg) assert not allclose(gcrswd.dec, gcrsnod.dec, rtol=1e-8, atol=1e-10*u.deg) # and the distance should transform at least somehow assert not allclose(gcrswd.distance, icrs_coords[1].distance, rtol=1e-8, atol=1e-10*u.pc) @pytest.mark.remote_data def test_cirs_to_altaz(): """ Check the basic CIRS<->AltAz transforms. More thorough checks implicitly happen in `test_iau_fullstack` """ from astropy.coordinates import EarthLocation ra, dec, dist = randomly_sample_sphere(200) cirs = CIRS(ra=ra, dec=dec, obstime='J2000') crepr = SphericalRepresentation(lon=ra, lat=dec, distance=dist) cirscart = CIRS(crepr, obstime=cirs.obstime, representation_type=CartesianRepresentation) loc = EarthLocation(lat=0*u.deg, lon=0*u.deg, height=0*u.m) altazframe = AltAz(location=loc, obstime=Time('J2005')) cirs2 = cirs.transform_to(altazframe).transform_to(cirs) cirs3 = cirscart.transform_to(altazframe).transform_to(cirs) # check round-tripping assert_allclose(cirs.ra, cirs2.ra) assert_allclose(cirs.dec, cirs2.dec) assert_allclose(cirs.ra, cirs3.ra) assert_allclose(cirs.dec, cirs3.dec) @pytest.mark.remote_data def test_gcrs_itrs(): """ Check basic GCRS<->ITRS transforms for round-tripping. """ ra, dec, _ = randomly_sample_sphere(200) gcrs = GCRS(ra=ra, dec=dec, obstime='J2000') gcrs6 = GCRS(ra=ra, dec=dec, obstime='J2006') gcrs2 = gcrs.transform_to(ITRS).transform_to(gcrs) gcrs6_2 = gcrs6.transform_to(ITRS).transform_to(gcrs) assert_allclose(gcrs.ra, gcrs2.ra) assert_allclose(gcrs.dec, gcrs2.dec) assert not allclose(gcrs.ra, gcrs6_2.ra) assert not allclose(gcrs.dec, gcrs6_2.dec) # also try with the cartesian representation gcrsc = gcrs.realize_frame(gcrs.data) gcrsc.representation_type = CartesianRepresentation gcrsc2 = gcrsc.transform_to(ITRS).transform_to(gcrsc) assert_allclose(gcrsc.spherical.lon.deg, gcrsc2.ra.deg) assert_allclose(gcrsc.spherical.lat, gcrsc2.dec) @pytest.mark.remote_data def test_cirs_itrs(): """ Check basic CIRS<->ITRS transforms for round-tripping. 
""" ra, dec, _ = randomly_sample_sphere(200) cirs = CIRS(ra=ra, dec=dec, obstime='J2000') cirs6 = CIRS(ra=ra, dec=dec, obstime='J2006') cirs2 = cirs.transform_to(ITRS).transform_to(cirs) cirs6_2 = cirs6.transform_to(ITRS).transform_to(cirs) # different obstime # just check round-tripping assert_allclose(cirs.ra, cirs2.ra) assert_allclose(cirs.dec, cirs2.dec) assert not allclose(cirs.ra, cirs6_2.ra) assert not allclose(cirs.dec, cirs6_2.dec) @pytest.mark.remote_data def test_gcrs_cirs(): """ Check GCRS<->CIRS transforms for round-tripping. More complicated than the above two because it's multi-hop """ ra, dec, _ = randomly_sample_sphere(200) gcrs = GCRS(ra=ra, dec=dec, obstime='J2000') gcrs6 = GCRS(ra=ra, dec=dec, obstime='J2006') gcrs2 = gcrs.transform_to(CIRS).transform_to(gcrs) gcrs6_2 = gcrs6.transform_to(CIRS).transform_to(gcrs) assert_allclose(gcrs.ra, gcrs2.ra) assert_allclose(gcrs.dec, gcrs2.dec) assert not allclose(gcrs.ra, gcrs6_2.ra) assert not allclose(gcrs.dec, gcrs6_2.dec) # now try explicit intermediate pathways and ensure they're all consistent gcrs3 = gcrs.transform_to(ITRS).transform_to(CIRS).transform_to(ITRS).transform_to(gcrs) assert_allclose(gcrs.ra, gcrs3.ra) assert_allclose(gcrs.dec, gcrs3.dec) gcrs4 = gcrs.transform_to(ICRS).transform_to(CIRS).transform_to(ICRS).transform_to(gcrs) assert_allclose(gcrs.ra, gcrs4.ra) assert_allclose(gcrs.dec, gcrs4.dec) @pytest.mark.remote_data def test_gcrs_altaz(): """ Check GCRS<->AltAz transforms for round-tripping. Has multiple paths """ from astropy.coordinates import EarthLocation ra, dec, _ = randomly_sample_sphere(1) gcrs = GCRS(ra=ra[0], dec=dec[0], obstime='J2000') # check array times sure N-d arrays work times = Time(np.linspace(2456293.25, 2456657.25, 51) * u.day, format='jd') loc = EarthLocation(lon=10 * u.deg, lat=80. * u.deg) aaframe = AltAz(obstime=times, location=loc) aa1 = gcrs.transform_to(aaframe) aa2 = gcrs.transform_to(ICRS).transform_to(CIRS).transform_to(aaframe) aa3 = gcrs.transform_to(ITRS).transform_to(CIRS).transform_to(aaframe) # make sure they're all consistent assert_allclose(aa1.alt, aa2.alt) assert_allclose(aa1.az, aa2.az) assert_allclose(aa1.alt, aa3.alt) assert_allclose(aa1.az, aa3.az) @pytest.mark.remote_data def test_precessed_geocentric(): assert PrecessedGeocentric().equinox.jd == Time('J2000').jd gcrs_coo = GCRS(180*u.deg, 2*u.deg, distance=10000*u.km) pgeo_coo = gcrs_coo.transform_to(PrecessedGeocentric) assert np.abs(gcrs_coo.ra - pgeo_coo.ra) > 10*u.marcsec assert np.abs(gcrs_coo.dec - pgeo_coo.dec) > 10*u.marcsec assert_allclose(gcrs_coo.distance, pgeo_coo.distance) gcrs_roundtrip = pgeo_coo.transform_to(GCRS) assert_allclose(gcrs_coo.ra, gcrs_roundtrip.ra) assert_allclose(gcrs_coo.dec, gcrs_roundtrip.dec) assert_allclose(gcrs_coo.distance, gcrs_roundtrip.distance) pgeo_coo2 = gcrs_coo.transform_to(PrecessedGeocentric(equinox='B1850')) assert np.abs(gcrs_coo.ra - pgeo_coo2.ra) > 1.5*u.deg assert np.abs(gcrs_coo.dec - pgeo_coo2.dec) > 0.5*u.deg assert_allclose(gcrs_coo.distance, pgeo_coo2.distance) gcrs2_roundtrip = pgeo_coo2.transform_to(GCRS) assert_allclose(gcrs_coo.ra, gcrs2_roundtrip.ra) assert_allclose(gcrs_coo.dec, gcrs2_roundtrip.dec) assert_allclose(gcrs_coo.distance, gcrs2_roundtrip.distance) # shared by parametrized tests below. 
totest_frames = [AltAz(location=EarthLocation(-90*u.deg, 65*u.deg),
                       obstime=Time('J2000')),  # J2000 is often a default so this might work when others don't
                 AltAz(location=EarthLocation(120*u.deg, -35*u.deg),
                       obstime=Time('J2000')),
                 AltAz(location=EarthLocation(-90*u.deg, 65*u.deg),
                       obstime=Time('2014-01-01 00:00:00')),
                 AltAz(location=EarthLocation(-90*u.deg, 65*u.deg),
                       obstime=Time('2014-08-01 08:00:00')),
                 AltAz(location=EarthLocation(120*u.deg, -35*u.deg),
                       obstime=Time('2014-01-01 00:00:00'))
                 ]
MOONDIST = 385000*u.km  # approximate semi-major axis of the Moon's orbit
MOONDIST_CART = CartesianRepresentation(3**-0.5*MOONDIST, 3**-0.5*MOONDIST,
                                        3**-0.5*MOONDIST)
EARTHECC = 0.017 + 0.005  # roughly earth orbital eccentricity, but with an added tolerance


@pytest.mark.remote_data
@pytest.mark.parametrize('testframe', totest_frames)
def test_gcrs_altaz_sunish(testframe):
    """
    Sanity-check that the sun is at a reasonable distance from any altaz
    """
    sun = get_sun(testframe.obstime)

    assert sun.frame.name == 'gcrs'

    # the .to(u.au) is not necessary, it just makes the asserts on failure more readable
    assert (EARTHECC - 1)*u.au < sun.distance.to(u.au) < (EARTHECC + 1)*u.au

    sunaa = sun.transform_to(testframe)
    assert (EARTHECC - 1)*u.au < sunaa.distance.to(u.au) < (EARTHECC + 1)*u.au


@pytest.mark.remote_data
@pytest.mark.parametrize('testframe', totest_frames)
def test_gcrs_altaz_moonish(testframe):
    """
    Sanity-check that an object resembling the moon goes to the right place with
    a GCRS->AltAz transformation
    """
    moon = GCRS(MOONDIST_CART, obstime=testframe.obstime)

    moonaa = moon.transform_to(testframe)

    # now check that the distance change is similar to earth radius
    assert 1000*u.km < np.abs(moonaa.distance - moon.distance).to(u.au) < 7000*u.km

    # now check that it round-trips
    moon2 = moonaa.transform_to(moon)
    assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz)

    # also should add checks that the alt/az are different for different earth locations


@pytest.mark.remote_data
@pytest.mark.parametrize('testframe', totest_frames)
def test_gcrs_altaz_bothroutes(testframe):
    """
    Repeat of both the moonish and sunish tests above to make sure the two
    routes through the coordinate graph are consistent with each other
    """
    sun = get_sun(testframe.obstime)
    sunaa_viaicrs = sun.transform_to(ICRS).transform_to(testframe)
    sunaa_viaitrs = sun.transform_to(ITRS(obstime=testframe.obstime)).transform_to(testframe)

    moon = GCRS(MOONDIST_CART, obstime=testframe.obstime)
    moonaa_viaicrs = moon.transform_to(ICRS).transform_to(testframe)
    moonaa_viaitrs = moon.transform_to(ITRS(obstime=testframe.obstime)).transform_to(testframe)

    assert_allclose(sunaa_viaicrs.cartesian.xyz, sunaa_viaitrs.cartesian.xyz)
    assert_allclose(moonaa_viaicrs.cartesian.xyz, moonaa_viaitrs.cartesian.xyz)


@pytest.mark.remote_data
@pytest.mark.parametrize('testframe', totest_frames)
def test_cirs_altaz_moonish(testframe):
    """
    Sanity-check that an object resembling the moon goes to the right place with
    a CIRS<->AltAz transformation
    """
    moon = CIRS(MOONDIST_CART, obstime=testframe.obstime)

    moonaa = moon.transform_to(testframe)
    assert 1000*u.km < np.abs(moonaa.distance - moon.distance).to(u.km) < 7000*u.km

    # now check that it round-trips
    moon2 = moonaa.transform_to(moon)
    assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz)


@pytest.mark.remote_data
@pytest.mark.parametrize('testframe', totest_frames)
def test_cirs_altaz_nodist(testframe):
    """
    Check that a UnitSphericalRepresentation coordinate round-trips for the
CIRS<->AltAz transformation. """ coo0 = CIRS(UnitSphericalRepresentation(10*u.deg, 20*u.deg), obstime=testframe.obstime) # check that it round-trips coo1 = coo0.transform_to(testframe).transform_to(coo0) assert_allclose(coo0.cartesian.xyz, coo1.cartesian.xyz) @pytest.mark.parametrize('testframe', totest_frames) def test_cirs_icrs_moonish(testframe): """ check that something like the moon goes to about the right distance from the ICRS origin when starting from CIRS """ moonish = CIRS(MOONDIST_CART, obstime=testframe.obstime) moonicrs = moonish.transform_to(ICRS) assert 0.97*u.au < moonicrs.distance < 1.03*u.au @pytest.mark.parametrize('testframe', totest_frames) def test_gcrs_icrs_moonish(testframe): """ check that something like the moon goes to about the right distance from the ICRS origin when starting from GCRS """ moonish = GCRS(MOONDIST_CART, obstime=testframe.obstime) moonicrs = moonish.transform_to(ICRS) assert 0.97*u.au < moonicrs.distance < 1.03*u.au @pytest.mark.remote_data @pytest.mark.parametrize('testframe', totest_frames) def test_icrs_gcrscirs_sunish(testframe): """ check that the ICRS barycenter goes to about the right distance from various ~geocentric frames (other than testframe) """ # slight offset to avoid divide-by-zero errors icrs = ICRS(0*u.deg, 0*u.deg, distance=10*u.km) gcrs = icrs.transform_to(GCRS(obstime=testframe.obstime)) assert (EARTHECC - 1)*u.au < gcrs.distance.to(u.au) < (EARTHECC + 1)*u.au cirs = icrs.transform_to(CIRS(obstime=testframe.obstime)) assert (EARTHECC - 1)*u.au < cirs.distance.to(u.au) < (EARTHECC + 1)*u.au itrs = icrs.transform_to(ITRS(obstime=testframe.obstime)) assert (EARTHECC - 1)*u.au < itrs.spherical.distance.to(u.au) < (EARTHECC + 1)*u.au @pytest.mark.remote_data @pytest.mark.parametrize('testframe', totest_frames) def test_icrs_altaz_moonish(testframe): """ Check that something expressed in *ICRS* as being moon-like goes to the right AltAz distance """ # we use epv00 instead of get_sun because get_sun includes aberration earth_pv_helio, earth_pv_bary = epv00(*get_jd12(testframe.obstime, 'tdb')) earth_icrs_xyz = earth_pv_bary[0]*u.au moonoffset = [0, 0, MOONDIST.value]*MOONDIST.unit moonish_icrs = ICRS(CartesianRepresentation(earth_icrs_xyz + moonoffset)) moonaa = moonish_icrs.transform_to(testframe) # now check that the distance change is similar to earth radius assert 1000*u.km < np.abs(moonaa.distance - MOONDIST).to(u.au) < 7000*u.km def test_gcrs_self_transform_closeby(): """ Tests GCRS self transform for objects which are nearby and thus have reasonable parallax. Moon positions were originally created using JPL DE432s ephemeris. The two lunar positions (one geocentric, one at a defined location) are created via a transformation from ICRS to two different GCRS frames. We test that the GCRS-GCRS self transform can correctly map one GCRS frame onto the other. 
""" t = Time("2014-12-25T07:00") moon_geocentric = SkyCoord(GCRS(318.10579159*u.deg, -11.65281165*u.deg, 365042.64880308*u.km, obstime=t)) # this is the location of the Moon as seen from La Palma obsgeoloc = [-5592982.59658935, -63054.1948592, 3059763.90102216]*u.m obsgeovel = [4.59798494, -407.84677071, 0.]*u.m/u.s moon_lapalma = SkyCoord(GCRS(318.7048445*u.deg, -11.98761996*u.deg, 369722.8231031*u.km, obstime=t, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)) transformed = moon_geocentric.transform_to(moon_lapalma.frame) delta = transformed.separation_3d(moon_lapalma) assert_allclose(delta, 0.0*u.m, atol=1*u.m) @pytest.mark.remote_data @pytest.mark.skipif('not HAS_JPLEPHEM') def test_ephemerides(): """ We test that using different ephemerides gives very similar results for transformations """ t = Time("2014-12-25T07:00") moon = SkyCoord(GCRS(318.10579159*u.deg, -11.65281165*u.deg, 365042.64880308*u.km, obstime=t)) icrs_frame = ICRS() hcrs_frame = HCRS(obstime=t) ecl_frame = HeliocentricMeanEcliptic(equinox=t) cirs_frame = CIRS(obstime=t) moon_icrs_builtin = moon.transform_to(icrs_frame) moon_hcrs_builtin = moon.transform_to(hcrs_frame) moon_helioecl_builtin = moon.transform_to(ecl_frame) moon_cirs_builtin = moon.transform_to(cirs_frame) with solar_system_ephemeris.set('jpl'): moon_icrs_jpl = moon.transform_to(icrs_frame) moon_hcrs_jpl = moon.transform_to(hcrs_frame) moon_helioecl_jpl = moon.transform_to(ecl_frame) moon_cirs_jpl = moon.transform_to(cirs_frame) # most transformations should differ by an amount which is # non-zero but of order milliarcsecs sep_icrs = moon_icrs_builtin.separation(moon_icrs_jpl) sep_hcrs = moon_hcrs_builtin.separation(moon_hcrs_jpl) sep_helioecl = moon_helioecl_builtin.separation(moon_helioecl_jpl) sep_cirs = moon_cirs_builtin.separation(moon_cirs_jpl) assert_allclose([sep_icrs, sep_hcrs, sep_helioecl], 0.0*u.deg, atol=10*u.mas) assert all(sep > 10*u.microarcsecond for sep in (sep_icrs, sep_hcrs, sep_helioecl)) # CIRS should be the same assert_allclose(sep_cirs, 0.0*u.deg, atol=1*u.microarcsecond)
import pytest

from astropy.tests.helper import assert_quantity_allclose
from astropy.units import allclose as quantity_allclose
from astropy import units as u
from astropy.coordinates import Longitude, Latitude, EarthLocation
from astropy.coordinates.sites import (get_builtin_sites, get_downloaded_sites,
                                       SiteRegistry)


def test_builtin_sites():
    reg = get_builtin_sites()

    greenwich = reg['greenwich']
    lon, lat, el = greenwich.to_geodetic()
    assert_quantity_allclose(lon, Longitude('0:0:0', unit=u.deg),
                             atol=10*u.arcsec)
    assert_quantity_allclose(lat, Latitude('51:28:40', unit=u.deg),
                             atol=1*u.arcsec)
    assert_quantity_allclose(el, 46*u.m, atol=1*u.m)

    names = reg.names
    assert 'greenwich' in names
    assert 'example_site' in names

    with pytest.raises(KeyError) as exc:
        reg['nonexistent site']
    assert exc.value.args[0] == "Site 'nonexistent site' not in database. Use the 'names' attribute to see available sites."


@pytest.mark.remote_data(source='astropy')
def test_online_sites():
    reg = get_downloaded_sites()

    keck = reg['keck']
    lon, lat, el = keck.to_geodetic()
    assert_quantity_allclose(lon, -Longitude('155:28.7', unit=u.deg),
                             atol=0.001*u.deg)
    assert_quantity_allclose(lat, Latitude('19:49.7', unit=u.deg),
                             atol=0.001*u.deg)
    assert_quantity_allclose(el, 4160*u.m, atol=1*u.m)

    names = reg.names
    assert 'keck' in names
    assert 'ctio' in names

    with pytest.raises(KeyError) as exc:
        reg['nonexistent site']
    assert exc.value.args[0] == "Site 'nonexistent site' not in database. Use the 'names' attribute to see available sites."

    with pytest.raises(KeyError) as exc:
        reg['kec']
    assert exc.value.args[0] == "Site 'kec' not in database. Use the 'names' attribute to see available sites. Did you mean one of: 'keck'?'"


@pytest.mark.remote_data(source='astropy')
# this will *try* the online registry, so we have to mark it remote_data, even
# though it could fall back on the non-remote version
def test_EarthLocation_basic():
    greenwichel = EarthLocation.of_site('greenwich')
    lon, lat, el = greenwichel.to_geodetic()
    assert_quantity_allclose(lon, Longitude('0:0:0', unit=u.deg),
                             atol=10*u.arcsec)
    assert_quantity_allclose(lat, Latitude('51:28:40', unit=u.deg),
                             atol=1*u.arcsec)
    assert_quantity_allclose(el, 46*u.m, atol=1*u.m)

    names = EarthLocation.get_site_names()
    assert 'greenwich' in names
    assert 'example_site' in names

    with pytest.raises(KeyError) as exc:
        EarthLocation.of_site('nonexistent site')
    assert exc.value.args[0] == "Site 'nonexistent site' not in database. Use EarthLocation.get_site_names to see available sites."
def test_EarthLocation_state_offline():
    EarthLocation._site_registry = None
    EarthLocation._get_site_registry(force_builtin=True)
    assert EarthLocation._site_registry is not None

    oldreg = EarthLocation._site_registry
    newreg = EarthLocation._get_site_registry()
    assert oldreg is newreg
    newreg = EarthLocation._get_site_registry(force_builtin=True)
    assert oldreg is not newreg


@pytest.mark.remote_data(source='astropy')
def test_EarthLocation_state_online():
    EarthLocation._site_registry = None
    EarthLocation._get_site_registry(force_download=True)
    assert EarthLocation._site_registry is not None

    oldreg = EarthLocation._site_registry
    newreg = EarthLocation._get_site_registry()
    assert oldreg is newreg
    newreg = EarthLocation._get_site_registry(force_download=True)
    assert oldreg is not newreg


def test_registry():
    reg = SiteRegistry()

    assert len(reg.names) == 0

    names = ['sitea', 'site A']
    loc = EarthLocation.from_geodetic(lat=1*u.deg, lon=2*u.deg, height=3*u.km)
    reg.add_site(names, loc)

    assert len(reg.names) == 2

    loc1 = reg['SIteA']
    assert loc1 is loc

    loc2 = reg['sIte a']
    assert loc2 is loc


def test_non_EarthLocation():
    """
    A regression test for a typo bug pointed out at the bottom of
    https://github.com/astropy/astropy/pull/4042
    """

    class EarthLocation2(EarthLocation):
        pass

    # This keeps us from needing to do remote_data
    # note that this does *not* mess up the registry for EarthLocation because
    # registry is cached on a per-class basis
    EarthLocation2._get_site_registry(force_builtin=True)

    el2 = EarthLocation2.of_site('greenwich')
    assert type(el2) is EarthLocation2
    assert el2.info.name == 'Royal Observatory Greenwich'


def check_builtin_matches_remote(download_url=True):
    """
    This function checks that the builtin sites registry is consistent with the
    remote registry (or a registry at some other location).

    Note that currently this is *not* run by the testing suite (because it
    doesn't start with "test", and is instead meant to be used as a check
    before merging changes in astropy-data)
    """
    builtin_registry = EarthLocation._get_site_registry(force_builtin=True)
    dl_registry = EarthLocation._get_site_registry(force_download=download_url)

    in_dl = {}
    matches = {}
    for name in builtin_registry.names:
        in_dl[name] = name in dl_registry
        if in_dl[name]:
            matches[name] = quantity_allclose(builtin_registry[name],
                                              dl_registry[name])
        else:
            matches[name] = False

    if not all(matches.values()):
        # this makes sure we actually see which don't match
        print("In builtin registry but not in download:")
        for name in in_dl:
            if not in_dl[name]:
                print(' ', name)
        print("In both but not the same value:")
        for name in matches:
            if not matches[name] and in_dl[name]:
                print(' ', name, 'builtin:', builtin_registry[name],
                      'download:', dl_registry[name])
        assert False, "Builtin and download registry aren't consistent - failures printed to stdout"


def test_meta_present():
    reg = get_builtin_sites()

    greenwich = reg['greenwich']
    assert greenwich.info.meta['source'] == ('Ordnance Survey via '
                                             'http://gpsinformation.net/main/greenwich.htm and UNESCO')
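# Usage sketch (added for illustration): `check_builtin_matches_remote` above
# is meant to be run by hand (it needs network access) before merging changes
# to astropy-data, e.g.:
#
#     from astropy.coordinates.tests.test_sites import check_builtin_matches_remote
#     check_builtin_matches_remote()  # prints any mismatches, then asserts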
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
Tests for miscellaneous functionality in the `funcs` module
"""

import pytest
import numpy as np
from numpy import testing as npt

from astropy import units as u
from astropy.time import Time
from astropy._erfa import ErfaWarning


def test_sun():
    """
    Test that `get_sun` works and it behaves roughly as it should (in GCRS)
    """
    from astropy.coordinates.funcs import get_sun

    northern_summer_solstice = Time('2010-6-21')
    northern_winter_solstice = Time('2010-12-21')
    equinox_1 = Time('2010-3-21')
    equinox_2 = Time('2010-9-21')

    gcrs1 = get_sun(equinox_1)
    assert np.abs(gcrs1.dec.deg) < 1

    gcrs2 = get_sun(Time([northern_summer_solstice, equinox_2,
                          northern_winter_solstice]))
    assert np.all(np.abs(gcrs2.dec - [23.5, 0, -23.5]*u.deg) < 1*u.deg)


def test_constellations(recwarn):
    from astropy.coordinates import ICRS, FK5, SkyCoord
    from astropy.coordinates.funcs import get_constellation

    inuma = ICRS(9*u.hour, 65*u.deg)

    n_prewarn = len(recwarn)
    res = get_constellation(inuma)
    res_short = get_constellation(inuma, short_name=True)
    assert len(recwarn) == n_prewarn  # neither version should make warnings

    assert res == 'Ursa Major'
    assert res_short == 'UMa'
    assert isinstance(res, str) or getattr(res, 'shape', None) == tuple()

    # these are taken from the ReadMe for Roman 1987
    ras = [9, 23.5, 5.12, 9.4555, 12.8888, 15.6687, 19, 6.2222]
    decs = [65, -20, 9.12, -19.9, 22, -12.1234, -40, -81.1234]
    shortnames = ['UMa', 'Aqr', 'Ori', 'Hya', 'Com', 'Lib', 'CrA', 'Men']

    testcoos = FK5(ras*u.hour, decs*u.deg, equinox='B1950')
    npt.assert_equal(get_constellation(testcoos, short_name=True), shortnames)

    # test on a SkyCoord, *and* test Boötes, which is special in that it has a
    # non-ASCII character
    bootest = SkyCoord(15*u.hour, 30*u.deg, frame='icrs')
    boores = get_constellation(bootest)
    assert boores == u'Boötes'
    assert isinstance(boores, str) or getattr(boores, 'shape', None) == tuple()


def test_concatenate():
    from astropy.coordinates import FK5, SkyCoord, ICRS
    from astropy.coordinates.funcs import concatenate

    # Just positions
    fk5 = FK5(1*u.deg, 2*u.deg)
    sc = SkyCoord(3*u.deg, 4*u.deg, frame='fk5')

    res = concatenate([fk5, sc])
    np.testing.assert_allclose(res.ra, [1, 3]*u.deg)
    np.testing.assert_allclose(res.dec, [2, 4]*u.deg)

    with pytest.raises(TypeError):
        concatenate(fk5)

    with pytest.raises(TypeError):
        concatenate(1*u.deg)

    # positions and velocities
    fr = ICRS(ra=10*u.deg, dec=11.*u.deg,
              pm_ra_cosdec=12*u.mas/u.yr,
              pm_dec=13*u.mas/u.yr)
    sc = SkyCoord(ra=20*u.deg, dec=21.*u.deg,
                  pm_ra_cosdec=22*u.mas/u.yr,
                  pm_dec=23*u.mas/u.yr)

    res = concatenate([fr, sc])

    with pytest.raises(ValueError):
        concatenate([fr, fk5])

    fr2 = ICRS(ra=10*u.deg, dec=11.*u.deg)
    with pytest.raises(ValueError):
        concatenate([fr, fr2])


def test_concatenate_representations():
    from astropy.coordinates.funcs import concatenate_representations
    from astropy.coordinates import representation as r

    reps = [r.CartesianRepresentation([1, 2, 3.]*u.kpc),
            r.SphericalRepresentation(lon=1*u.deg, lat=2.*u.deg,
                                      distance=10*u.pc),
            r.UnitSphericalRepresentation(lon=1*u.deg, lat=2.*u.deg),
            r.CartesianRepresentation(np.ones((3, 100)) * u.kpc),
            r.CartesianRepresentation(np.ones((3, 16, 8)) * u.kpc)]

    reps.append(reps[0].with_differentials(
        r.CartesianDifferential([1, 2, 3.]
* u.km/u.s))) reps.append(reps[1].with_differentials( r.SphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr, 3*u.km/u.s))) reps.append(reps[2].with_differentials( r.SphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr, 3*u.km/u.s))) reps.append(reps[2].with_differentials( r.UnitSphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr))) reps.append(reps[2].with_differentials( {'s': r.RadialDifferential(1*u.km/u.s)})) reps.append(reps[3].with_differentials( r.CartesianDifferential(*np.ones((3, 100)) * u.km/u.s))) reps.append(reps[4].with_differentials( r.CartesianDifferential(*np.ones((3, 16, 8)) * u.km/u.s))) # Test that combining all of the above with itself succeeds for rep in reps: if not rep.shape: expected_shape = (2, ) else: expected_shape = (2 * rep.shape[0], ) + rep.shape[1:] tmp = concatenate_representations((rep, rep)) assert tmp.shape == expected_shape if 's' in rep.differentials: assert tmp.differentials['s'].shape == expected_shape # Try combining 4, just for something different for rep in reps: if not rep.shape: expected_shape = (4, ) else: expected_shape = (4 * rep.shape[0], ) + rep.shape[1:] tmp = concatenate_representations((rep, rep, rep, rep)) assert tmp.shape == expected_shape if 's' in rep.differentials: assert tmp.differentials['s'].shape == expected_shape # Test that combining pairs fails with pytest.raises(TypeError): concatenate_representations((reps[0], reps[1])) with pytest.raises(ValueError): concatenate_representations((reps[0], reps[5])) # Check that passing in a single object fails with pytest.raises(TypeError): concatenate_representations(reps[0])
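# Illustrative sketch (added; not part of the original suite): the shape rule
# exercised above is that concatenating N copies of a representation simply
# stacks along the first axis.
def test_concatenate_representations_shape_sketch():
    from astropy.coordinates import representation as r
    from astropy.coordinates.funcs import concatenate_representations

    rep = r.CartesianRepresentation(np.ones((3, 100)) * u.kpc)
    out = concatenate_representations((rep, rep))
    assert out.shape == (200,)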
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Tests for the SkyCoord class. Note that there are also SkyCoord tests in test_api_ape5.py """ import copy import pytest import numpy as np import numpy.testing as npt from astropy import units as u from astropy.tests.helper import assert_quantity_allclose as assert_allclose from astropy.coordinates.representation import REPRESENTATION_CLASSES from astropy.coordinates import (ICRS, FK4, FK5, Galactic, SkyCoord, Angle, SphericalRepresentation, CartesianRepresentation, UnitSphericalRepresentation, AltAz, BaseCoordinateFrame, Attribute, frame_transform_graph, RepresentationMapping) from astropy.coordinates import Latitude, EarthLocation from astropy.time import Time from astropy.utils import minversion, isiterable from astropy.utils.compat import NUMPY_LT_1_14 from astropy.units import allclose as quantity_allclose from astropy.io import fits from astropy.wcs import WCS RA = 1.0 * u.deg DEC = 2.0 * u.deg C_ICRS = ICRS(RA, DEC) C_FK5 = C_ICRS.transform_to(FK5) J2001 = Time('J2001') def allclose(a, b, rtol=0.0, atol=None): if atol is None: atol = 1.e-8 * getattr(a, 'unit', 1.) return quantity_allclose(a, b, rtol, atol) try: import scipy HAS_SCIPY = True except ImportError: HAS_SCIPY = False if HAS_SCIPY and minversion(scipy, '0.12.0', inclusive=False): OLDER_SCIPY = False else: OLDER_SCIPY = True def test_transform_to(): for frame in (FK5, FK5(equinox=Time('J1975.0')), FK4, FK4(equinox=Time('J1975.0')), SkyCoord(RA, DEC, frame='fk4', equinox='J1980')): c_frame = C_ICRS.transform_to(frame) s_icrs = SkyCoord(RA, DEC, frame='icrs') s_frame = s_icrs.transform_to(frame) assert allclose(c_frame.ra, s_frame.ra) assert allclose(c_frame.dec, s_frame.dec) assert allclose(c_frame.distance, s_frame.distance) # set up for parametrized test rt_sets = [] rt_frames = [ICRS, FK4, FK5, Galactic] for rt_frame0 in rt_frames: for rt_frame1 in rt_frames: for equinox0 in (None, 'J1975.0'): for obstime0 in (None, 'J1980.0'): for equinox1 in (None, 'J1975.0'): for obstime1 in (None, 'J1980.0'): rt_sets.append((rt_frame0, rt_frame1, equinox0, equinox1, obstime0, obstime1)) rt_args = ('frame0', 'frame1', 'equinox0', 'equinox1', 'obstime0', 'obstime1') @pytest.mark.parametrize(rt_args, rt_sets) def test_round_tripping(frame0, frame1, equinox0, equinox1, obstime0, obstime1): """ Test round tripping out and back using transform_to in every combination. 
""" attrs0 = {'equinox': equinox0, 'obstime': obstime0} attrs1 = {'equinox': equinox1, 'obstime': obstime1} # Remove None values attrs0 = dict((k, v) for k, v in attrs0.items() if v is not None) attrs1 = dict((k, v) for k, v in attrs1.items() if v is not None) # Go out and back sc = SkyCoord(RA, DEC, frame=frame0, **attrs0) # Keep only frame attributes for frame1 attrs1 = dict((attr, val) for attr, val in attrs1.items() if attr in frame1.get_frame_attr_names()) sc2 = sc.transform_to(frame1(**attrs1)) # When coming back only keep frame0 attributes for transform_to attrs0 = dict((attr, val) for attr, val in attrs0.items() if attr in frame0.get_frame_attr_names()) # also, if any are None, fill in with defaults for attrnm in frame0.get_frame_attr_names(): if attrs0.get(attrnm, None) is None: if attrnm == 'obstime' and frame0.get_frame_attr_names()[attrnm] is None: if 'equinox' in attrs0: attrs0[attrnm] = attrs0['equinox'] else: attrs0[attrnm] = frame0.get_frame_attr_names()[attrnm] sc_rt = sc2.transform_to(frame0(**attrs0)) if frame0 is Galactic: assert allclose(sc.l, sc_rt.l) assert allclose(sc.b, sc_rt.b) else: assert allclose(sc.ra, sc_rt.ra) assert allclose(sc.dec, sc_rt.dec) if equinox0: assert type(sc.equinox) is Time and sc.equinox == sc_rt.equinox if obstime0: assert type(sc.obstime) is Time and sc.obstime == sc_rt.obstime def test_coord_init_string(): """ Spherical or Cartesian represenation input coordinates. """ sc = SkyCoord('1d 2d') assert allclose(sc.ra, 1 * u.deg) assert allclose(sc.dec, 2 * u.deg) sc = SkyCoord('1d', '2d') assert allclose(sc.ra, 1 * u.deg) assert allclose(sc.dec, 2 * u.deg) sc = SkyCoord('1°2′3″', '2°3′4″') assert allclose(sc.ra, Angle('1°2′3″')) assert allclose(sc.dec, Angle('2°3′4″')) sc = SkyCoord('1°2′3″ 2°3′4″') assert allclose(sc.ra, Angle('1°2′3″')) assert allclose(sc.dec, Angle('2°3′4″')) with pytest.raises(ValueError) as err: SkyCoord('1d 2d 3d') assert "Cannot parse first argument data" in str(err) sc1 = SkyCoord('8 00 00 +5 00 00.0', unit=(u.hour, u.deg), frame='icrs') assert isinstance(sc1, SkyCoord) assert allclose(sc1.ra, Angle(120 * u.deg)) assert allclose(sc1.dec, Angle(5 * u.deg)) sc11 = SkyCoord('8h00m00s+5d00m00.0s', unit=(u.hour, u.deg), frame='icrs') assert isinstance(sc11, SkyCoord) assert allclose(sc1.ra, Angle(120 * u.deg)) assert allclose(sc1.dec, Angle(5 * u.deg)) sc2 = SkyCoord('8 00 -5 00 00.0', unit=(u.hour, u.deg), frame='icrs') assert isinstance(sc2, SkyCoord) assert allclose(sc2.ra, Angle(120 * u.deg)) assert allclose(sc2.dec, Angle(-5 * u.deg)) sc3 = SkyCoord('8 00 -5 00.6', unit=(u.hour, u.deg), frame='icrs') assert isinstance(sc3, SkyCoord) assert allclose(sc3.ra, Angle(120 * u.deg)) assert allclose(sc3.dec, Angle(-5.01 * u.deg)) sc4 = SkyCoord('J080000.00-050036.00', unit=(u.hour, u.deg), frame='icrs') assert isinstance(sc4, SkyCoord) assert allclose(sc4.ra, Angle(120 * u.deg)) assert allclose(sc4.dec, Angle(-5.01 * u.deg)) sc41 = SkyCoord('J080000+050036', unit=(u.hour, u.deg), frame='icrs') assert isinstance(sc41, SkyCoord) assert allclose(sc41.ra, Angle(120 * u.deg)) assert allclose(sc41.dec, Angle(+5.01 * u.deg)) sc5 = SkyCoord('8h00.6m -5d00.6m', unit=(u.hour, u.deg), frame='icrs') assert isinstance(sc5, SkyCoord) assert allclose(sc5.ra, Angle(120.15 * u.deg)) assert allclose(sc5.dec, Angle(-5.01 * u.deg)) sc6 = SkyCoord('8h00.6m -5d00.6m', unit=(u.hour, u.deg), frame='fk4') assert isinstance(sc6, SkyCoord) assert allclose(sc6.ra, Angle(120.15 * u.deg)) assert allclose(sc6.dec, Angle(-5.01 * u.deg)) sc61 = 
    assert isinstance(sc61, SkyCoord)
    assert allclose(sc61.ra, Angle(120.15 * u.deg))
    assert allclose(sc61.dec, Angle(-5.01 * u.deg))

    sc61 = SkyCoord('8h00.6-5d00.6', unit=(u.hour, u.deg), frame='fk4')
    assert isinstance(sc61, SkyCoord)
    assert allclose(sc61.ra, Angle(120.15 * u.deg))
    assert allclose(sc61.dec, Angle(-5.01 * u.deg))

    sc7 = SkyCoord("J1874221.60+122421.6", unit=u.deg)
    assert isinstance(sc7, SkyCoord)
    assert allclose(sc7.ra, Angle(187.706 * u.deg))
    assert allclose(sc7.dec, Angle(12.406 * u.deg))

    with pytest.raises(ValueError):
        SkyCoord('8 00 -5 00.6', unit=(u.deg, u.deg), frame='galactic')


def test_coord_init_unit():
    """
    Test variations of the unit keyword.
    """
    for unit in ('deg', 'deg,deg', ' deg , deg ', u.deg, (u.deg, u.deg),
                 np.array(['deg', 'deg'])):
        sc = SkyCoord(1, 2, unit=unit)
        assert allclose(sc.ra, Angle(1 * u.deg))
        assert allclose(sc.dec, Angle(2 * u.deg))

    for unit in ('hourangle', 'hourangle,hourangle', ' hourangle , hourangle ',
                 u.hourangle, [u.hourangle, u.hourangle]):
        sc = SkyCoord(1, 2, unit=unit)
        assert allclose(sc.ra, Angle(15 * u.deg))
        assert allclose(sc.dec, Angle(30 * u.deg))

    for unit in ('hourangle,deg', (u.hourangle, u.deg)):
        sc = SkyCoord(1, 2, unit=unit)
        assert allclose(sc.ra, Angle(15 * u.deg))
        assert allclose(sc.dec, Angle(2 * u.deg))

    for unit in ('deg,deg,deg,deg', [u.deg, u.deg, u.deg, u.deg], None):
        with pytest.raises(ValueError) as err:
            SkyCoord(1, 2, unit=unit)
        assert 'Unit keyword must have one to three unit values' in str(err)

    for unit in ('m', (u.m, u.deg), ''):
        with pytest.raises(u.UnitsError) as err:
            SkyCoord(1, 2, unit=unit)


def test_coord_init_list():
    """
    Spherical or Cartesian representation input coordinates.
    """
    sc = SkyCoord([('1d', '2d'),
                   (1 * u.deg, 2 * u.deg),
                   '1d 2d',
                   ('1°', '2°'),
                   '1° 2°'], unit='deg')
    assert allclose(sc.ra, Angle('1d'))
    assert allclose(sc.dec, Angle('2d'))

    with pytest.raises(ValueError) as err:
        SkyCoord(['1d 2d 3d'])
    assert "Cannot parse first argument data" in str(err)

    with pytest.raises(ValueError) as err:
        SkyCoord([('1d', '2d', '3d')])
    assert "Cannot parse first argument data" in str(err)

    sc = SkyCoord([1 * u.deg, 1 * u.deg], [2 * u.deg, 2 * u.deg])
    assert allclose(sc.ra, Angle('1d'))
    assert allclose(sc.dec, Angle('2d'))

    with pytest.raises(ValueError) as err:
        SkyCoord([1 * u.deg, 2 * u.deg])  # this list is taken as RA w/ missing dec
    assert "One or more elements of input sequence does not have a length" in str(err)


def test_coord_init_array():
    """
    Input in the form of a list array or numpy array
    """
    for a in (['1 2', '3 4'],
              [['1', '2'], ['3', '4']],
              [[1, 2], [3, 4]]):
        sc = SkyCoord(a, unit='deg')
        assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
        assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)

        sc = SkyCoord(np.array(a), unit='deg')
        assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
        assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)


def test_coord_init_representation():
    """
    Spherical or Cartesian representation input coordinates.
""" coord = SphericalRepresentation(lon=8 * u.deg, lat=5 * u.deg, distance=1 * u.kpc) sc = SkyCoord(coord, frame='icrs') assert allclose(sc.ra, coord.lon) assert allclose(sc.dec, coord.lat) assert allclose(sc.distance, coord.distance) with pytest.raises(ValueError) as err: SkyCoord(coord, frame='icrs', ra='1d') assert "conflicts with keyword argument 'ra'" in str(err) coord = CartesianRepresentation(1 * u.one, 2 * u.one, 3 * u.one) sc = SkyCoord(coord, frame='icrs') sc_cart = sc.represent_as(CartesianRepresentation) assert allclose(sc_cart.x, 1.0) assert allclose(sc_cart.y, 2.0) assert allclose(sc_cart.z, 3.0) def test_frame_init(): """ Different ways of providing the frame. """ sc = SkyCoord(RA, DEC, frame='icrs') assert sc.frame.name == 'icrs' sc = SkyCoord(RA, DEC, frame=ICRS) assert sc.frame.name == 'icrs' sc = SkyCoord(sc) assert sc.frame.name == 'icrs' sc = SkyCoord(C_ICRS) assert sc.frame.name == 'icrs' SkyCoord(C_ICRS, frame='icrs') assert sc.frame.name == 'icrs' with pytest.raises(ValueError) as err: SkyCoord(C_ICRS, frame='galactic') assert 'Cannot override frame=' in str(err) def test_attr_inheritance(): """ When initializing from an existing coord the representation attrs like equinox should be inherited to the SkyCoord. If there is a conflict then raise an exception. """ sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001') sc2 = SkyCoord(sc) assert sc2.equinox == sc.equinox assert sc2.obstime == sc.obstime assert allclose(sc2.ra, sc.ra) assert allclose(sc2.dec, sc.dec) assert allclose(sc2.distance, sc.distance) sc2 = SkyCoord(sc.frame) # Doesn't have equinox there so we get FK4 defaults assert sc2.equinox != sc.equinox assert sc2.obstime != sc.obstime assert allclose(sc2.ra, sc.ra) assert allclose(sc2.dec, sc.dec) assert allclose(sc2.distance, sc.distance) sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001') sc2 = SkyCoord(sc) assert sc2.equinox == sc.equinox assert sc2.obstime == sc.obstime assert allclose(sc2.ra, sc.ra) assert allclose(sc2.dec, sc.dec) assert allclose(sc2.distance, sc.distance) sc2 = SkyCoord(sc.frame) # sc.frame has equinox, obstime assert sc2.equinox == sc.equinox assert sc2.obstime == sc.obstime assert allclose(sc2.ra, sc.ra) assert allclose(sc2.dec, sc.dec) assert allclose(sc2.distance, sc.distance) def test_attr_conflicts(): """ Check conflicts resolution between coordinate attributes and init kwargs. 
""" sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001') # OK if attrs both specified but with identical values SkyCoord(sc, equinox='J1999', obstime='J2001') # OK because sc.frame doesn't have obstime SkyCoord(sc.frame, equinox='J1999', obstime='J2100') # Not OK if attrs don't match with pytest.raises(ValueError) as err: SkyCoord(sc, equinox='J1999', obstime='J2002') assert "Coordinate attribute 'obstime'=" in str(err) # Same game but with fk4 which has equinox and obstime frame attrs sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001') # OK if attrs both specified but with identical values SkyCoord(sc, equinox='J1999', obstime='J2001') # Not OK if SkyCoord attrs don't match with pytest.raises(ValueError) as err: SkyCoord(sc, equinox='J1999', obstime='J2002') assert "Frame attribute 'obstime' has conflicting" in str(err) # Not OK because sc.frame has different attrs with pytest.raises(ValueError) as err: SkyCoord(sc.frame, equinox='J1999', obstime='J2002') assert "Frame attribute 'obstime' has conflicting" in str(err) def test_frame_attr_getattr(): """ When accessing frame attributes like equinox, the value should come from self.frame when that object has the relevant attribute, otherwise from self. """ sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001') assert sc.equinox == 'J1999' # Just the raw value (not validated) assert sc.obstime == 'J2001' sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001') assert sc.equinox == Time('J1999') # Coming from the self.frame object assert sc.obstime == Time('J2001') sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999') assert sc.equinox == Time('J1999') assert sc.obstime == Time('J1999') def test_to_string(): """ Basic testing of converting SkyCoord to strings. This just tests for a single input coordinate and and 1-element list. It does not test the underlying `Angle.to_string` method itself. """ coord = '1h2m3s 1d2m3s' for wrap in (lambda x: x, lambda x: [x]): sc = SkyCoord(wrap(coord)) assert sc.to_string() == wrap('15.5125 1.03417') assert sc.to_string('dms') == wrap('15d30m45s 1d02m03s') assert sc.to_string('hmsdms') == wrap('01h02m03s +01d02m03s') with_kwargs = sc.to_string('hmsdms', precision=3, pad=True, alwayssign=True) assert with_kwargs == wrap('+01h02m03.000s +01d02m03.000s') def test_seps(): sc1 = SkyCoord(0 * u.deg, 1 * u.deg, frame='icrs') sc2 = SkyCoord(0 * u.deg, 2 * u.deg, frame='icrs') sep = sc1.separation(sc2) assert (sep - 1 * u.deg)/u.deg < 1e-10 with pytest.raises(ValueError): sc1.separation_3d(sc2) sc3 = SkyCoord(1 * u.deg, 1 * u.deg, distance=1 * u.kpc, frame='icrs') sc4 = SkyCoord(1 * u.deg, 1 * u.deg, distance=2 * u.kpc, frame='icrs') sep3d = sc3.separation_3d(sc4) assert sep3d == 1 * u.kpc def test_repr(): sc1 = SkyCoord(0 * u.deg, 1 * u.deg, frame='icrs') sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame='icrs', distance=1 * u.kpc) assert repr(sc1) == ('<SkyCoord (ICRS): (ra, dec) in deg\n' ' ({})>').format(' 0., 1.' if NUMPY_LT_1_14 else '0., 1.') assert repr(sc2) == ('<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)\n' ' ({})>').format(' 1., 1., 1.' if NUMPY_LT_1_14 else '1., 1., 1.') sc3 = SkyCoord(0.25 * u.deg, [1, 2.5] * u.deg, frame='icrs') assert repr(sc3).startswith('<SkyCoord (ICRS): (ra, dec) in deg\n') sc_default = SkyCoord(0 * u.deg, 1 * u.deg) assert repr(sc_default) == ('<SkyCoord (ICRS): (ra, dec) in deg\n' ' ({})>').format(' 0., 1.' 
if NUMPY_LT_1_14 else '0., 1.') @pytest.mark.remote_data def test_repr_altaz(): sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame='icrs', distance=1 * u.kpc) loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m) time = Time('2005-03-21 00:00:00') sc4 = sc2.transform_to(AltAz(location=loc, obstime=time)) assert repr(sc4).startswith("<SkyCoord (AltAz: obstime=2005-03-21 00:00:00.000, " "location=(-2309223., -3695529., " "-4641767.) m, pressure=0.0 hPa, " "temperature=0.0 deg_C, relative_humidity=0.0, " "obswl=1.0 micron): (az, alt, distance) in " "(deg, deg, m)\n") def test_ops(): """ Tests miscellaneous operations like `len` """ sc = SkyCoord(0 * u.deg, 1 * u.deg, frame='icrs') sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg, frame='icrs') sc_empty = SkyCoord([] * u.deg, [] * u.deg, frame='icrs') assert sc.isscalar assert not sc_arr.isscalar assert not sc_empty.isscalar with pytest.raises(TypeError): len(sc) assert len(sc_arr) == 2 assert len(sc_empty) == 0 assert bool(sc) assert bool(sc_arr) assert not bool(sc_empty) assert sc_arr[0].isscalar assert len(sc_arr[:1]) == 1 # A scalar shouldn't be indexable with pytest.raises(TypeError): sc[0:] # but it should be possible to just get an item sc_item = sc[()] assert sc_item.shape == () # and to turn it into an array sc_1d = sc[np.newaxis] assert sc_1d.shape == (1,) with pytest.raises(TypeError): iter(sc) assert not isiterable(sc) assert isiterable(sc_arr) assert isiterable(sc_empty) it = iter(sc_arr) assert next(it).dec == sc_arr[0].dec assert next(it).dec == sc_arr[1].dec with pytest.raises(StopIteration): next(it) def test_none_transform(): """ Ensure that transforming from a SkyCoord with no frame provided works like ICRS """ sc = SkyCoord(0 * u.deg, 1 * u.deg) sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg) sc2 = sc.transform_to(ICRS) assert sc.ra == sc2.ra and sc.dec == sc2.dec sc5 = sc.transform_to('fk5') assert sc5.ra == sc2.transform_to('fk5').ra sc_arr2 = sc_arr.transform_to(ICRS) sc_arr5 = sc_arr.transform_to('fk5') npt.assert_array_equal(sc_arr5.ra, sc_arr2.transform_to('fk5').ra) def test_position_angle(): c1 = SkyCoord(0*u.deg, 0*u.deg) c2 = SkyCoord(1*u.deg, 0*u.deg) assert_allclose(c1.position_angle(c2) - 90.0 * u.deg, 0*u.deg) c3 = SkyCoord(1*u.deg, 0.1*u.deg) assert c1.position_angle(c3) < 90*u.deg c4 = SkyCoord(0*u.deg, 1*u.deg) assert_allclose(c1.position_angle(c4), 0*u.deg) carr1 = SkyCoord(0*u.deg, [0, 1, 2]*u.deg) carr2 = SkyCoord([-1, -2, -3]*u.deg, [0.1, 1.1, 2.1]*u.deg) res = carr1.position_angle(carr2) assert res.shape == (3,) assert np.all(res < 360*u.degree) assert np.all(res > 270*u.degree) cicrs = SkyCoord(0*u.deg, 0*u.deg, frame='icrs') cfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5') # because of the frame transform, it's just a *bit* more than 90 degrees assert cicrs.position_angle(cfk5) > 90.0 * u.deg assert cicrs.position_angle(cfk5) < 91.0 * u.deg def test_position_angle_directly(): """Regression check for #3800: position_angle should accept floats.""" from astropy.coordinates.angle_utilities import position_angle result = position_angle(10., 20., 10., 20.) assert result.unit is u.radian assert result.value == 0. def test_sep_pa_equivalence(): """Regression check for bug in #5702. 
    PA and separation from object 1 to 2 should be consistent with those
    from 2 to 1
    """
    cfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5')
    cfk5B1950 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', equinox='B1950')
    # test with both default and explicit equinox #5722 and #3106
    sep_forward = cfk5.separation(cfk5B1950)
    sep_backward = cfk5B1950.separation(cfk5)
    assert sep_forward != 0 and sep_backward != 0
    assert_allclose(sep_forward, sep_backward)
    posang_forward = cfk5.position_angle(cfk5B1950)
    posang_backward = cfk5B1950.position_angle(cfk5)
    assert posang_forward != 0 and posang_backward != 0
    assert 179 < (posang_forward - posang_backward).wrap_at(360*u.deg).degree < 181
    dcfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', distance=1*u.pc)
    dcfk5B1950 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', equinox='B1950',
                          distance=1.*u.pc)
    sep3d_forward = dcfk5.separation_3d(dcfk5B1950)
    sep3d_backward = dcfk5B1950.separation_3d(dcfk5)
    assert sep3d_forward != 0 and sep3d_backward != 0
    assert_allclose(sep3d_forward, sep3d_backward)


def test_directional_offset_by():
    # Round-trip tests: where is sc2 from sc1?
    # Use those offsets from sc1 and verify you get to sc2.
    npoints = 7  # How many points when doing vectors of SkyCoords
    for sc1 in [SkyCoord(0*u.deg, -90*u.deg),  # South pole
                SkyCoord(0 * u.deg, 90 * u.deg),  # North pole
                SkyCoord(1*u.deg, 2*u.deg),
                SkyCoord(np.linspace(0, 359, npoints), np.linspace(-90, 90, npoints),
                         unit=u.deg, frame='fk4'),
                SkyCoord(np.linspace(359, 0, npoints), np.linspace(-90, 90, npoints),
                         unit=u.deg, frame='icrs'),
                SkyCoord(np.linspace(-3, 3, npoints), np.linspace(-90, 90, npoints),
                         unit=(u.rad, u.deg), frame='barycentricmeanecliptic')]:
        for sc2 in [SkyCoord(5*u.deg, 10*u.deg),
                    SkyCoord(np.linspace(0, 359, npoints), np.linspace(-90, 90, npoints),
                             unit=u.deg, frame='galactic')]:
            # Find the displacement from sc1 to sc2,
            posang = sc1.position_angle(sc2)
            sep = sc1.separation(sc2)

            # then do the offset from sc1 and verify that you are at sc2
            sc2a = sc1.directional_offset_by(position_angle=posang, separation=sep)
            assert np.max(np.abs(sc2.separation(sc2a).arcsec)) < 1e-3

    # Specific test cases
    # Go over the North pole a little way, and
    # over the South pole a long way, to get to same spot
    sc1 = SkyCoord(0*u.deg, 89*u.deg)
    for posang, sep in [(0*u.deg, 2*u.deg), (180*u.deg, 358*u.deg)]:
        sc2 = sc1.directional_offset_by(posang, sep)
        assert allclose([sc2.ra.degree, sc2.dec.degree], [180, 89])
        # Go twice as far to ensure that dec is actually changing
        # and that >360deg is supported
        sc2 = sc1.directional_offset_by(posang, 2*sep)
        assert allclose([sc2.ra.degree, sc2.dec.degree], [180, 87])

    # Verify that a separation of 180 deg in any direction gets to the antipode
    # and 360 deg returns to start
    sc1 = SkyCoord(10*u.deg, 47*u.deg)
    for posang in np.linspace(0, 377, npoints):
        sc2 = sc1.directional_offset_by(posang, 180*u.deg)
        assert allclose([sc2.ra.degree, sc2.dec.degree], [190, -47])
        sc2 = sc1.directional_offset_by(posang, 360*u.deg)
        assert allclose([sc2.ra.degree, sc2.dec.degree], [10, 47])

    # Verify that a 90 degree posang, which means East
    # corresponds to an increase in RA, by ~separation/cos(dec) and
    # a slight convergence to equator
    sc1 = SkyCoord(10*u.deg, 60*u.deg)
    sc2 = sc1.directional_offset_by(90*u.deg, 1.0*u.deg)
    assert 11.9 < sc2.ra.degree < 12.0
    assert 59.9 < sc2.dec.degree < 60.0


def test_table_to_coord():
    """
    Checks "end-to-end" use of `Table` with `SkyCoord` - the `Quantity`
    initializer is the intermediary that translates the table columns into
    something coordinates understands.
(Regression test for #1762 ) """ from astropy.table import Table, Column t = Table() t.add_column(Column(data=[1, 2, 3], name='ra', unit=u.deg)) t.add_column(Column(data=[4, 5, 6], name='dec', unit=u.deg)) c = SkyCoord(t['ra'], t['dec']) assert allclose(c.ra.to(u.deg), [1, 2, 3] * u.deg) assert allclose(c.dec.to(u.deg), [4, 5, 6] * u.deg) def assert_quantities_allclose(coord, q1s, attrs): """ Compare two tuples of quantities. This assumes that the values in q1 are of order(1) and uses atol=1e-13, rtol=0. It also asserts that the units of the two quantities are the *same*, in order to check that the representation output has the expected units. """ q2s = [getattr(coord, attr) for attr in attrs] assert len(q1s) == len(q2s) for q1, q2 in zip(q1s, q2s): assert q1.shape == q2.shape assert allclose(q1, q2, rtol=0, atol=1e-13 * q1.unit) # Sets of inputs corresponding to Galactic frame base_unit_attr_sets = [ ('spherical', u.karcsec, u.karcsec, u.kpc, Latitude, 'l', 'b', 'distance'), ('unitspherical', u.karcsec, u.karcsec, None, Latitude, 'l', 'b', None), ('physicsspherical', u.karcsec, u.karcsec, u.kpc, Angle, 'phi', 'theta', 'r'), ('cartesian', u.km, u.km, u.km, u.Quantity, 'u', 'v', 'w'), ('cylindrical', u.km, u.karcsec, u.km, Angle, 'rho', 'phi', 'z') ] units_attr_sets = [] for base_unit_attr_set in base_unit_attr_sets: repr_name = base_unit_attr_set[0] for representation in (repr_name, REPRESENTATION_CLASSES[repr_name]): for c1, c2, c3 in ((1, 2, 3), ([1], [2], [3])): for arrayify in True, False: if arrayify: c1 = np.array(c1) c2 = np.array(c2) c3 = np.array(c3) units_attr_sets.append(base_unit_attr_set + (representation, c1, c2, c3)) units_attr_args = ('repr_name', 'unit1', 'unit2', 'unit3', 'cls2', 'attr1', 'attr2', 'attr3', 'representation', 'c1', 'c2', 'c3') @pytest.mark.parametrize(units_attr_args, [x for x in units_attr_sets if x[0] != 'unitspherical']) def test_skycoord_three_components(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3, representation, c1, c2, c3): """ Tests positional inputs using components (COMP1, COMP2, COMP3) and various representations. Use weird units and Galactic frame. """ sc = SkyCoord(c1, c2, c3, unit=(unit1, unit2, unit3), representation_type=representation, frame=Galactic) assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3), (attr1, attr2, attr3)) sc = SkyCoord(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2), 1000*c3*u.Unit(unit3/1000), frame=Galactic, unit=(unit1, unit2, unit3), representation_type=representation) assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3), (attr1, attr2, attr3)) kwargs = {attr3: c3} sc = SkyCoord(c1, c2, unit=(unit1, unit2, unit3), frame=Galactic, representation_type=representation, **kwargs) assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3), (attr1, attr2, attr3)) kwargs = {attr1: c1, attr2: c2, attr3: c3} sc = SkyCoord(frame=Galactic, unit=(unit1, unit2, unit3), representation_type=representation, **kwargs) assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3), (attr1, attr2, attr3)) @pytest.mark.parametrize(units_attr_args, [x for x in units_attr_sets if x[0] in ('spherical', 'unitspherical')]) def test_skycoord_spherical_two_components(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3, representation, c1, c2, c3): """ Tests positional inputs using components (COMP1, COMP2) for spherical representations. Use weird units and Galactic frame. 
""" sc = SkyCoord(c1, c2, unit=(unit1, unit2), frame=Galactic, representation_type=representation) assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2)) sc = SkyCoord(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2), frame=Galactic, unit=(unit1, unit2, unit3), representation_type=representation) assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2)) kwargs = {attr1: c1, attr2: c2} sc = SkyCoord(frame=Galactic, unit=(unit1, unit2), representation_type=representation, **kwargs) assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2)) @pytest.mark.parametrize(units_attr_args, [x for x in units_attr_sets if x[0] != 'unitspherical']) def test_galactic_three_components(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3, representation, c1, c2, c3): """ Tests positional inputs using components (COMP1, COMP2, COMP3) and various representations. Use weird units and Galactic frame. """ sc = Galactic(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2), 1000*c3*u.Unit(unit3/1000), representation_type=representation) assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3), (attr1, attr2, attr3)) kwargs = {attr3: c3*unit3} sc = Galactic(c1*unit1, c2*unit2, representation_type=representation, **kwargs) assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3), (attr1, attr2, attr3)) kwargs = {attr1: c1*unit1, attr2: c2*unit2, attr3: c3*unit3} sc = Galactic(representation_type=representation, **kwargs) assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3), (attr1, attr2, attr3)) @pytest.mark.parametrize(units_attr_args, [x for x in units_attr_sets if x[0] in ('spherical', 'unitspherical')]) def test_galactic_spherical_two_components(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3, representation, c1, c2, c3): """ Tests positional inputs using components (COMP1, COMP2) for spherical representations. Use weird units and Galactic frame. 
""" sc = Galactic(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2), representation_type=representation) assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2)) sc = Galactic(c1*unit1, c2*unit2, representation_type=representation) assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2)) kwargs = {attr1: c1*unit1, attr2: c2*unit2} sc = Galactic(representation_type=representation, **kwargs) assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2)) @pytest.mark.parametrize(('repr_name', 'unit1', 'unit2', 'unit3', 'cls2', 'attr1', 'attr2', 'attr3'), [x for x in base_unit_attr_sets if x[0] != 'unitspherical']) def test_skycoord_coordinate_input(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3): c1, c2, c3 = 1, 2, 3 sc = SkyCoord([(c1, c2, c3)], unit=(unit1, unit2, unit3), representation_type=repr_name, frame='galactic') assert_quantities_allclose(sc, ([c1]*unit1, [c2]*unit2, [c3]*unit3), (attr1, attr2, attr3)) c1, c2, c3 = 1*unit1, 2*unit2, 3*unit3 sc = SkyCoord([(c1, c2, c3)], representation_type=repr_name, frame='galactic') assert_quantities_allclose(sc, ([1]*unit1, [2]*unit2, [3]*unit3), (attr1, attr2, attr3)) def test_skycoord_string_coordinate_input(): sc = SkyCoord('01 02 03 +02 03 04', unit='deg', representation_type='unitspherical') assert_quantities_allclose(sc, (Angle('01:02:03', unit='deg'), Angle('02:03:04', unit='deg')), ('ra', 'dec')) sc = SkyCoord(['01 02 03 +02 03 04'], unit='deg', representation_type='unitspherical') assert_quantities_allclose(sc, (Angle(['01:02:03'], unit='deg'), Angle(['02:03:04'], unit='deg')), ('ra', 'dec')) def test_units(): sc = SkyCoord(1, 2, 3, unit='m', representation_type='cartesian') # All get meters assert sc.x.unit is u.m assert sc.y.unit is u.m assert sc.z.unit is u.m sc = SkyCoord(1, 2*u.km, 3, unit='m', representation_type='cartesian') # All get u.m assert sc.x.unit is u.m assert sc.y.unit is u.m assert sc.z.unit is u.m sc = SkyCoord(1, 2, 3, unit=u.m, representation_type='cartesian') # All get u.m assert sc.x.unit is u.m assert sc.y.unit is u.m assert sc.z.unit is u.m sc = SkyCoord(1, 2, 3, unit='m, km, pc', representation_type='cartesian') assert_quantities_allclose(sc, (1*u.m, 2*u.km, 3*u.pc), ('x', 'y', 'z')) with pytest.raises(u.UnitsError) as err: SkyCoord(1, 2, 3, unit=(u.m, u.m), representation_type='cartesian') assert 'should have matching physical types' in str(err) SkyCoord(1, 2, 3, unit=(u.m, u.km, u.pc), representation_type='cartesian') assert_quantities_allclose(sc, (1*u.m, 2*u.km, 3*u.pc), ('x', 'y', 'z')) @pytest.mark.xfail def test_units_known_fail(): # should fail but doesn't => corner case oddity with pytest.raises(u.UnitsError): SkyCoord(1, 2, 3, unit=u.deg, representation_type='spherical') def test_nodata_failure(): with pytest.raises(ValueError): SkyCoord() @pytest.mark.parametrize(('mode', 'origin'), [('wcs', 0), ('all', 0), ('all', 1)]) def test_wcs_methods(mode, origin): from astropy.wcs import WCS from astropy.utils.data import get_pkg_data_contents from astropy.wcs.utils import pixel_to_skycoord header = get_pkg_data_contents('../../wcs/tests/data/maps/1904-66_TAN.hdr', encoding='binary') wcs = WCS(header) ref = SkyCoord(0.1 * u.deg, -89. 
* u.deg, frame='icrs') xp, yp = ref.to_pixel(wcs, mode=mode, origin=origin) # WCS is in FK5 so we need to transform back to ICRS new = pixel_to_skycoord(xp, yp, wcs, mode=mode, origin=origin).transform_to('icrs') assert_allclose(new.ra.degree, ref.ra.degree) assert_allclose(new.dec.degree, ref.dec.degree) # also try to round-trip with `from_pixel` scnew = SkyCoord.from_pixel(xp, yp, wcs, mode=mode, origin=origin).transform_to('icrs') assert_allclose(scnew.ra.degree, ref.ra.degree) assert_allclose(scnew.dec.degree, ref.dec.degree) # Also make sure the right type comes out class SkyCoord2(SkyCoord): pass scnew2 = SkyCoord2.from_pixel(xp, yp, wcs, mode=mode, origin=origin) assert scnew.__class__ is SkyCoord assert scnew2.__class__ is SkyCoord2 def test_frame_attr_transform_inherit(): """ Test that frame attributes get inherited as expected during transform. Driven by #3106. """ c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK5) c2 = c.transform_to(FK4) assert c2.equinox.value == 'B1950.000' assert c2.obstime.value == 'B1950.000' c2 = c.transform_to(FK4(equinox='J1975', obstime='J1980')) assert c2.equinox.value == 'J1975.000' assert c2.obstime.value == 'J1980.000' c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4) c2 = c.transform_to(FK5) assert c2.equinox.value == 'J2000.000' assert c2.obstime is None c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, obstime='J1980') c2 = c.transform_to(FK5) assert c2.equinox.value == 'J2000.000' assert c2.obstime.value == 'J1980.000' c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, equinox='J1975', obstime='J1980') c2 = c.transform_to(FK5) assert c2.equinox.value == 'J1975.000' assert c2.obstime.value == 'J1980.000' c2 = c.transform_to(FK5(equinox='J1990')) assert c2.equinox.value == 'J1990.000' assert c2.obstime.value == 'J1980.000' # The work-around for #5722 c = SkyCoord(1 * u.deg, 2 * u.deg, frame='fk5') c1 = SkyCoord(1 * u.deg, 2 * u.deg, frame='fk5', equinox='B1950.000') c2 = c1.transform_to(c) assert not c2.is_equivalent_frame(c) # counterintuitive, but documented assert c2.equinox.value == 'B1950.000' c3 = c1.transform_to(c, merge_attributes=False) assert c3.equinox.value == 'J2000.000' assert c3.is_equivalent_frame(c) def test_deepcopy(): c1 = SkyCoord(1 * u.deg, 2 * u.deg) c2 = copy.copy(c1) c3 = copy.deepcopy(c1) c4 = SkyCoord([1, 2] * u.m, [2, 3] * u.m, [3, 4] * u.m, representation_type='cartesian', frame='fk5', obstime='J1999.9', equinox='J1988.8') c5 = copy.deepcopy(c4) assert np.all(c5.x == c4.x) # and y and z assert c5.frame.name == c4.frame.name assert c5.obstime == c4.obstime assert c5.equinox == c4.equinox assert c5.representation_type == c4.representation_type def test_no_copy(): c1 = SkyCoord(np.arange(10.) * u.hourangle, np.arange(20., 30.) * u.deg) c2 = SkyCoord(c1, copy=False) # Note: c1.ra and c2.ra will *not* share memory, as these are recalculated # to be in "preferred" units. See discussion in #4883. assert np.may_share_memory(c1.data.lon, c2.data.lon) c3 = SkyCoord(c1, copy=True) assert not np.may_share_memory(c1.data.lon, c3.data.lon) def test_immutable(): c1 = SkyCoord(1 * u.deg, 2 * u.deg) with pytest.raises(AttributeError): c1.ra = 3.0 c1.foo = 42 assert c1.foo == 42 @pytest.mark.skipif(str('not HAS_SCIPY')) @pytest.mark.skipif(str('OLDER_SCIPY')) def test_search_around(): """ Test the search_around_* methods Here we don't actually test the values are right, just that the methods of SkyCoord work. 
The accuracy tests are in ``test_matching.py`` """ from astropy.utils import NumpyRNGContext with NumpyRNGContext(987654321): sc1 = SkyCoord(np.random.rand(20) * 360.*u.degree, (np.random.rand(20) * 180. - 90.)*u.degree) sc2 = SkyCoord(np.random.rand(100) * 360. * u.degree, (np.random.rand(100) * 180. - 90.)*u.degree) sc1ds = SkyCoord(ra=sc1.ra, dec=sc1.dec, distance=np.random.rand(20)*u.kpc) sc2ds = SkyCoord(ra=sc2.ra, dec=sc2.dec, distance=np.random.rand(100)*u.kpc) idx1_sky, idx2_sky, d2d_sky, d3d_sky = sc1.search_around_sky(sc2, 10*u.deg) idx1_3d, idx2_3d, d2d_3d, d3d_3d = sc1ds.search_around_3d(sc2ds, 250*u.pc) def test_init_with_frame_instance_keyword(): # Frame instance c1 = SkyCoord(3 * u.deg, 4 * u.deg, frame=FK5(equinox='J2010')) assert c1.equinox == Time('J2010') # Frame instance with data (data gets ignored) c2 = SkyCoord(3 * u.deg, 4 * u.deg, frame=FK5(1. * u.deg, 2 * u.deg, equinox='J2010')) assert c2.equinox == Time('J2010') assert allclose(c2.ra.degree, 3) assert allclose(c2.dec.degree, 4) # SkyCoord instance c3 = SkyCoord(3 * u.deg, 4 * u.deg, frame=c1) assert c3.equinox == Time('J2010') # Check duplicate arguments with pytest.raises(ValueError) as err: c = SkyCoord(3 * u.deg, 4 * u.deg, frame=FK5(equinox='J2010'), equinox='J2001') assert "Cannot specify frame attribute 'equinox'" in str(err) def test_guess_from_table(): from astropy.table import Table, Column from astropy.utils import NumpyRNGContext tab = Table() with NumpyRNGContext(987654321): tab.add_column(Column(data=np.random.rand(1000), unit='deg', name='RA[J2000]')) tab.add_column(Column(data=np.random.rand(1000), unit='deg', name='DEC[J2000]')) sc = SkyCoord.guess_from_table(tab) npt.assert_array_equal(sc.ra.deg, tab['RA[J2000]']) npt.assert_array_equal(sc.dec.deg, tab['DEC[J2000]']) # try without units in the table tab['RA[J2000]'].unit = None tab['DEC[J2000]'].unit = None # should fail if not given explicitly with pytest.raises(u.UnitsError): sc2 = SkyCoord.guess_from_table(tab) # but should work if provided sc2 = SkyCoord.guess_from_table(tab, unit=u.deg) npt.assert_array_equal(sc.ra.deg, tab['RA[J2000]']) npt.assert_array_equal(sc.dec.deg, tab['DEC[J2000]']) # should fail if two options are available - ambiguity bad! 
tab.add_column(Column(data=np.random.rand(1000), name='RA_J1900')) with pytest.raises(ValueError) as excinfo: sc3 = SkyCoord.guess_from_table(tab, unit=u.deg) assert 'J1900' in excinfo.value.args[0] and 'J2000' in excinfo.value.args[0] # should also fail if user specifies something already in the table, but # should succeed even if the user has to give one of the components tab.remove_column('RA_J1900') with pytest.raises(ValueError): sc3 = SkyCoord.guess_from_table(tab, ra=tab['RA[J2000]'], unit=u.deg) oldra = tab['RA[J2000]'] tab.remove_column('RA[J2000]') sc3 = SkyCoord.guess_from_table(tab, ra=oldra, unit=u.deg) npt.assert_array_equal(sc3.ra.deg, oldra) npt.assert_array_equal(sc3.dec.deg, tab['DEC[J2000]']) # check a few non-ICRS/spherical systems x, y, z = np.arange(3).reshape(3, 1) * u.pc l, b = np.arange(2).reshape(2, 1) * u.deg tabcart = Table([x, y, z], names=('x', 'y', 'z')) tabgal = Table([b, l], names=('b', 'l')) sc_cart = SkyCoord.guess_from_table(tabcart, representation_type='cartesian') npt.assert_array_equal(sc_cart.x, x) npt.assert_array_equal(sc_cart.y, y) npt.assert_array_equal(sc_cart.z, z) sc_gal = SkyCoord.guess_from_table(tabgal, frame='galactic') npt.assert_array_equal(sc_gal.l, l) npt.assert_array_equal(sc_gal.b, b) # also try some column names that *end* with the attribute name tabgal['b'].name = 'gal_b' tabgal['l'].name = 'gal_l' SkyCoord.guess_from_table(tabgal, frame='galactic') tabgal['gal_b'].name = 'blob' tabgal['gal_l'].name = 'central' with pytest.raises(ValueError): SkyCoord.guess_from_table(tabgal, frame='galactic') def test_skycoord_list_creation(): """ Test that SkyCoord can be created in a reasonable way with lists of SkyCoords (regression for #2702) """ sc = SkyCoord(ra=[1, 2, 3]*u.deg, dec=[4, 5, 6]*u.deg) sc0 = sc[0] sc2 = sc[2] scnew = SkyCoord([sc0, sc2]) assert np.all(scnew.ra == [1, 3]*u.deg) assert np.all(scnew.dec == [4, 6]*u.deg) # also check ranges sc01 = sc[:2] scnew2 = SkyCoord([sc01, sc2]) assert np.all(scnew2.ra == sc.ra) assert np.all(scnew2.dec == sc.dec) # now try with a mix of skycoord, frame, and repr objects frobj = ICRS(2*u.deg, 5*u.deg) reprobj = UnitSphericalRepresentation(3*u.deg, 6*u.deg) scnew3 = SkyCoord([sc0, frobj, reprobj]) assert np.all(scnew3.ra == sc.ra) assert np.all(scnew3.dec == sc.dec) # should *fail* if different frame attributes or types are passed in scfk5_j2000 = SkyCoord(1*u.deg, 4*u.deg, frame='fk5') with pytest.raises(ValueError): SkyCoord([sc0, scfk5_j2000]) scfk5_j2010 = SkyCoord(1*u.deg, 4*u.deg, frame='fk5', equinox='J2010') with pytest.raises(ValueError): SkyCoord([scfk5_j2000, scfk5_j2010]) # but they should inherit if they're all consistent scfk5_2_j2010 = SkyCoord(2*u.deg, 5*u.deg, frame='fk5', equinox='J2010') scfk5_3_j2010 = SkyCoord(3*u.deg, 6*u.deg, frame='fk5', equinox='J2010') scnew4 = SkyCoord([scfk5_j2010, scfk5_2_j2010, scfk5_3_j2010]) assert np.all(scnew4.ra == sc.ra) assert np.all(scnew4.dec == sc.dec) assert scnew4.equinox == Time('J2010') def test_nd_skycoord_to_string(): c = SkyCoord(np.ones((2, 2)), 1, unit=('deg', 'deg')) ts = c.to_string() assert np.all(ts.shape == c.shape) assert np.all(ts == u'1 1') def test_equiv_skycoord(): sci1 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs') sci2 = SkyCoord(1*u.deg, 3*u.deg, frame='icrs') assert sci1.is_equivalent_frame(sci1) assert sci1.is_equivalent_frame(sci2) assert sci1.is_equivalent_frame(ICRS()) assert not sci1.is_equivalent_frame(FK5()) with pytest.raises(TypeError): sci1.is_equivalent_frame(10) scf1 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5') 
    scf2 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5', equinox='J2005')

    # obstime is *not* an FK5 attribute, but we still want scf1 and scf3 to
    # come out different because they're part of SkyCoord
    scf3 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5', obstime='J2005')

    assert scf1.is_equivalent_frame(scf1)
    assert not scf1.is_equivalent_frame(sci1)
    assert scf1.is_equivalent_frame(FK5())

    assert not scf1.is_equivalent_frame(scf2)
    assert scf2.is_equivalent_frame(FK5(equinox='J2005'))

    assert not scf3.is_equivalent_frame(scf1)
    assert not scf3.is_equivalent_frame(FK5(equinox='J2005'))


def test_constellations():
    # the actual test for accuracy is in test_funcs - this is just meant to make
    # sure we get sensible answers
    sc = SkyCoord(135*u.deg, 65*u.deg)
    assert sc.get_constellation() == 'Ursa Major'
    assert sc.get_constellation(short_name=True) == 'UMa'

    scs = SkyCoord([135]*2*u.deg, [65]*2*u.deg)
    npt.assert_equal(scs.get_constellation(), ['Ursa Major']*2)
    npt.assert_equal(scs.get_constellation(short_name=True), ['UMa']*2)


@pytest.mark.remote_data
def test_constellations_with_nameresolve():
    assert SkyCoord.from_name('And I').get_constellation(short_name=True) == 'And'

    # you'd think "And ..." should be in Andromeda.  But you'd be wrong.
    assert SkyCoord.from_name('And VI').get_constellation() == 'Pegasus'

    # maybe it's because And VI isn't really a galaxy?
    assert SkyCoord.from_name('And XXII').get_constellation() == 'Pisces'

    assert SkyCoord.from_name('And XXX').get_constellation() == 'Cassiopeia'
    # ok maybe not

    # ok, but at least some of the others do make sense...
    assert SkyCoord.from_name('Coma Cluster').get_constellation(short_name=True) == 'Com'
    assert SkyCoord.from_name('Orion Nebula').get_constellation() == 'Orion'
    assert SkyCoord.from_name('Triangulum Galaxy').get_constellation() == 'Triangulum'


def test_getitem_representation():
    """
    Make sure current representation survives __getitem__ even if different
    from data representation.
    """
    sc = SkyCoord([1, 1] * u.deg, [2, 2] * u.deg)
    sc.representation_type = 'cartesian'
    assert sc[0].representation_type is CartesianRepresentation


def test_spherical_offsets():
    i00 = SkyCoord(0*u.arcmin, 0*u.arcmin, frame='icrs')
    i01 = SkyCoord(0*u.arcmin, 1*u.arcmin, frame='icrs')
    i10 = SkyCoord(1*u.arcmin, 0*u.arcmin, frame='icrs')
    i11 = SkyCoord(1*u.arcmin, 1*u.arcmin, frame='icrs')
    i22 = SkyCoord(2*u.arcmin, 2*u.arcmin, frame='icrs')

    dra, ddec = i00.spherical_offsets_to(i01)
    assert_allclose(dra, 0*u.arcmin)
    assert_allclose(ddec, 1*u.arcmin)

    dra, ddec = i00.spherical_offsets_to(i10)
    assert_allclose(dra, 1*u.arcmin)
    assert_allclose(ddec, 0*u.arcmin)

    dra, ddec = i10.spherical_offsets_to(i01)
    assert_allclose(dra, -1*u.arcmin)
    assert_allclose(ddec, 1*u.arcmin)

    dra, ddec = i11.spherical_offsets_to(i22)
    assert_allclose(ddec, 1*u.arcmin)
    assert 0*u.arcmin < dra < 1*u.arcmin

    fk5 = SkyCoord(0*u.arcmin, 0*u.arcmin, frame='fk5')

    with pytest.raises(ValueError):
        # different frames should fail
        i00.spherical_offsets_to(fk5)

    i1deg = ICRS(1*u.deg, 1*u.deg)
    dra, ddec = i00.spherical_offsets_to(i1deg)
    assert_allclose(dra, 1*u.deg)
    assert_allclose(ddec, 1*u.deg)

    # make sure an abbreviated array-based version of the above also works
    i00s = SkyCoord([0]*4*u.arcmin, [0]*4*u.arcmin, frame='icrs')
    i01s = SkyCoord([0]*4*u.arcmin, np.arange(4)*u.arcmin, frame='icrs')
    dra, ddec = i00s.spherical_offsets_to(i01s)
    assert_allclose(dra, 0*u.arcmin)
    assert_allclose(ddec, np.arange(4)*u.arcmin)


def test_frame_attr_changes():
    """
    This tests the case where a frame is added with a new frame attribute after
    a SkyCoord has been created.  This is necessary because SkyCoords get the
    attributes set at creation time, but the set of attributes can change as
    frames are added or removed from the transform graph.  This makes sure that
    everything continues to work consistently.
    """
    sc_before = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')

    assert 'fakeattr' not in dir(sc_before)

    class FakeFrame(BaseCoordinateFrame):
        fakeattr = Attribute()

    # doesn't matter what this does as long as it just puts the frame in the
    # transform graph
    transset = (ICRS, FakeFrame, lambda c, f: c)
    frame_transform_graph.add_transform(*transset)
    try:
        assert 'fakeattr' in dir(sc_before)
        assert sc_before.fakeattr is None

        sc_after1 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
        assert 'fakeattr' in dir(sc_after1)
        assert sc_after1.fakeattr is None

        sc_after2 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs', fakeattr=1)
        assert sc_after2.fakeattr == 1
    finally:
        frame_transform_graph.remove_transform(*transset)

    assert 'fakeattr' not in dir(sc_before)
    assert 'fakeattr' not in dir(sc_after1)
    assert 'fakeattr' not in dir(sc_after2)


def test_cache_clear_sc():
    from astropy.coordinates import SkyCoord

    i = SkyCoord(1*u.deg, 2*u.deg)

    # Add an in frame units version of the rep to the cache.
    repr(i)

    assert len(i.cache['representation']) == 2

    i.cache.clear()

    assert len(i.cache['representation']) == 0


def test_set_attribute_exceptions():
    """Ensure no attribute for any frame can be set directly.

    Though it is fine if the current frame does not have it."""
    sc = SkyCoord(1.*u.deg, 2.*u.deg, frame='fk5')
    assert hasattr(sc.frame, 'equinox')
    with pytest.raises(AttributeError):
        sc.equinox = 'B1950'

    assert sc.relative_humidity is None
    sc.relative_humidity = 0.5
    assert sc.relative_humidity == 0.5
    assert not hasattr(sc.frame, 'relative_humidity')


def test_extra_attributes():
    """Ensure any extra attributes are dealt with correctly.

    Regression test against #5743.
    """
    obstime_string = ['2017-01-01T00:00', '2017-01-01T00:10']
    obstime = Time(obstime_string)
    sc = SkyCoord([5, 10], [20, 30], unit=u.deg, obstime=obstime_string)
    assert not hasattr(sc.frame, 'obstime')
    assert type(sc.obstime) is Time
    assert sc.obstime.shape == (2,)
    assert np.all(sc.obstime == obstime)
    # ensure equivalency still works for more than one obstime.
    assert sc.is_equivalent_frame(sc)
    sc_1 = sc[1]
    assert sc_1.obstime == obstime[1]
    # Transforming to FK4 should use sc.obstime.
    sc_fk4 = sc.transform_to('fk4')
    assert np.all(sc_fk4.frame.obstime == obstime)
    # And transforming back should not lose it.
    sc2 = sc_fk4.transform_to('icrs')
    assert not hasattr(sc2.frame, 'obstime')
    assert np.all(sc2.obstime == obstime)
    # Ensure obstime gets taken from the SkyCoord if passed in directly.
    # (regression test for #5749).
    sc3 = SkyCoord([0., 1.], [2., 3.], unit='deg', frame=sc)
    assert np.all(sc3.obstime == obstime)
    # Finally, check that we can delete such attributes.
    del sc3.obstime
    assert sc3.obstime is None


def test_apply_space_motion():
    # use this 12 year period because it's a multiple of 4 to avoid the quirks
    # of leap years while having 2 leap seconds in it
    t1 = Time('2000-01-01T00:00')
    t2 = Time('2012-01-01T00:00')

    # Check a very simple case first:
    frame = ICRS(ra=10.*u.deg, dec=0*u.deg,
                 distance=10.*u.pc,
                 pm_ra_cosdec=0.1*u.deg/u.yr,
                 pm_dec=0*u.mas/u.yr,
                 radial_velocity=0*u.km/u.s)

    # Cases that should work (just testing input for now):
    c1 = SkyCoord(frame, obstime=t1, pressure=101*u.kPa)
    applied1 = c1.apply_space_motion(new_obstime=t2)
    applied2 = c1.apply_space_motion(dt=12*u.year)

    assert isinstance(applied1.frame, c1.frame.__class__)
    assert isinstance(applied2.frame, c1.frame.__class__)
    assert_allclose(applied1.ra, applied2.ra)
    assert_allclose(applied1.pm_ra, applied2.pm_ra)
    assert_allclose(applied1.dec, applied2.dec)
    assert_allclose(applied1.distance, applied2.distance)

    # ensure any frame attributes that were there before get passed through
    assert applied1.pressure == c1.pressure

    # there were 2 leap seconds between 2000 and 2012, so the difference in
    # the two forms of time evolution should be ~2 sec
    adt = np.abs(applied2.obstime - applied1.obstime)
    assert 1.9*u.second < adt.to(u.second) < 2.1*u.second

    c2 = SkyCoord(frame)
    applied3 = c2.apply_space_motion(dt=6*u.year)
    assert isinstance(applied3.frame, c1.frame.__class__)
    assert applied3.obstime is None

    # this should *not* be .6 deg due to space-motion on a sphere, but it
    # should be fairly close
    assert 0.5*u.deg < applied3.ra-c1.ra < .7*u.deg

    # the two cases should only match somewhat due to it being space motion, but
    # they should be at least this close
    assert quantity_allclose(applied1.ra-c1.ra, (applied3.ra-c1.ra)*2, atol=1e-3*u.deg)
    # but *not* this close
    assert not quantity_allclose(applied1.ra-c1.ra, (applied3.ra-c1.ra)*2, atol=1e-4*u.deg)

    with pytest.raises(ValueError):
        c2.apply_space_motion(new_obstime=t2)


def test_custom_frame_skycoord():
    # also regression check for the case from #7069
    class BlahBleeBlopFrame(BaseCoordinateFrame):
        default_representation = SphericalRepresentation
        # without a differential, SkyCoord creation fails
        # default_differential = SphericalDifferential

        _frame_specific_representation_info = {
            'spherical': [
                RepresentationMapping('lon', 'lon', 'recommended'),
                RepresentationMapping('lat', 'lat', 'recommended'),
                RepresentationMapping('distance', 'radius', 'recommended')
            ]
        }

    SkyCoord(lat=1*u.deg, lon=2*u.deg, frame=BlahBleeBlopFrame)


def test_user_friendly_pm_error():
    """
    This checks that a more user-friendly
error message is raised for the user if they pass, e.g., pm_ra instead of pm_ra_cosdec """ with pytest.raises(ValueError) as e: SkyCoord(ra=150*u.deg, dec=-11*u.deg, pm_ra=100*u.mas/u.yr, pm_dec=10*u.mas/u.yr) assert 'pm_ra_cosdec' in str(e.value) with pytest.raises(ValueError) as e: SkyCoord(l=150*u.deg, b=-11*u.deg, pm_l=100*u.mas/u.yr, pm_b=10*u.mas/u.yr, frame='galactic') assert 'pm_l_cosb' in str(e.value) # The special error should not turn on here: with pytest.raises(ValueError) as e: SkyCoord(x=1*u.pc, y=2*u.pc, z=3*u.pc, pm_ra=100*u.mas/u.yr, pm_dec=10*u.mas/u.yr, representation_type='cartesian') assert 'pm_ra_cosdec' not in str(e.value) def test_contained_by(): """ Test Skycoord.contained(wcs,image) """ header = """ WCSAXES = 2 / Number of coordinate axes CRPIX1 = 1045.0 / Pixel coordinate of reference point CRPIX2 = 1001.0 / Pixel coordinate of reference point PC1_1 = -0.00556448550786 / Coordinate transformation matrix element PC1_2 = -0.001042120133257 / Coordinate transformation matrix element PC2_1 = 0.001181477028705 / Coordinate transformation matrix element PC2_2 = -0.005590809742987 / Coordinate transformation matrix element CDELT1 = 1.0 / [deg] Coordinate increment at reference point CDELT2 = 1.0 / [deg] Coordinate increment at reference point CUNIT1 = 'deg' / Units of coordinate increment and value CUNIT2 = 'deg' / Units of coordinate increment and value CTYPE1 = 'RA---TAN' / TAN (gnomonic) projection + SIP distortions CTYPE2 = 'DEC--TAN' / TAN (gnomonic) projection + SIP distortions CRVAL1 = 250.34971683647 / [deg] Coordinate value at reference point CRVAL2 = 2.2808772582495 / [deg] Coordinate value at reference point LONPOLE = 180.0 / [deg] Native longitude of celestial pole LATPOLE = 2.2808772582495 / [deg] Native latitude of celestial pole RADESYS = 'ICRS' / Equatorial coordinate system MJD-OBS = 58612.339199259 / [d] MJD of observation matching DATE-OBS DATE-OBS= '2019-05-09T08:08:26.816Z' / ISO-8601 observation date matching MJD-OB NAXIS = 2 / NAXIS NAXIS1 = 2136 / length of first array dimension NAXIS2 = 2078 / length of second array dimension """ header = fits.Header.fromstring(header.strip(),'\n') test_wcs = WCS(header) coord = SkyCoord(254,2,unit='deg') assert coord.contained_by(test_wcs) == True coord = SkyCoord(240,2,unit='deg') assert coord.contained_by(test_wcs) == False img = np.zeros((2136,2078)) coord = SkyCoord(250,2,unit='deg') assert coord.contained_by(test_wcs, img) == True coord = SkyCoord(240,2,unit='deg') assert coord.contained_by(test_wcs, img) == False ra = np.array([254.2, 254.1]) dec = np.array([2, 12.1]) coords = SkyCoord(ra, dec, unit='deg') assert np.all(test_wcs.footprint_contains(coords) == np.array([True, False])) def test_none_differential_type(): """ This is a regression test for #8021 """ from astropy.coordinates import BaseCoordinateFrame class MockHeliographicStonyhurst(BaseCoordinateFrame): default_representation = SphericalRepresentation frame_specific_representation_info = { SphericalRepresentation: [RepresentationMapping(reprname='lon', framename='lon', defaultunit=u.deg), RepresentationMapping(reprname='lat', framename='lat', defaultunit=u.deg), RepresentationMapping(reprname='distance', framename='radius', defaultunit=None)] } fr = MockHeliographicStonyhurst(lon=1*u.deg, lat=2*u.deg, radius=10*u.au) SkyCoord(0*u.deg, fr.lat, fr.radius, frame=fr) # this was the failure
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
This includes tests for the Distance class and related calculations
"""

import pytest
import numpy as np
from numpy import testing as npt

from astropy import units as u
from astropy.units import allclose as quantity_allclose
from astropy.coordinates import Longitude, Latitude, Distance, CartesianRepresentation
from astropy.coordinates.builtin_frames import ICRS, Galactic
from astropy.tests.helper import catch_warnings
from astropy.utils.exceptions import AstropyWarning

try:
    import scipy  # pylint: disable=W0611
except ImportError:
    HAS_SCIPY = False
else:
    HAS_SCIPY = True


def test_distances():
    """
    Tests functionality for Coordinate class distances and cartesian
    transformations.
    """

    '''
    Distances can also be specified, and allow for a full 3D definition of a
    coordinate.
    '''

    # try all the different ways to initialize a Distance
    distance = Distance(12, u.parsec)
    Distance(40, unit=u.au)
    Distance(value=5, unit=u.kpc)

    # need to provide a unit
    with pytest.raises(u.UnitsError):
        Distance(12)

    # standard units are pre-defined
    npt.assert_allclose(distance.lyr, 39.138765325702551)
    npt.assert_allclose(distance.km, 370281309776063.0)

    # Coordinate objects can be assigned a distance object, giving them a full
    # 3D position
    c = Galactic(l=158.558650*u.degree, b=-43.350066*u.degree,
                 distance=Distance(12, u.parsec))

    # or initialize distances via redshifts - this is actually tested in the
    # function below that checks for scipy. This is kept here as an example
    # c.distance = Distance(z=0.2)  # uses current cosmology
    # with whatever your preferred cosmology may be
    # c.distance = Distance(z=0.2, cosmology=WMAP5)

    # Coordinate objects can be initialized with a distance using special
    # syntax
    c1 = Galactic(l=158.558650*u.deg, b=-43.350066*u.deg, distance=12 * u.kpc)

    # Coordinate objects can be instantiated with cartesian coordinates
    # Internally they will immediately be converted to two angles + a distance
    cart = CartesianRepresentation(x=2 * u.pc, y=4 * u.pc, z=8 * u.pc)
    c2 = Galactic(cart)

    sep12 = c1.separation_3d(c2)
    # returns a *3d* distance between the c1 and c2 coordinates
    # note that this does *not*
    assert isinstance(sep12, Distance)
    npt.assert_allclose(sep12.pc, 12005.784163916317, 10)

    '''
    All spherical coordinate systems with distances can be converted to
    cartesian coordinates.
    '''

    cartrep2 = c2.cartesian
    assert isinstance(cartrep2.x, u.Quantity)
    npt.assert_allclose(cartrep2.x.value, 2)
    npt.assert_allclose(cartrep2.y.value, 4)
    npt.assert_allclose(cartrep2.z.value, 8)

    # with no distance, the unit sphere is assumed when converting to cartesian
    c3 = Galactic(l=158.558650*u.degree, b=-43.350066*u.degree, distance=None)
    unitcart = c3.cartesian
    npt.assert_allclose(((unitcart.x**2 + unitcart.y**2 +
                          unitcart.z**2) ** 0.5).value, 1.0)

    # TODO: choose between these when CartesianRepresentation gets a definite
    # decision on whether or not it gets __add__
    #
    # CartesianRepresentation objects can be added and subtracted, which are
    # vector/elementwise they can also be given as arguments to a coordinate
    # system
    # csum = ICRS(c1.cartesian + c2.cartesian)
    csumrep = CartesianRepresentation(c1.cartesian.xyz + c2.cartesian.xyz)
    csum = ICRS(csumrep)

    npt.assert_allclose(csumrep.x.value, -8.12016610185)
    npt.assert_allclose(csumrep.y.value, 3.19380597435)
    npt.assert_allclose(csumrep.z.value, -8.2294483707)
    npt.assert_allclose(csum.ra.degree, 158.529401774)
    npt.assert_allclose(csum.dec.degree, -43.3235825777)
    npt.assert_allclose(csum.distance.kpc, 11.9942200501)


@pytest.mark.skipif(str('not HAS_SCIPY'))
def test_distances_scipy():
    """
    The distance-related tests that require scipy due to the cosmology
    module needing scipy integration routines
    """
    from astropy.cosmology import WMAP5

    # try different ways to initialize a Distance
    d4 = Distance(z=0.23)  # uses default cosmology - as of writing, WMAP7
    npt.assert_allclose(d4.z, 0.23, rtol=1e-8)

    d5 = Distance(z=0.23, cosmology=WMAP5)
    npt.assert_allclose(d5.compute_z(WMAP5), 0.23, rtol=1e-8)

    d6 = Distance(z=0.23, cosmology=WMAP5, unit=u.km)
    npt.assert_allclose(d6.value, 3.5417046898762366e+22)

    with pytest.raises(ValueError):
        Distance(cosmology=WMAP5, unit=u.km)

    with pytest.raises(ValueError):
        Distance()


def test_distance_change():

    ra = Longitude("4:08:15.162342", unit=u.hour)
    dec = Latitude("-41:08:15.162342", unit=u.degree)
    c1 = ICRS(ra, dec, Distance(1, unit=u.kpc))

    oldx = c1.cartesian.x.value
    assert abs(oldx - 0.35284083171901953) < 1e-10

    # first make sure distances are immutable
    with pytest.raises(AttributeError):
        c1.distance = Distance(2, unit=u.kpc)

    # now x should increase as the distance increases
    c2 = ICRS(ra, dec, Distance(2, unit=u.kpc))
    assert c2.cartesian.x.value == oldx * 2


def test_distance_is_quantity():
    """
    test that distance behaves like a proper quantity
    """

    Distance(2 * u.kpc)

    d = Distance([2, 3.1], u.kpc)

    assert d.shape == (2,)

    a = d.view(np.ndarray)
    q = d.view(u.Quantity)
    a[0] = 1.2
    q.value[1] = 5.4

    assert d[0].value == 1.2
    assert d[1].value == 5.4

    q = u.Quantity(d, copy=True)
    q.value[1] = 0
    assert q.value[1] == 0
    assert d.value[1] != 0

    # regression test against #2261
    d = Distance([2 * u.kpc, 250.
* u.pc]) assert d.unit is u.kpc assert np.all(d.value == np.array([2., 0.25])) def test_distmod(): d = Distance(10, u.pc) assert d.distmod.value == 0 d = Distance(distmod=20) assert d.distmod.value == 20 assert d.kpc == 100 d = Distance(distmod=-1., unit=u.au) npt.assert_allclose(d.value, 1301442.9440836983) with pytest.raises(ValueError): d = Distance(value=d, distmod=20) with pytest.raises(ValueError): d = Distance(z=.23, distmod=20) # check the Mpc/kpc/pc behavior assert Distance(distmod=1).unit == u.pc assert Distance(distmod=11).unit == u.kpc assert Distance(distmod=26).unit == u.Mpc assert Distance(distmod=-21).unit == u.AU # if an array, uses the mean of the log of the distances assert Distance(distmod=[1, 11, 26]).unit == u.kpc def test_parallax(): d = Distance(parallax=1*u.arcsecond) assert d.pc == 1. with pytest.raises(ValueError): d = Distance(15*u.pc, parallax=20*u.milliarcsecond) with pytest.raises(ValueError): d = Distance(parallax=20*u.milliarcsecond, distmod=20) # array plx = [1, 10, 100.]*u.mas d = Distance(parallax=plx) assert quantity_allclose(d.pc, [1000., 100., 10.]) assert quantity_allclose(plx, d.parallax) # check behavior for negative parallax with pytest.raises(ValueError): Distance(parallax=-1 * u.mas) with pytest.raises(ValueError): Distance(parallax=[10, 1, -1] * u.mas) with catch_warnings(AstropyWarning) as w: Distance(parallax=-1 * u.mas, allow_negative=True) assert len(w) > 0 with catch_warnings(AstropyWarning) as w: Distance(parallax=[10, 1, -1] * u.mas, allow_negative=True) assert len(w) > 0 def test_distance_in_coordinates(): """ test that distances can be created from quantities and that cartesian representations come out right """ ra = Longitude("4:08:15.162342", unit=u.hour) dec = Latitude("-41:08:15.162342", unit=u.degree) coo = ICRS(ra, dec, distance=2*u.kpc) cart = coo.cartesian assert isinstance(cart.xyz, u.Quantity) def test_negative_distance(): """ Test optional kwarg allow_negative """ with pytest.raises(ValueError): Distance([-2, 3.1], u.kpc) with pytest.raises(ValueError): Distance([-2, -3.1], u.kpc) with pytest.raises(ValueError): Distance(-2, u.kpc) d = Distance(-2, u.kpc, allow_negative=True) assert d.value == -2 def test_distance_comparison(): """Ensure comparisons of distances work (#2206, #2250)""" a = Distance(15*u.kpc) b = Distance(15*u.kpc) assert a == b c = Distance(1.*u.Mpc) assert a < c def test_distance_to_quantity_when_not_units_of_length(): """Any operation that leaves units other than those of length should turn a distance into a quantity (#2206, #2250)""" d = Distance(15*u.kpc) twice = 2.*d assert isinstance(twice, Distance) area = 4.*np.pi*d**2 assert area.unit.is_equivalent(u.m**2) assert not isinstance(area, Distance) assert type(area) is u.Quantity
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """Test initialization of angles not already covered by the API tests""" import pickle import pytest import numpy as np from astropy.coordinates.earth import EarthLocation, ELLIPSOIDS from astropy.coordinates.angles import Longitude, Latitude from astropy.units import allclose as quantity_allclose from astropy import units as u from astropy.time import Time from astropy import constants from astropy.coordinates.name_resolve import NameResolveError def allclose_m14(a, b, rtol=1.e-14, atol=None): if atol is None: atol = 1.e-14 * getattr(a, 'unit', 1) return quantity_allclose(a, b, rtol, atol) def allclose_m8(a, b, rtol=1.e-8, atol=None): if atol is None: atol = 1.e-8 * getattr(a, 'unit', 1) return quantity_allclose(a, b, rtol, atol) def isclose_m14(val, ref): return np.array([allclose_m14(v, r) for (v, r) in zip(val, ref)]) def isclose_m8(val, ref): return np.array([allclose_m8(v, r) for (v, r) in zip(val, ref)]) def vvd(val, valok, dval, func, test, status): """Mimic routine of erfa/src/t_erfa_c.c (to help copy & paste)""" assert quantity_allclose(val, valok * val.unit, atol=dval * val.unit) def test_gc2gd(): """Test that we reproduce erfa/src/t_erfa_c.c t_gc2gd""" x, y, z = (2e6, 3e6, 5.244e6) status = 0 # help for copy & paste of vvd location = EarthLocation.from_geocentric(x, y, z, u.m) e, p, h = location.to_geodetic('WGS84') e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m) vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status) vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status) vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status) e, p, h = location.to_geodetic('GRS80') e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m) vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status) vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status) vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status) e, p, h = location.to_geodetic('WGS72') e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m) vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e3", status) vvd(p, 0.97160181811015119, 1e-14, "eraGc2gd", "p3", status) vvd(h, 333.27707261303181, 1e-8, "eraGc2gd", "h3", status) def test_gd2gc(): """Test that we reproduce erfa/src/t_erfa_c.c t_gd2gc""" e = 3.1 * u.rad p = -0.5 * u.rad h = 2500.0 * u.m status = 0 # help for copy & paste of vvd location = EarthLocation.from_geodetic(e, p, h, ellipsoid='WGS84') xyz = tuple(v.to(u.m) for v in location.to_geocentric()) vvd(xyz[0], -5599000.5577049947, 1e-7, "eraGd2gc", "0/1", status) vvd(xyz[1], 233011.67223479203, 1e-7, "eraGd2gc", "1/1", status) vvd(xyz[2], -3040909.4706983363, 1e-7, "eraGd2gc", "2/1", status) location = EarthLocation.from_geodetic(e, p, h, ellipsoid='GRS80') xyz = tuple(v.to(u.m) for v in location.to_geocentric()) vvd(xyz[0], -5599000.5577260984, 1e-7, "eraGd2gc", "0/2", status) vvd(xyz[1], 233011.6722356703, 1e-7, "eraGd2gc", "1/2", status) vvd(xyz[2], -3040909.4706095476, 1e-7, "eraGd2gc", "2/2", status) location = EarthLocation.from_geodetic(e, p, h, ellipsoid='WGS72') xyz = tuple(v.to(u.m) for v in location.to_geocentric()) vvd(xyz[0], -5598998.7626301490, 1e-7, "eraGd2gc", "0/3", status) vvd(xyz[1], 233011.5975297822, 1e-7, "eraGd2gc", "1/3", status) vvd(xyz[2], -3040908.6861467111, 1e-7, "eraGd2gc", "2/3", status) class TestInput(): def setup(self): self.lon = Longitude([0., 45., 90., 135., 180., -180, -90, -45], u.deg, wrap_angle=180*u.deg) self.lat = Latitude([+0., 30., 60., +90., -90., -60., -30., 0.], u.deg) self.h 
= u.Quantity([0.1, 0.5, 1.0, -0.5, -1.0, +4.2, -11., -.1], u.m) self.location = EarthLocation.from_geodetic(self.lon, self.lat, self.h) self.x, self.y, self.z = self.location.to_geocentric() def test_default_ellipsoid(self): assert self.location.ellipsoid == EarthLocation._ellipsoid def test_geo_attributes(self): assert all(np.all(_1 == _2) for _1, _2 in zip(self.location.geodetic, self.location.to_geodetic())) assert all(np.all(_1 == _2) for _1, _2 in zip(self.location.geocentric, self.location.to_geocentric())) def test_attribute_classes(self): """Test that attribute classes are correct (and not EarthLocation)""" assert type(self.location.x) is u.Quantity assert type(self.location.y) is u.Quantity assert type(self.location.z) is u.Quantity assert type(self.location.lon) is Longitude assert type(self.location.lat) is Latitude assert type(self.location.height) is u.Quantity def test_input(self): """Check input is parsed correctly""" # units of length should be assumed geocentric geocentric = EarthLocation(self.x, self.y, self.z) assert np.all(geocentric == self.location) geocentric2 = EarthLocation(self.x.value, self.y.value, self.z.value, self.x.unit) assert np.all(geocentric2 == self.location) geodetic = EarthLocation(self.lon, self.lat, self.h) assert np.all(geodetic == self.location) geodetic2 = EarthLocation(self.lon.to_value(u.degree), self.lat.to_value(u.degree), self.h.to_value(u.m)) assert np.all(geodetic2 == self.location) geodetic3 = EarthLocation(self.lon, self.lat) assert allclose_m14(geodetic3.lon.value, self.location.lon.value) assert allclose_m14(geodetic3.lat.value, self.location.lat.value) assert not np.any(isclose_m14(geodetic3.height.value, self.location.height.value)) geodetic4 = EarthLocation(self.lon, self.lat, self.h[-1]) assert allclose_m14(geodetic4.lon.value, self.location.lon.value) assert allclose_m14(geodetic4.lat.value, self.location.lat.value) assert allclose_m14(geodetic4.height[-1].value, self.location.height[-1].value) assert not np.any(isclose_m14(geodetic4.height[:-1].value, self.location.height[:-1].value)) # check length unit preservation geocentric5 = EarthLocation(self.x, self.y, self.z, u.pc) assert geocentric5.unit is u.pc assert geocentric5.x.unit is u.pc assert geocentric5.height.unit is u.pc assert allclose_m14(geocentric5.x.to_value(self.x.unit), self.x.value) geodetic5 = EarthLocation(self.lon, self.lat, self.h.to(u.pc)) assert geodetic5.unit is u.pc assert geodetic5.x.unit is u.pc assert geodetic5.height.unit is u.pc assert allclose_m14(geodetic5.x.to_value(self.x.unit), self.x.value) def test_invalid_input(self): """Check invalid input raises exception""" # incomprehensible by either raises TypeError with pytest.raises(TypeError): EarthLocation(self.lon, self.y, self.z) # wrong units with pytest.raises(u.UnitsError): EarthLocation.from_geocentric(self.lon, self.lat, self.lat) # inconsistent units with pytest.raises(u.UnitsError): EarthLocation.from_geocentric(self.h, self.lon, self.lat) # floats without a unit with pytest.raises(TypeError): EarthLocation.from_geocentric(self.x.value, self.y.value, self.z.value) # inconsistent shape with pytest.raises(ValueError): EarthLocation.from_geocentric(self.x, self.y, self.z[:5]) # inconsistent units with pytest.raises(u.UnitsError): EarthLocation.from_geodetic(self.x, self.y, self.z) # inconsistent shape with pytest.raises(ValueError): EarthLocation.from_geodetic(self.lon, self.lat, self.h[:5]) def test_slicing(self): # test on WGS72 location, so we can check the ellipsoid is passed on locwgs72 = 
EarthLocation.from_geodetic(self.lon, self.lat, self.h, ellipsoid='WGS72') loc_slice1 = locwgs72[4] assert isinstance(loc_slice1, EarthLocation) assert loc_slice1.unit is locwgs72.unit assert loc_slice1.ellipsoid == locwgs72.ellipsoid == 'WGS72' assert not loc_slice1.shape with pytest.raises(TypeError): loc_slice1[0] with pytest.raises(IndexError): len(loc_slice1) loc_slice2 = locwgs72[4:6] assert isinstance(loc_slice2, EarthLocation) assert len(loc_slice2) == 2 assert loc_slice2.unit is locwgs72.unit assert loc_slice2.ellipsoid == locwgs72.ellipsoid assert loc_slice2.shape == (2,) loc_x = locwgs72['x'] assert type(loc_x) is u.Quantity assert loc_x.shape == locwgs72.shape assert loc_x.unit is locwgs72.unit def test_invalid_ellipsoid(self): # unknown ellipsoid with pytest.raises(ValueError): EarthLocation.from_geodetic(self.lon, self.lat, self.h, ellipsoid='foo') with pytest.raises(TypeError): EarthLocation(self.lon, self.lat, self.h, ellipsoid='foo') with pytest.raises(ValueError): self.location.ellipsoid = 'foo' with pytest.raises(ValueError): self.location.to_geodetic('foo') @pytest.mark.parametrize('ellipsoid', ELLIPSOIDS) def test_ellipsoid(self, ellipsoid): """Test that different ellipsoids are understood, and differ""" # check that heights differ for different ellipsoids # need different tolerance, since heights are relative to ~6000 km lon, lat, h = self.location.to_geodetic(ellipsoid) if ellipsoid == self.location.ellipsoid: assert allclose_m8(h.value, self.h.value) else: # Some heights are very similar for some; some lon, lat identical. assert not np.all(isclose_m8(h.value, self.h.value)) # given lon, lat, height, check that x,y,z differ location = EarthLocation.from_geodetic(self.lon, self.lat, self.h, ellipsoid=ellipsoid) if ellipsoid == self.location.ellipsoid: assert allclose_m14(location.z.value, self.z.value) else: assert not np.all(isclose_m14(location.z.value, self.z.value)) def test_to_value(self): loc = self.location loc_ndarray = loc.view(np.ndarray) assert np.all(loc.value == loc_ndarray) loc2 = self.location.to(u.km) loc2_ndarray = np.empty_like(loc_ndarray) for coo in 'x', 'y', 'z': loc2_ndarray[coo] = loc_ndarray[coo] / 1000. assert np.all(loc2.value == loc2_ndarray) loc2_value = self.location.to_value(u.km) assert np.all(loc2_value == loc2_ndarray) def test_pickling(): """Regression test against #4304.""" el = EarthLocation(0.*u.m, 6000*u.km, 6000*u.km) s = pickle.dumps(el) el2 = pickle.loads(s) assert el == el2 def test_repr_latex(): """ Regression test for issue #4542 """ somelocation = EarthLocation(lon='149:3:57.9', lat='-31:16:37.3') somelocation._repr_latex_() somelocation2 = EarthLocation(lon=[1., 2.]*u.deg, lat=[-1., 9.]*u.deg) somelocation2._repr_latex_() @pytest.mark.remote_data # TODO: this parametrize should include a second option with a valid Google API # key. For example, we should make an API key for Astropy, and add it to Travis # as an environment variable (for security). @pytest.mark.parametrize('google_api_key', [None]) def test_of_address(google_api_key): NYC_lon = -74.0 * u.deg NYC_lat = 40.7 * u.deg # ~10 km tolerance to address difference between OpenStreetMap and Google # for "New York, NY". This doesn't matter in practice because this test is # only used to verify that the query succeeded, not that the returned # position is precise. NYC_tol = 0.1 * u.deg # just a location try: loc = EarthLocation.of_address("New York, NY") except NameResolveError as e: # API limit might surface even here in Travis CI. 
if 'unknown failure with' not in str(e): pytest.xfail(str(e)) else: assert quantity_allclose(loc.lat, NYC_lat, atol=NYC_tol) assert quantity_allclose(loc.lon, NYC_lon, atol=NYC_tol) assert np.allclose(loc.height.value, 0.) # Put this one here as buffer to get around Google map API limit per sec. # no match: This always raises NameResolveError with pytest.raises(NameResolveError): EarthLocation.of_address("lkjasdflkja") if google_api_key is not None: # a location and height try: loc = EarthLocation.of_address("New York, NY", get_height=True) except NameResolveError as e: # Buffer above sometimes insufficient to get around API limit but # we also do not want to drag things out with time.sleep(0.195), # where 0.195 was empirically determined on some physical machine. pytest.xfail(str(e)) else: assert quantity_allclose(loc.lat, NYC_lat, atol=NYC_tol) assert quantity_allclose(loc.lon, NYC_lon, atol=NYC_tol) assert quantity_allclose(loc.height, 10.438*u.meter, atol=1.*u.cm) def test_geodetic_tuple(): lat = 2*u.deg lon = 10*u.deg height = 100*u.m el = EarthLocation.from_geodetic(lat=lat, lon=lon, height=height) res1 = el.to_geodetic() res2 = el.geodetic assert res1.lat == res2.lat and quantity_allclose(res1.lat, lat) assert res1.lon == res2.lon and quantity_allclose(res1.lon, lon) assert res1.height == res2.height and quantity_allclose(res1.height, height) def test_gravitational_redshift(): someloc = EarthLocation(lon=-87.7*u.deg, lat=37*u.deg) sometime = Time('2017-8-21 18:26:40') zg0 = someloc.gravitational_redshift(sometime) # should be of order ~few mm/s change per week zg_week = someloc.gravitational_redshift(sometime + 7 * u.day) assert 1.*u.mm/u.s < abs(zg_week - zg0) < 1*u.cm/u.s # ~cm/s over a half-year zg_halfyear = someloc.gravitational_redshift(sometime + 0.5 * u.yr) assert 1*u.cm/u.s < abs(zg_halfyear - zg0) < 1*u.dm/u.s # but when back to the same time in a year, should be tenths of mm # even over decades zg_year = someloc.gravitational_redshift(sometime - 20 * u.year) assert .1*u.mm/u.s < abs(zg_year - zg0) < 1*u.mm/u.s # Check mass adjustments. # If Jupiter and the moon are ignored, effect should be off by ~ .5 mm/s masses = {'sun': constants.G*constants.M_sun, 'jupiter': 0*constants.G*u.kg, 'moon': 0*constants.G*u.kg} zg_moonjup = someloc.gravitational_redshift(sometime, masses=masses) assert .1*u.mm/u.s < abs(zg_moonjup - zg0) < 1*u.mm/u.s # Check that simply not including the bodies gives the same result. assert zg_moonjup == someloc.gravitational_redshift(sometime, bodies=('sun',)) # And that earth can be given, even not as last argument assert zg_moonjup == someloc.gravitational_redshift( sometime, bodies=('earth', 'sun',)) # If the earth is also ignored, effect should be off by ~ 20 cm/s # This also tests the conversion of kg to gravitational units. masses['earth'] = 0*u.kg zg_moonjupearth = someloc.gravitational_redshift(sometime, masses=masses) assert 1*u.dm/u.s < abs(zg_moonjupearth - zg0) < 1*u.m/u.s # If all masses are zero, redshift should be 0 as well. masses['sun'] = 0*u.kg assert someloc.gravitational_redshift(sometime, masses=masses) == 0 with pytest.raises(KeyError): someloc.gravitational_redshift(sometime, bodies=('saturn',)) with pytest.raises(u.UnitsError): masses = {'sun': constants.G*constants.M_sun, 'jupiter': constants.G*constants.M_jup, 'moon': 1*u.km, # wrong units! 'earth': constants.G*constants.M_earth} someloc.gravitational_redshift(sometime, masses=masses)
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from astropy.units import allclose as quantity_allclose from astropy import units as u from astropy import constants from astropy.time import Time from astropy.coordinates.builtin_frames import ICRS, AltAz, LSR, GCRS, Galactic, FK5 from astropy.coordinates.baseframe import frame_transform_graph from astropy.coordinates.sites import get_builtin_sites from astropy.coordinates import (TimeAttribute, FunctionTransformWithFiniteDifference, get_sun, CartesianRepresentation, SphericalRepresentation, CartesianDifferential, SphericalDifferential, DynamicMatrixTransform) J2000 = Time('J2000') @pytest.mark.parametrize("dt, symmetric", [(1*u.second, True), (1*u.year, True), (1*u.second, False), (1*u.year, False)]) def test_faux_lsr(dt, symmetric): class LSR2(LSR): obstime = TimeAttribute(default=J2000) @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, LSR2, finite_difference_dt=dt, symmetric_finite_difference=symmetric) def icrs_to_lsr(icrs_coo, lsr_frame): dt = lsr_frame.obstime - J2000 offset = lsr_frame.v_bary * dt.to(u.second) return lsr_frame.realize_frame(icrs_coo.data.without_differentials() + offset) @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, LSR2, ICRS, finite_difference_dt=dt, symmetric_finite_difference=symmetric) def lsr_to_icrs(lsr_coo, icrs_frame): dt = lsr_coo.obstime - J2000 offset = lsr_coo.v_bary * dt.to(u.second) return icrs_frame.realize_frame(lsr_coo.data - offset) ic = ICRS(ra=12.3*u.deg, dec=45.6*u.deg, distance=7.8*u.au, pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr, radial_velocity=0*u.km/u.s) lsrc = ic.transform_to(LSR2()) assert quantity_allclose(ic.cartesian.xyz, lsrc.cartesian.xyz) idiff = ic.cartesian.differentials['s'] ldiff = lsrc.cartesian.differentials['s'] change = (ldiff.d_xyz - idiff.d_xyz).to(u.km/u.s) totchange = np.sum(change**2)**0.5 assert quantity_allclose(totchange, np.sum(lsrc.v_bary.d_xyz**2)**0.5) ic2 = ICRS(ra=120.3*u.deg, dec=45.6*u.deg, distance=7.8*u.au, pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=10*u.marcsec/u.yr, radial_velocity=1000*u.km/u.s) lsrc2 = ic2.transform_to(LSR2()) ic2_roundtrip = lsrc2.transform_to(ICRS) tot = np.sum(lsrc2.cartesian.differentials['s'].d_xyz**2)**0.5 assert np.abs(tot.to('km/s') - 1000*u.km/u.s) < 20*u.km/u.s assert quantity_allclose(ic2.cartesian.xyz, ic2_roundtrip.cartesian.xyz) def test_faux_fk5_galactic(): from astropy.coordinates.builtin_frames.galactic_transforms import fk5_to_gal, _gal_to_fk5 class Galactic2(Galactic): pass dt = 1000*u.s @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK5, Galactic2, finite_difference_dt=dt, symmetric_finite_difference=True, finite_difference_frameattr_name=None) def fk5_to_gal2(fk5_coo, gal_frame): trans = DynamicMatrixTransform(fk5_to_gal, FK5, Galactic2) return trans(fk5_coo, gal_frame) @frame_transform_graph.transform(FunctionTransformWithFiniteDifference, Galactic2, ICRS, finite_difference_dt=dt, symmetric_finite_difference=True, finite_difference_frameattr_name=None) def gal2_to_fk5(gal_coo, fk5_frame): trans = DynamicMatrixTransform(_gal_to_fk5, Galactic2, FK5) return trans(gal_coo, fk5_frame) c1 = FK5(ra=150*u.deg, dec=-17*u.deg, radial_velocity=83*u.km/u.s, pm_ra_cosdec=-41*u.mas/u.yr, pm_dec=16*u.mas/u.yr, distance=150*u.pc) c2 = c1.transform_to(Galactic2) c3 = c1.transform_to(Galactic) # compare the matrix and finite-difference calculations assert 
quantity_allclose(c2.pm_l_cosb, c3.pm_l_cosb, rtol=1e-4)
    assert quantity_allclose(c2.pm_b, c3.pm_b, rtol=1e-4)


def test_gcrs_diffs():
    time = Time('2017-01-01')
    gf = GCRS(obstime=time)
    sung = get_sun(time)  # should have very little vhelio

    # qtr-year off sun location should be the direction of ~ maximal vhelio
    qtrsung = get_sun(time-.25*u.year)

    # now we use those essentially as directions where the velocities should
    # be either maximal or minimal - along or perpendicular to Earth's orbit
    msungr = CartesianRepresentation(-sung.cartesian.xyz).represent_as(SphericalRepresentation)
    suni = ICRS(ra=msungr.lon, dec=msungr.lat, distance=100*u.au,
                pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
                radial_velocity=0*u.km/u.s)
    qtrsuni = ICRS(ra=qtrsung.ra, dec=qtrsung.dec, distance=100*u.au,
                   pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
                   radial_velocity=0*u.km/u.s)

    # Now we transform those parallel- and perpendicular-to Earth's orbit
    # directions to GCRS, which should shift the velocity to either include
    # the Earth's velocity vector, or not (for parallel and perpendicular,
    # respectively).
    sung = suni.transform_to(gf)
    qtrsung = qtrsuni.transform_to(gf)

    # should be high along the ecliptic-but-not-sun axis and
    # low along the sun axis
    assert np.abs(qtrsung.radial_velocity) > 30*u.km/u.s
    assert np.abs(qtrsung.radial_velocity) < 40*u.km/u.s
    assert np.abs(sung.radial_velocity) < 1*u.km/u.s

    suni2 = sung.transform_to(ICRS)
    assert np.all(np.abs(suni2.data.differentials['s'].d_xyz) < 3e-5*u.km/u.s)
    qtrisun2 = qtrsung.transform_to(ICRS)
    assert np.all(np.abs(qtrisun2.data.differentials['s'].d_xyz) < 3e-5*u.km/u.s)


@pytest.mark.remote_data
def test_altaz_diffs():
    time = Time('J2015') + np.linspace(-1, 1, 1000)*u.day
    loc = get_builtin_sites()['greenwich']
    aa = AltAz(obstime=time, location=loc)

    icoo = ICRS(np.zeros_like(time)*u.deg, 10*u.deg, 100*u.au,
                pm_ra_cosdec=np.zeros_like(time)*u.marcsec/u.yr,
                pm_dec=0*u.marcsec/u.yr,
                radial_velocity=0*u.km/u.s)

    acoo = icoo.transform_to(aa)

    # Make sure the change in radial velocity over ~2 days isn't too much
    # more than the rotation speed of the Earth - some excess is expected
    # because the orbit also shifts the RV, but it should be pretty small
    # over this short a time.
    assert np.ptp(acoo.radial_velocity)/2 < (2*np.pi*constants.R_earth/u.day)*1.2  # MAGIC NUMBER

    cdiff = acoo.data.differentials['s'].represent_as(CartesianDifferential,
                                                      acoo.data)

    # The "total" velocity should be > c, because the *tangential* velocity
    # isn't a true velocity, but rather an induced velocity due to the Earth's
    # rotation at a distance of 100 AU
    assert np.all(np.sum(cdiff.d_xyz**2, axis=0)**0.5 > constants.c)


_xfail = pytest.mark.xfail


@pytest.mark.parametrize('distance', [1000*u.au,
                                      10*u.pc,
                                      pytest.param(10*u.kpc, marks=_xfail),
                                      pytest.param(100*u.kpc, marks=_xfail)])
# TODO: make these not fail when the finite-difference numerical stability
# is improved
def test_numerical_limits(distance):
    """
    Tests the numerical stability of the default settings for the finite
    difference transformation calculation.  This is *known* to fail at
    >~1 kpc, but this may be improved in future versions.
""" time = Time('J2017') + np.linspace(-.5, .5, 100)*u.year icoo = ICRS(ra=0*u.deg, dec=10*u.deg, distance=distance, pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr, radial_velocity=0*u.km/u.s) gcoo = icoo.transform_to(GCRS(obstime=time)) rv = gcoo.radial_velocity.to('km/s') # if its a lot bigger than this - ~the maximal velocity shift along # the direction above with a small allowance for noise - finite-difference # rounding errors have ruined the calculation assert np.ptp(rv) < 65*u.km/u.s def diff_info_plot(frame, time): """ Useful for plotting a frame with multiple times. *Not* used in the testing suite per se, but extremely useful for interactive plotting of results from tests in this module. """ from matplotlib import pyplot as plt fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(20, 12)) ax1.plot_date(time.plot_date, frame.data.differentials['s'].d_xyz.to(u.km/u.s).T, fmt='-') ax1.legend(['x', 'y', 'z']) ax2.plot_date(time.plot_date, np.sum(frame.data.differentials['s'].d_xyz.to(u.km/u.s)**2, axis=0)**0.5, fmt='-') ax2.set_title('total') sd = frame.data.differentials['s'].represent_as(SphericalDifferential, frame.data) ax3.plot_date(time.plot_date, sd.d_distance.to(u.km/u.s), fmt='-') ax3.set_title('radial') ax4.plot_date(time.plot_date, sd.d_lat.to(u.marcsec/u.yr), fmt='-', label='lat') ax4.plot_date(time.plot_date, sd.d_lon.to(u.marcsec/u.yr), fmt='-', label='lon') return fig
""" This series of functions are used to generate the reference CSV files used by the accuracy tests. Running this as a comand-line script will generate them all. """ import os import numpy as np from astropy.table import Table, Column def ref_fk4_no_e_fk4(fnout='fk4_no_e_fk4.csv'): """ Accuracy tests for the FK4 (with no E-terms of aberration) to/from FK4 conversion, with arbitrary equinoxes and epoch of observation. """ import starlink.Ast as Ast np.random.seed(12345) N = 200 # Sample uniformly on the unit sphere. These will be either the FK4 # coordinates for the transformation to FK5, or the FK5 coordinates for the # transformation to FK4. ra = np.random.uniform(0., 360., N) dec = np.degrees(np.arcsin(np.random.uniform(-1., 1., N))) # Generate random observation epoch and equinoxes obstime = ["B{0:7.2f}".format(x) for x in np.random.uniform(1950., 2000., N)] ra_fk4ne, dec_fk4ne = [], [] ra_fk4, dec_fk4 = [], [] for i in range(N): # Set up frames for AST frame_fk4ne = Ast.SkyFrame('System=FK4-NO-E,Epoch={epoch},Equinox=B1950'.format(epoch=obstime[i])) frame_fk4 = Ast.SkyFrame('System=FK4,Epoch={epoch},Equinox=B1950'.format(epoch=obstime[i])) # FK4 to FK4 (no E-terms) frameset = frame_fk4.convert(frame_fk4ne) coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]])) ra_fk4ne.append(coords[0, 0]) dec_fk4ne.append(coords[1, 0]) # FK4 (no E-terms) to FK4 frameset = frame_fk4ne.convert(frame_fk4) coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]])) ra_fk4.append(coords[0, 0]) dec_fk4.append(coords[1, 0]) # Write out table to a CSV file t = Table() t.add_column(Column(name='obstime', data=obstime)) t.add_column(Column(name='ra_in', data=ra)) t.add_column(Column(name='dec_in', data=dec)) t.add_column(Column(name='ra_fk4ne', data=ra_fk4ne)) t.add_column(Column(name='dec_fk4ne', data=dec_fk4ne)) t.add_column(Column(name='ra_fk4', data=ra_fk4)) t.add_column(Column(name='dec_fk4', data=dec_fk4)) f = open(os.path.join('data', fnout), 'wb') f.write("# This file was generated with the {0} script, and the reference " "values were computed using AST\n".format(os.path.basename(__file__))) t.write(f, format='ascii', delimiter=',') def ref_fk4_no_e_fk5(fnout='fk4_no_e_fk5.csv'): """ Accuracy tests for the FK4 (with no E-terms of aberration) to/from FK5 conversion, with arbitrary equinoxes and epoch of observation. """ import starlink.Ast as Ast np.random.seed(12345) N = 200 # Sample uniformly on the unit sphere. These will be either the FK4 # coordinates for the transformation to FK5, or the FK5 coordinates for the # transformation to FK4. 
    ra = np.random.uniform(0., 360., N)
    dec = np.degrees(np.arcsin(np.random.uniform(-1., 1., N)))

    # Generate random observation epoch and equinoxes
    obstime = ["B{0:7.2f}".format(x) for x in np.random.uniform(1950., 2000., N)]
    equinox_fk4 = ["B{0:7.2f}".format(x) for x in np.random.uniform(1925., 1975., N)]
    equinox_fk5 = ["J{0:7.2f}".format(x) for x in np.random.uniform(1975., 2025., N)]

    ra_fk4, dec_fk4 = [], []
    ra_fk5, dec_fk5 = [], []

    for i in range(N):

        # Set up frames for AST
        frame_fk4 = Ast.SkyFrame('System=FK4-NO-E,Epoch={epoch},Equinox={equinox_fk4}'.format(epoch=obstime[i], equinox_fk4=equinox_fk4[i]))
        frame_fk5 = Ast.SkyFrame('System=FK5,Epoch={epoch},Equinox={equinox_fk5}'.format(epoch=obstime[i], equinox_fk5=equinox_fk5[i]))

        # FK4 to FK5
        frameset = frame_fk4.convert(frame_fk5)
        coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
        ra_fk5.append(coords[0, 0])
        dec_fk5.append(coords[1, 0])

        # FK5 to FK4
        frameset = frame_fk5.convert(frame_fk4)
        coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
        ra_fk4.append(coords[0, 0])
        dec_fk4.append(coords[1, 0])

    # Write out table to a CSV file
    t = Table()
    t.add_column(Column(name='equinox_fk4', data=equinox_fk4))
    t.add_column(Column(name='equinox_fk5', data=equinox_fk5))
    t.add_column(Column(name='obstime', data=obstime))
    t.add_column(Column(name='ra_in', data=ra))
    t.add_column(Column(name='dec_in', data=dec))
    t.add_column(Column(name='ra_fk5', data=ra_fk5))
    t.add_column(Column(name='dec_fk5', data=dec_fk5))
    t.add_column(Column(name='ra_fk4', data=ra_fk4))
    t.add_column(Column(name='dec_fk4', data=dec_fk4))
    f = open(os.path.join('data', fnout), 'w')
    f.write("# This file was generated with the {0} script, and the reference "
            "values were computed using AST\n".format(os.path.basename(__file__)))
    t.write(f, format='ascii', delimiter=',')


def ref_galactic_fk4(fnout='galactic_fk4.csv'):
    """
    Accuracy tests for the Galactic to/from FK4 conversion, with arbitrary
    equinoxes and epoch of observation.
    """
    import starlink.Ast as Ast

    np.random.seed(12345)

    N = 200

    # Sample uniformly on the unit sphere. These will be either the Galactic
    # coordinates for the transformation to FK4, or the FK4 coordinates for
    # the transformation to Galactic.
    lon = np.random.uniform(0., 360., N)
    lat = np.degrees(np.arcsin(np.random.uniform(-1., 1., N)))

    # Generate random observation epoch and equinoxes
    obstime = ["B{0:7.2f}".format(x) for x in np.random.uniform(1950., 2000., N)]
    equinox_fk4 = ["J{0:7.2f}".format(x) for x in np.random.uniform(1975., 2025., N)]

    lon_gal, lat_gal = [], []
    ra_fk4, dec_fk4 = [], []

    for i in range(N):

        # Set up frames for AST
        frame_gal = Ast.SkyFrame('System=Galactic,Epoch={epoch}'.format(epoch=obstime[i]))
        frame_fk4 = Ast.SkyFrame('System=FK4,Epoch={epoch},Equinox={equinox_fk4}'.format(epoch=obstime[i], equinox_fk4=equinox_fk4[i]))

        # Galactic to FK4
        frameset = frame_gal.convert(frame_fk4)
        coords = np.degrees(frameset.tran([[np.radians(lon[i])], [np.radians(lat[i])]]))
        ra_fk4.append(coords[0, 0])
        dec_fk4.append(coords[1, 0])

        # FK4 to Galactic
        frameset = frame_fk4.convert(frame_gal)
        coords = np.degrees(frameset.tran([[np.radians(lon[i])], [np.radians(lat[i])]]))
        lon_gal.append(coords[0, 0])
        lat_gal.append(coords[1, 0])

    # Write out table to a CSV file
    t = Table()
    t.add_column(Column(name='equinox_fk4', data=equinox_fk4))
    t.add_column(Column(name='obstime', data=obstime))
    t.add_column(Column(name='lon_in', data=lon))
    t.add_column(Column(name='lat_in', data=lat))
    t.add_column(Column(name='ra_fk4', data=ra_fk4))
    t.add_column(Column(name='dec_fk4', data=dec_fk4))
    t.add_column(Column(name='lon_gal', data=lon_gal))
    t.add_column(Column(name='lat_gal', data=lat_gal))
    f = open(os.path.join('data', fnout), 'w')
    f.write("# This file was generated with the {0} script, and the reference "
            "values were computed using AST\n".format(os.path.basename(__file__)))
    t.write(f, format='ascii', delimiter=',')


def ref_icrs_fk5(fnout='icrs_fk5.csv'):
    """
    Accuracy tests for the ICRS to/from FK5 conversion, with arbitrary
    equinoxes and epoch of observation.
    """
    import starlink.Ast as Ast

    np.random.seed(12345)

    N = 200

    # Sample uniformly on the unit sphere. These will be either the ICRS
    # coordinates for the transformation to FK5, or the FK5 coordinates for the
    # transformation to ICRS.
    ra = np.random.uniform(0., 360., N)
    dec = np.degrees(np.arcsin(np.random.uniform(-1., 1., N)))

    # Generate random observation epoch and equinoxes
    obstime = ["B{0:7.2f}".format(x) for x in np.random.uniform(1950., 2000., N)]
    equinox_fk5 = ["J{0:7.2f}".format(x) for x in np.random.uniform(1975., 2025., N)]

    ra_icrs, dec_icrs = [], []
    ra_fk5, dec_fk5 = [], []

    for i in range(N):

        # Set up frames for AST
        frame_icrs = Ast.SkyFrame('System=ICRS,Epoch={epoch}'.format(epoch=obstime[i]))
        frame_fk5 = Ast.SkyFrame('System=FK5,Epoch={epoch},Equinox={equinox_fk5}'.format(epoch=obstime[i], equinox_fk5=equinox_fk5[i]))

        # ICRS to FK5
        frameset = frame_icrs.convert(frame_fk5)
        coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
        ra_fk5.append(coords[0, 0])
        dec_fk5.append(coords[1, 0])

        # FK5 to ICRS
        frameset = frame_fk5.convert(frame_icrs)
        coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
        ra_icrs.append(coords[0, 0])
        dec_icrs.append(coords[1, 0])

    # Write out table to a CSV file
    t = Table()
    t.add_column(Column(name='equinox_fk5', data=equinox_fk5))
    t.add_column(Column(name='obstime', data=obstime))
    t.add_column(Column(name='ra_in', data=ra))
    t.add_column(Column(name='dec_in', data=dec))
    t.add_column(Column(name='ra_fk5', data=ra_fk5))
    t.add_column(Column(name='dec_fk5', data=dec_fk5))
    t.add_column(Column(name='ra_icrs', data=ra_icrs))
    t.add_column(Column(name='dec_icrs', data=dec_icrs))
    f = open(os.path.join('data', fnout), 'w')
    f.write("# This file was generated with the {0} script, and the reference "
            "values were computed using AST\n".format(os.path.basename(__file__)))
    t.write(f, format='ascii', delimiter=',')


if __name__ == '__main__':
    ref_fk4_no_e_fk4()
    ref_fk4_no_e_fk5()
    ref_galactic_fk4()
    ref_icrs_fk5()
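

# Illustrative sketch (an assumption for this document, not part of the
# generation machinery above): after running this script, a generated
# reference file can be read back with the astropy ascii reader for a quick
# sanity check.  The '#' header line written above is treated as a comment
# and skipped automatically.
def _read_back(fnout='fk4_no_e_fk4.csv'):
    """Read a generated reference CSV back in; N = 200 rows are expected."""
    t = Table.read(os.path.join('data', fnout), format='ascii',
                   delimiter=',')
    assert len(t) == 200
    return t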
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from astropy import units as u from astropy.coordinates.builtin_frames import Galactic, FK4 from astropy.time import Time from astropy.table import Table from astropy.coordinates.angle_utilities import angular_separation from astropy.utils.data import get_pkg_data_contents # the number of tests to run from . import N_ACCURACY_TESTS TOLERANCE = 0.3 # arcseconds def test_galactic_fk4(): lines = get_pkg_data_contents('data/galactic_fk4.csv').split('\n') t = Table.read(lines, format='ascii', delimiter=',', guess=False) if N_ACCURACY_TESTS >= len(t): idxs = range(len(t)) else: idxs = np.random.randint(len(t), size=N_ACCURACY_TESTS) diffarcsec1 = [] diffarcsec2 = [] for i in idxs: # Extract row r = t[int(i)] # int here is to get around a py 3.x astropy.table bug # Galactic to FK4 c1 = Galactic(l=r['lon_in']*u.deg, b=r['lat_in']*u.deg) c2 = c1.transform_to(FK4(equinox=Time(r['equinox_fk4']))) # Find difference diff = angular_separation(c2.ra.radian, c2.dec.radian, np.radians(r['ra_fk4']), np.radians(r['dec_fk4'])) diffarcsec1.append(np.degrees(diff) * 3600.) # FK4 to Galactic c1 = FK4(ra=r['lon_in']*u.deg, dec=r['lat_in']*u.deg, obstime=Time(r['obstime']), equinox=Time(r['equinox_fk4'])) c2 = c1.transform_to(Galactic) # Find difference diff = angular_separation(c2.l.radian, c2.b.radian, np.radians(r['lon_gal']), np.radians(r['lat_gal'])) diffarcsec2.append(np.degrees(diff) * 3600.) np.testing.assert_array_less(diffarcsec1, TOLERANCE) np.testing.assert_array_less(diffarcsec2, TOLERANCE)
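

# A minimal sketch distilling the arcsecond-difference computation repeated
# above (illustrative only; this helper is not part of the original test):
# angular_separation works in radians, so degree inputs are converted first
# and the result is scaled from degrees to arcseconds.
def _separation_arcsec(lon1_deg, lat1_deg, lon2_deg, lat2_deg):
    """Angular separation between two (lon, lat) points, in arcseconds."""
    diff = angular_separation(np.radians(lon1_deg), np.radians(lat1_deg),
                              np.radians(lon2_deg), np.radians(lat2_deg))
    return np.degrees(diff) * 3600.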
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from astropy import units as u from astropy.coordinates.builtin_frames import ICRS, FK5 from astropy.time import Time from astropy.table import Table from astropy.coordinates.angle_utilities import angular_separation from astropy.utils.data import get_pkg_data_contents # the number of tests to run from . import N_ACCURACY_TESTS TOLERANCE = 0.03 # arcseconds def test_icrs_fk5(): lines = get_pkg_data_contents('data/icrs_fk5.csv').split('\n') t = Table.read(lines, format='ascii', delimiter=',', guess=False) if N_ACCURACY_TESTS >= len(t): idxs = range(len(t)) else: idxs = np.random.randint(len(t), size=N_ACCURACY_TESTS) diffarcsec1 = [] diffarcsec2 = [] for i in idxs: # Extract row r = t[int(i)] # int here is to get around a py 3.x astropy.table bug # ICRS to FK5 c1 = ICRS(ra=r['ra_in']*u.deg, dec=r['dec_in']*u.deg) c2 = c1.transform_to(FK5(equinox=Time(r['equinox_fk5']))) # Find difference diff = angular_separation(c2.ra.radian, c2.dec.radian, np.radians(r['ra_fk5']), np.radians(r['dec_fk5'])) diffarcsec1.append(np.degrees(diff) * 3600.) # FK5 to ICRS c1 = FK5(ra=r['ra_in']*u.deg, dec=r['dec_in']*u.deg, equinox=Time(r['equinox_fk5'])) c2 = c1.transform_to(ICRS) # Find difference diff = angular_separation(c2.ra.radian, c2.dec.radian, np.radians(r['ra_icrs']), np.radians(r['dec_icrs'])) diffarcsec2.append(np.degrees(diff) * 3600.) np.testing.assert_array_less(diffarcsec1, TOLERANCE) np.testing.assert_array_less(diffarcsec2, TOLERANCE)
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import numpy as np

from astropy import units as u
from astropy.coordinates.builtin_frames import FK4NoETerms, FK4
from astropy.time import Time
from astropy.table import Table
from astropy.coordinates.angle_utilities import angular_separation
from astropy.utils.data import get_pkg_data_contents

# the number of tests to run
from . import N_ACCURACY_TESTS

# It looks as though SLALIB, which AST relies on, assumes a simplified version
# of the e-terms correction, so we have to up the tolerance a bit to get things
# to agree.
TOLERANCE = 1.e-5  # arcseconds


def test_fk4_no_e_fk4():
    lines = get_pkg_data_contents('data/fk4_no_e_fk4.csv').split('\n')
    t = Table.read(lines, format='ascii', delimiter=',', guess=False)

    if N_ACCURACY_TESTS >= len(t):
        idxs = range(len(t))
    else:
        idxs = np.random.randint(len(t), size=N_ACCURACY_TESTS)

    diffarcsec1 = []
    diffarcsec2 = []
    for i in idxs:
        # Extract row
        r = t[int(i)]  # int here is to get around a py 3.x astropy.table bug

        # FK4 to FK4NoETerms
        c1 = FK4(ra=r['ra_in']*u.deg, dec=r['dec_in']*u.deg,
                 obstime=Time(r['obstime']))
        c2 = c1.transform_to(FK4NoETerms)

        # Find difference
        diff = angular_separation(c2.ra.radian, c2.dec.radian,
                                  np.radians(r['ra_fk4ne']),
                                  np.radians(r['dec_fk4ne']))
        diffarcsec1.append(np.degrees(diff) * 3600.)

        # FK4NoETerms to FK4
        c1 = FK4NoETerms(ra=r['ra_in']*u.deg, dec=r['dec_in']*u.deg,
                         obstime=Time(r['obstime']))
        c2 = c1.transform_to(FK4)

        # Find difference
        diff = angular_separation(c2.ra.radian, c2.dec.radian,
                                  np.radians(r['ra_fk4']),
                                  np.radians(r['dec_fk4']))
        diffarcsec2.append(np.degrees(diff) * 3600.)

    np.testing.assert_array_less(diffarcsec1, TOLERANCE)
    np.testing.assert_array_less(diffarcsec2, TOLERANCE)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Accuracy tests for Ecliptic coordinate systems.
"""
import numpy as np
import pytest

from astropy.units import allclose as quantity_allclose
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates.builtin_frames import (
    FK5, ICRS, GCRS, GeocentricMeanEcliptic, BarycentricMeanEcliptic,
    HeliocentricMeanEcliptic, GeocentricTrueEcliptic, BarycentricTrueEcliptic,
    HeliocentricTrueEcliptic, HeliocentricEclipticIAU76)
from astropy.constants import R_sun, R_earth


def test_against_pytpm_doc_example():
    """
    Check that Astropy's Ecliptic systems give answers consistent with pyTPM

    Currently this is only testing against the example given in the pytpm
    docs
    """
    fk5_in = SkyCoord('12h22m54.899s', '15d49m20.57s', frame=FK5(equinox='J2000'))
    pytpm_out = BarycentricMeanEcliptic(lon=178.78256462*u.deg,
                                        lat=16.7597002513*u.deg,
                                        equinox='J2000')
    astropy_out = fk5_in.transform_to(pytpm_out)

    assert pytpm_out.separation(astropy_out) < (1*u.arcsec)


def test_ecliptic_heliobary():
    """
    Check that the ecliptic transformations for heliocentric and barycentric
    at least more or less make sense
    """
    icrs = ICRS(1*u.deg, 2*u.deg, distance=1.5*R_sun)

    bary = icrs.transform_to(BarycentricMeanEcliptic)
    helio = icrs.transform_to(HeliocentricMeanEcliptic)

    # make sure there's a sizable distance shift - in 3d hundreds of km, but
    # this is 1D so we allow it to be somewhat smaller
    assert np.abs(bary.distance - helio.distance) > 1*u.km

    # now make something that's got the location of helio but in bary's frame.
    # this is a convenience to allow `separation` to work as expected
    helio_in_bary_frame = bary.realize_frame(helio.cartesian)
    assert bary.separation(helio_in_bary_frame) > 1*u.arcmin


@pytest.mark.parametrize(('trueframe', 'meanframe'),
                         [(BarycentricTrueEcliptic, BarycentricMeanEcliptic),
                          (HeliocentricTrueEcliptic, HeliocentricMeanEcliptic),
                          (GeocentricTrueEcliptic, GeocentricMeanEcliptic),
                          (HeliocentricEclipticIAU76, HeliocentricMeanEcliptic)])
def test_ecliptic_true_mean(trueframe, meanframe):
    """
    Check that the ecliptic true/mean transformations at least roundtrip
    """
    icrs = ICRS(1*u.deg, 2*u.deg, distance=1.5*R_sun)

    truecoo = icrs.transform_to(trueframe)
    meancoo = truecoo.transform_to(meanframe)
    truecoo2 = meancoo.transform_to(trueframe)

    assert not quantity_allclose(truecoo.cartesian.xyz, meancoo.cartesian.xyz)
    assert quantity_allclose(truecoo.cartesian.xyz, truecoo2.cartesian.xyz)


def test_ecl_geo():
    """
    Check that the geocentric version at least gets well away from GCRS.  For a
    true "accuracy" test we need a comparison dataset that is similar to the
    geocentric/GCRS comparison we want to do here.  Contributions welcome!
    """
    gcrs = GCRS(10*u.deg, 20*u.deg, distance=1.5*R_earth)
    gecl = gcrs.transform_to(GeocentricMeanEcliptic)

    assert quantity_allclose(gecl.distance, gcrs.distance)


def test_arraytransforms():
    """
    Test that transforms to/from ecliptic coordinates work on array coordinates
    (not testing for accuracy.)
""" ra = np.ones((4, ), dtype=float) * u.deg dec = 2*np.ones((4, ), dtype=float) * u.deg distance = np.ones((4, ), dtype=float) * u.au test_icrs = ICRS(ra=ra, dec=dec, distance=distance) test_gcrs = GCRS(test_icrs.data) bary_arr = test_icrs.transform_to(BarycentricMeanEcliptic) assert bary_arr.shape == ra.shape helio_arr = test_icrs.transform_to(HeliocentricMeanEcliptic) assert helio_arr.shape == ra.shape geo_arr = test_gcrs.transform_to(GeocentricMeanEcliptic) assert geo_arr.shape == ra.shape # now check that we also can go back the other way without shape problems bary_icrs = bary_arr.transform_to(ICRS) assert bary_icrs.shape == test_icrs.shape helio_icrs = helio_arr.transform_to(ICRS) assert helio_icrs.shape == test_icrs.shape geo_gcrs = geo_arr.transform_to(GCRS) assert geo_gcrs.shape == test_gcrs.shape def test_roundtrip_scalar(): icrs = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.au) gcrs = GCRS(icrs.cartesian) bary = icrs.transform_to(BarycentricMeanEcliptic) helio = icrs.transform_to(HeliocentricMeanEcliptic) geo = gcrs.transform_to(GeocentricMeanEcliptic) bary_icrs = bary.transform_to(ICRS) helio_icrs = helio.transform_to(ICRS) geo_gcrs = geo.transform_to(GCRS) assert quantity_allclose(bary_icrs.cartesian.xyz, icrs.cartesian.xyz) assert quantity_allclose(helio_icrs.cartesian.xyz, icrs.cartesian.xyz) assert quantity_allclose(geo_gcrs.cartesian.xyz, gcrs.cartesian.xyz)
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Accuracy tests for AltAz to ICRS coordinate transformations. We use "known good" examples computed with other coordinate libraries. Note that we use very low precision asserts because some people run tests on 32-bit machines and we want the tests to pass there. TODO: check if these tests pass on 32-bit machines and implement higher-precision checks on 64-bit machines. """ import pytest from astropy import units as u from astropy.time import Time from astropy.coordinates.builtin_frames import AltAz from astropy.coordinates import EarthLocation from astropy.coordinates import Angle, SkyCoord @pytest.mark.remote_data def test_against_hor2eq(): """Check that Astropy gives consistent results with an IDL hor2eq example. See : http://idlastro.gsfc.nasa.gov/ftp/pro/astro/hor2eq.pro Test is against these run outputs, run at 2000-01-01T12:00:00: # NORMAL ATMOSPHERE CASE IDL> hor2eq, ten(37,54,41), ten(264,55,06), 2451545.0d, ra, dec, /verb, obs='kpno', pres=781.0, temp=273.0 Latitude = +31 57 48.0 Longitude = *** 36 00.0 Julian Date = 2451545.000000 Az, El = 17 39 40.4 +37 54 41 (Observer Coords) Az, El = 17 39 40.4 +37 53 40 (Apparent Coords) LMST = +11 15 26.5 LAST = +11 15 25.7 Hour Angle = +03 38 30.1 (hh:mm:ss) Ra, Dec: 07 36 55.6 +15 25 02 (Apparent Coords) Ra, Dec: 07 36 55.2 +15 25 08 (J2000.0000) Ra, Dec: 07 36 55.2 +15 25 08 (J2000) IDL> print, ra, dec 114.23004 15.418818 # NO PRESSURE CASE IDL> hor2eq, ten(37,54,41), ten(264,55,06), 2451545.0d, ra, dec, /verb, obs='kpno', pres=0.0, temp=273.0 Latitude = +31 57 48.0 Longitude = *** 36 00.0 Julian Date = 2451545.000000 Az, El = 17 39 40.4 +37 54 41 (Observer Coords) Az, El = 17 39 40.4 +37 54 41 (Apparent Coords) LMST = +11 15 26.5 LAST = +11 15 25.7 Hour Angle = +03 38 26.4 (hh:mm:ss) Ra, Dec: 07 36 59.3 +15 25 31 (Apparent Coords) Ra, Dec: 07 36 58.9 +15 25 37 (J2000.0000) Ra, Dec: 07 36 58.9 +15 25 37 (J2000) IDL> print, ra, dec 114.24554 15.427022 """ # Observatory position for `kpno` from here: # http://idlastro.gsfc.nasa.gov/ftp/pro/astro/observatory.pro location = EarthLocation(lon=Angle('-111d36.0m'), lat=Angle('31d57.8m'), height=2120. * u.m) obstime = Time(2451545.0, format='jd', scale='ut1') altaz_frame = AltAz(obstime=obstime, location=location, temperature=0 * u.deg_C, pressure=0.781 * u.bar) altaz_frame_noatm = AltAz(obstime=obstime, location=location, temperature=0 * u.deg_C, pressure=0.0 * u.bar) altaz = SkyCoord('264d55m06s 37d54m41s', frame=altaz_frame) altaz_noatm = SkyCoord('264d55m06s 37d54m41s', frame=altaz_frame_noatm) radec_frame = 'icrs' radec_actual = altaz.transform_to(radec_frame) radec_actual_noatm = altaz_noatm.transform_to(radec_frame) radec_expected = SkyCoord('07h36m55.2s +15d25m08s', frame=radec_frame) distance = radec_actual.separation(radec_expected).to('arcsec') # this comes from running the example hor2eq but with the pressure set to 0 radec_expected_noatm = SkyCoord('07h36m58.9s +15d25m37s', frame=radec_frame) distance_noatm = radec_actual_noatm.separation(radec_expected_noatm).to('arcsec') # The baseline difference is ~2.3 arcsec with one atm of pressure. The # difference is mainly due to the somewhat different atmospheric model that # hor2eq assumes. This is confirmed by the second test which has the # atmosphere "off" - the residual difference is small enough to be embedded # in the assumptions about "J2000" or rounding errors. 
assert distance < 5 * u.arcsec assert distance_noatm < 0.4 * u.arcsec @pytest.mark.remote_data def test_against_pyephem(): """Check that Astropy gives consistent results with one PyEphem example. PyEphem: http://rhodesmill.org/pyephem/ See example input and output here: https://gist.github.com/zonca/1672906 https://github.com/phn/pytpm/issues/2#issuecomment-3698679 """ obstime = Time('2011-09-18 08:50:00') location = EarthLocation(lon=Angle('-109d24m53.1s'), lat=Angle('33d41m46.0s'), height=30000. * u.m) # We are using the default pressure and temperature in PyEphem # relative_humidity = ? # obswl = ? altaz_frame = AltAz(obstime=obstime, location=location, temperature=15 * u.deg_C, pressure=1.010 * u.bar) altaz = SkyCoord('6.8927d -60.7665d', frame=altaz_frame) radec_actual = altaz.transform_to('icrs') radec_expected = SkyCoord('196.497518d -4.569323d', frame='icrs') # EPHEM # radec_expected = SkyCoord('196.496220d -4.569390d', frame='icrs') # HORIZON distance = radec_actual.separation(radec_expected).to('arcsec') # TODO: why is this difference so large? # It currently is: 31.45187984720655 arcsec assert distance < 1e3 * u.arcsec # Add assert on current Astropy result so that we notice if something changes radec_expected = SkyCoord('196.495372d -4.560694d', frame='icrs') distance = radec_actual.separation(radec_expected).to('arcsec') # Current value: 0.0031402822944751997 arcsec assert distance < 1 * u.arcsec @pytest.mark.remote_data def test_against_jpl_horizons(): """Check that Astropy gives consistent results with the JPL Horizons example. The input parameters and reference results are taken from this page: (from the first row of the Results table at the bottom of that page) http://ssd.jpl.nasa.gov/?horizons_tutorial """ obstime = Time('1998-07-28 03:00') location = EarthLocation(lon=Angle('248.405300d'), lat=Angle('31.9585d'), height=2.06 * u.km) # No atmosphere altaz_frame = AltAz(obstime=obstime, location=location) altaz = SkyCoord('143.2970d 2.6223d', frame=altaz_frame) radec_actual = altaz.transform_to('icrs') radec_expected = SkyCoord('19h24m55.01s -40d56m28.9s', frame='icrs') distance = radec_actual.separation(radec_expected).to('arcsec') # Current value: 0.238111 arcsec assert distance < 1 * u.arcsec @pytest.mark.remote_data @pytest.mark.xfail(reason="Current output is completely incorrect") def test_fk5_equinox_and_epoch_j2000_0_to_topocentric_observed(): """ http://phn.github.io/pytpm/conversions.html#fk5-equinox-and-epoch-j2000-0-to-topocentric-observed """ # Observatory position for `kpno` from here: # http://idlastro.gsfc.nasa.gov/ftp/pro/astro/observatory.pro location = EarthLocation(lon=Angle('-111.598333d'), lat=Angle('31.956389d'), height=2093.093 * u.m) # TODO: height correct? obstime = Time('2010-01-01 12:00:00') # relative_humidity = ? # obswl = ? altaz_frame = AltAz(obstime=obstime, location=location, temperature=0 * u.deg_C, pressure=0.781 * u.bar) radec = SkyCoord('12h22m54.899s 15d49m20.57s', frame='fk5') altaz_actual = radec.transform_to(altaz_frame) altaz_expected = SkyCoord('264d55m06s 37d54m41s', frame='altaz') # altaz_expected = SkyCoord('343.586827647d 15.7683070508d', frame='altaz') # altaz_expected = SkyCoord('133.498195532d 22.0162383595d', frame='altaz') distance = altaz_actual.separation(altaz_expected) # print(altaz_actual) # print(altaz_expected) # print(distance) """TODO: Current output is completely incorrect ... xfailing this test for now. 
    <SkyCoord (AltAz: obstime=2010-01-01 12:00:00.000, location=(-1994497.7199061865, -5037954.447348028, 3357437.2294832403) m, pressure=781.0 hPa, temperature=0.0 deg_C, relative_humidity=0, obswl=1.0 micron): az=133.4869896371561 deg, alt=67.97857990957701 deg>
    <SkyCoord (AltAz: obstime=None, location=None, pressure=0.0 hPa, temperature=0.0 deg_C, relative_humidity=0, obswl=1.0 micron): az=264.91833333333335 deg, alt=37.91138888888889 deg>
    68d02m45.732s
    """
    assert distance < 1 * u.arcsec
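

# A minimal sketch (values are arbitrary assumptions, not one of the
# reference cases above) of the AltAz -> ICRS pattern these tests exercise:
# build an AltAz frame from an obstime and an EarthLocation, wrap a
# coordinate in it, and transform.  Leaving pressure at its default of zero
# disables the refraction correction; like the tests above, evaluating the
# transform requires remote IERS data to be available.
def _altaz_to_icrs_sketch():
    """Transform an arbitrary AltAz pointing to ICRS."""
    location = EarthLocation(lon=Angle('-111d36.0m'), lat=Angle('31d57.8m'),
                             height=2120. * u.m)
    obstime = Time('2010-01-01 12:00:00')
    altaz_frame = AltAz(obstime=obstime, location=location)
    altaz = SkyCoord('264d55m06s 37d54m41s', frame=altaz_frame)
    return altaz.transform_to('icrs')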
# Licensed under a 3-clause BSD style license - see PYFITS.rst import bz2 import gzip import errno import http.client import mmap import operator import pathlib import io import os import sys import tempfile import warnings import zipfile import re from functools import reduce import numpy as np from .util import (isreadable, iswritable, isfile, fileobj_open, fileobj_name, fileobj_closed, fileobj_mode, _array_from_file, _array_to_file, _write_string) from astropy.utils.data import download_file, _is_url from astropy.utils.decorators import classproperty, deprecated_renamed_argument from astropy.utils.exceptions import AstropyUserWarning # Maps astropy.io.fits-specific file mode names to the appropriate file # modes to use for the underlying raw files IO_FITS_MODES = { 'readonly': 'rb', 'copyonwrite': 'rb', 'update': 'rb+', 'append': 'ab+', 'ostream': 'wb', 'denywrite': 'rb'} # Maps OS-level file modes to the appropriate astropy.io.fits specific mode # to use when given file objects but no mode specified; obviously in # IO_FITS_MODES there are overlaps; for example 'readonly' and 'denywrite' # both require the file to be opened in 'rb' mode. But 'readonly' is the # default behavior for such files if not otherwise specified. # Note: 'ab' is only supported for 'ostream' which is output-only. FILE_MODES = { 'rb': 'readonly', 'rb+': 'update', 'wb': 'ostream', 'wb+': 'update', 'ab': 'ostream', 'ab+': 'append'} # A match indicates the file was opened in text mode, which is not allowed TEXT_RE = re.compile(r'^[rwa]((t?\+?)|(\+?t?))$') # readonly actually uses copyonwrite for mmap so that readonly without mmap and # with mmap still have to same behavior with regard to updating the array. To # get a truly readonly mmap use denywrite # the name 'denywrite' comes from a deprecated flag to mmap() on Linux--it # should be clarified that 'denywrite' mode is not directly analogous to the # use of that flag; it was just taken, for lack of anything better, as a name # that means something like "read only" but isn't readonly. MEMMAP_MODES = {'readonly': mmap.ACCESS_COPY, 'copyonwrite': mmap.ACCESS_COPY, 'update': mmap.ACCESS_WRITE, 'append': mmap.ACCESS_COPY, 'denywrite': mmap.ACCESS_READ} # TODO: Eventually raise a warning, and maybe even later disable the use of # 'copyonwrite' and 'denywrite' modes unless memmap=True. For now, however, # that would generate too many warnings for too many users. If nothing else, # wait until the new logging system is in place. GZIP_MAGIC = b'\x1f\x8b\x08' PKZIP_MAGIC = b'\x50\x4b\x03\x04' BZIP2_MAGIC = b'\x42\x5a' def _normalize_fits_mode(mode): if mode is not None and mode not in IO_FITS_MODES: if TEXT_RE.match(mode): raise ValueError( "Text mode '{}' not supported: " "files must be opened in binary mode".format(mode)) new_mode = FILE_MODES.get(mode) if new_mode not in IO_FITS_MODES: raise ValueError("Mode '{}' not recognized".format(mode)) mode = new_mode return mode class _File: """ Represents a FITS file on disk (or in some other file-like object). 
""" @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def __init__(self, fileobj=None, mode=None, memmap=None, overwrite=False, cache=True): self.strict_memmap = bool(memmap) memmap = True if memmap is None else memmap if fileobj is None: self._file = None self.closed = False self.binary = True self.mode = mode self.memmap = memmap self.compression = None self.readonly = False self.writeonly = False self.simulateonly = True self.close_on_error = False return else: self.simulateonly = False # If fileobj is of type pathlib.Path if isinstance(fileobj, pathlib.Path): fileobj = str(fileobj) elif isinstance(fileobj, bytes): # Using bytes as filename is tricky, it's deprecated for Windows # in Python 3.5 (because it could lead to false-positives) but # was fixed and un-deprecated in Python 3.6. # However it requires that the bytes object is encoded with the # file system encoding. # Probably better to error out and ask for a str object instead. # TODO: This could be revised when Python 3.5 support is dropped # See also: https://github.com/astropy/astropy/issues/6789 raise TypeError("names should be `str` not `bytes`.") # Holds mmap instance for files that use mmap self._mmap = None if mode is not None and mode not in IO_FITS_MODES: raise ValueError("Mode '{}' not recognized".format(mode)) if isfile(fileobj): objmode = _normalize_fits_mode(fileobj_mode(fileobj)) if mode is not None and mode != objmode: raise ValueError( "Requested FITS mode '{}' not compatible with open file " "handle mode '{}'".format(mode, objmode)) mode = objmode if mode is None: mode = 'readonly' # Handle raw URLs if (isinstance(fileobj, str) and mode not in ('ostream', 'append', 'update') and _is_url(fileobj)): self.name = download_file(fileobj, cache=cache) # Handle responses from URL requests that have already been opened elif isinstance(fileobj, http.client.HTTPResponse): if mode in ('ostream', 'append', 'update'): raise ValueError( "Mode {} not supported for HTTPResponse".format(mode)) fileobj = io.BytesIO(fileobj.read()) else: self.name = fileobj_name(fileobj) self.closed = False self.binary = True self.mode = mode self.memmap = memmap # Underlying fileobj is a file-like object, but an actual file object self.file_like = False # Should the object be closed on error: see # https://github.com/astropy/astropy/issues/6168 self.close_on_error = False # More defaults to be adjusted below as necessary self.compression = None self.readonly = False self.writeonly = False # Initialize the internal self._file object if isfile(fileobj): self._open_fileobj(fileobj, mode, overwrite) elif isinstance(fileobj, str): self._open_filename(fileobj, mode, overwrite) else: self._open_filelike(fileobj, mode, overwrite) self.fileobj_mode = fileobj_mode(self._file) if isinstance(fileobj, gzip.GzipFile): self.compression = 'gzip' elif isinstance(fileobj, zipfile.ZipFile): # Reading from zip files is supported but not writing (yet) self.compression = 'zip' elif isinstance(fileobj, bz2.BZ2File): self.compression = 'bzip2' if (mode in ('readonly', 'copyonwrite', 'denywrite') or (self.compression and mode == 'update')): self.readonly = True elif (mode == 'ostream' or (self.compression and mode == 'append')): self.writeonly = True # For 'ab+' mode, the pointer is at the end after the open in # Linux, but is at the beginning in Solaris. if (mode == 'ostream' or self.compression or not hasattr(self._file, 'seek')): # For output stream start with a truncated file. 
# For compressed files we can't really guess at the size self.size = 0 else: pos = self._file.tell() self._file.seek(0, 2) self.size = self._file.tell() self._file.seek(pos) if self.memmap: if not isfile(self._file): self.memmap = False elif not self.readonly and not self._mmap_available: # Test mmap.flush--see # https://github.com/astropy/astropy/issues/968 self.memmap = False def __repr__(self): return '<{}.{} {}>'.format(self.__module__, self.__class__.__name__, self._file) # Support the 'with' statement def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def readable(self): if self.writeonly: return False return isreadable(self._file) def read(self, size=None): if not hasattr(self._file, 'read'): raise EOFError try: return self._file.read(size) except OSError: # On some versions of Python, it appears, GzipFile will raise an # OSError if you try to read past its end (as opposed to just # returning '') if self.compression == 'gzip': return '' raise def readarray(self, size=None, offset=0, dtype=np.uint8, shape=None): """ Similar to file.read(), but returns the contents of the underlying file as a numpy array (or mmap'd array if memmap=True) rather than a string. Usually it's best not to use the `size` argument with this method, but it's provided for compatibility. """ if not hasattr(self._file, 'read'): raise EOFError if not isinstance(dtype, np.dtype): dtype = np.dtype(dtype) if size and size % dtype.itemsize != 0: raise ValueError('size {} not a multiple of {}'.format(size, dtype)) if isinstance(shape, int): shape = (shape,) if not (size or shape): warnings.warn('No size or shape given to readarray(); assuming a ' 'shape of (1,)', AstropyUserWarning) shape = (1,) if size and not shape: shape = (size // dtype.itemsize,) if size and shape: actualsize = np.prod(shape) * dtype.itemsize if actualsize > size: raise ValueError('size {} is too few bytes for a {} array of ' '{}'.format(size, shape, dtype)) elif actualsize < size: raise ValueError('size {} is too many bytes for a {} array of ' '{}'.format(size, shape, dtype)) filepos = self._file.tell() try: if self.memmap: if self._mmap is None: # Instantiate Memmap array of the file offset at 0 (so we # can return slices of it to offset anywhere else into the # file) access_mode = MEMMAP_MODES[self.mode] # For reasons unknown the file needs to point to (near) # the beginning or end of the file. No idea how close to # the beginning or end. # If I had to guess there is some bug in the mmap module # of CPython or perhaps in microsoft's underlying code # for generating the mmap. self._file.seek(0, 0) # This would also work: # self._file.seek(0, 2) # moves to the end try: self._mmap = mmap.mmap(self._file.fileno(), 0, access=access_mode, offset=0) except OSError as exc: # NOTE: mode='readonly' results in the memory-mapping # using the ACCESS_COPY mode in mmap so that users can # modify arrays. However, on some systems, the OS raises # a '[Errno 12] Cannot allocate memory' OSError if the # address space is smaller than the file. The solution # is to open the file in mode='denywrite', which at # least allows the file to be opened even if the # resulting arrays will be truly read-only. 
if exc.errno == errno.ENOMEM and self.mode == 'readonly': warnings.warn("Could not memory map array with " "mode='readonly', falling back to " "mode='denywrite', which means that " "the array will be read-only", AstropyUserWarning) self._mmap = mmap.mmap(self._file.fileno(), 0, access=MEMMAP_MODES['denywrite'], offset=0) else: raise return np.ndarray(shape=shape, dtype=dtype, offset=offset, buffer=self._mmap) else: count = reduce(operator.mul, shape) self._file.seek(offset) data = _array_from_file(self._file, dtype, count) data.shape = shape return data finally: # Make sure we leave the file in the position we found it; on # some platforms (e.g. Windows) mmaping a file handle can also # reset its file pointer self._file.seek(filepos) def writable(self): if self.readonly: return False return iswritable(self._file) def write(self, string): if hasattr(self._file, 'write'): _write_string(self._file, string) def writearray(self, array): """ Similar to file.write(), but writes a numpy array instead of a string. Also like file.write(), a flush() or close() may be needed before the file on disk reflects the data written. """ if hasattr(self._file, 'write'): _array_to_file(array, self._file) def flush(self): if hasattr(self._file, 'flush'): self._file.flush() def seek(self, offset, whence=0): if not hasattr(self._file, 'seek'): return self._file.seek(offset, whence) pos = self._file.tell() if self.size and pos > self.size: warnings.warn('File may have been truncated: actual file length ' '({}) is smaller than the expected size ({})' .format(self.size, pos), AstropyUserWarning) def tell(self): if not hasattr(self._file, 'tell'): raise EOFError return self._file.tell() def truncate(self, size=None): if hasattr(self._file, 'truncate'): self._file.truncate(size) def close(self): """ Close the 'physical' FITS file. """ if hasattr(self._file, 'close'): self._file.close() self._maybe_close_mmap() # Set self._memmap to None anyways since no new .data attributes can be # loaded after the file is closed self._mmap = None self.closed = True self.close_on_error = False def _maybe_close_mmap(self, refcount_delta=0): """ When mmap is in use these objects hold a reference to the mmap of the file (so there is only one, shared by all HDUs that reference this file). This will close the mmap if there are no arrays referencing it. """ if (self._mmap is not None and sys.getrefcount(self._mmap) == 2 + refcount_delta): self._mmap.close() self._mmap = None def _overwrite_existing(self, overwrite, fileobj, closed): """Overwrite an existing file if ``overwrite`` is ``True``, otherwise raise an OSError. The exact behavior of this method depends on the _File object state and is only meant for use within the ``_open_*`` internal methods. """ # The file will be overwritten... if ((self.file_like and hasattr(fileobj, 'len') and fileobj.len > 0) or (os.path.exists(self.name) and os.path.getsize(self.name) != 0)): if overwrite: if self.file_like and hasattr(fileobj, 'truncate'): fileobj.truncate(0) else: if not closed: fileobj.close() os.remove(self.name) else: raise OSError("File {!r} already exists.".format(self.name)) def _try_read_compressed(self, obj_or_name, magic, mode, ext=''): """Attempt to determine if the given file is compressed""" if ext == '.gz' or magic.startswith(GZIP_MAGIC): if mode == 'append': raise OSError("'append' mode is not supported with gzip files." 
"Use 'update' mode instead") # Handle gzip files kwargs = dict(mode=IO_FITS_MODES[mode]) if isinstance(obj_or_name, str): kwargs['filename'] = obj_or_name else: kwargs['fileobj'] = obj_or_name self._file = gzip.GzipFile(**kwargs) self.compression = 'gzip' elif ext == '.zip' or magic.startswith(PKZIP_MAGIC): # Handle zip files self._open_zipfile(self.name, mode) self.compression = 'zip' elif ext == '.bz2' or magic.startswith(BZIP2_MAGIC): # Handle bzip2 files if mode in ['update', 'append']: raise OSError("update and append modes are not supported " "with bzip2 files") # bzip2 only supports 'w' and 'r' modes bzip2_mode = 'w' if mode == 'ostream' else 'r' self._file = bz2.BZ2File(obj_or_name, mode=bzip2_mode) self.compression = 'bzip2' return self.compression is not None def _open_fileobj(self, fileobj, mode, overwrite): """Open a FITS file from a file object (including compressed files).""" closed = fileobj_closed(fileobj) fmode = fileobj_mode(fileobj) or IO_FITS_MODES[mode] if mode == 'ostream': self._overwrite_existing(overwrite, fileobj, closed) if not closed: self._file = fileobj elif isfile(fileobj): self._file = fileobj_open(self.name, IO_FITS_MODES[mode]) # Attempt to determine if the file represented by the open file object # is compressed try: # We need to account for the possibility that the underlying file # handle may have been opened with either 'ab' or 'ab+', which # means that the current file position is at the end of the file. if mode in ['ostream', 'append']: self._file.seek(0) magic = self._file.read(4) # No matter whether the underlying file was opened with 'ab' or # 'ab+', we need to return to the beginning of the file in order # to properly process the FITS header (and handle the possibility # of a compressed file). self._file.seek(0) except (OSError,OSError): return self._try_read_compressed(fileobj, magic, mode) def _open_filelike(self, fileobj, mode, overwrite): """Open a FITS file from a file-like object, i.e. one that has read and/or write methods. """ self.file_like = True self._file = fileobj if fileobj_closed(fileobj): raise OSError("Cannot read from/write to a closed file-like " "object ({!r}).".format(fileobj)) if isinstance(fileobj, zipfile.ZipFile): self._open_zipfile(fileobj, mode) # We can bypass any additional checks at this point since now # self._file points to the temp file extracted from the zip return # If there is not seek or tell methods then set the mode to # output streaming. 
        if (not hasattr(self._file, 'seek') or
                not hasattr(self._file, 'tell')):
            self.mode = mode = 'ostream'

        if mode == 'ostream':
            self._overwrite_existing(overwrite, fileobj, False)

        # Any "writeable" mode requires a write() method on the file object
        if (self.mode in ('update', 'append', 'ostream') and
                not hasattr(self._file, 'write')):
            raise OSError("File-like object does not have a 'write' "
                          "method, required for mode '{}'.".format(self.mode))

        # Any mode except for 'ostream' requires readability
        if self.mode != 'ostream' and not hasattr(self._file, 'read'):
            raise OSError("File-like object does not have a 'read' "
                          "method, required for mode {!r}.".format(self.mode))

    def _open_filename(self, filename, mode, overwrite):
        """Open a FITS file from a filename string."""

        if mode == 'ostream':
            self._overwrite_existing(overwrite, None, True)

        if os.path.exists(self.name):
            with fileobj_open(self.name, 'rb') as f:
                magic = f.read(4)
        else:
            magic = b''

        ext = os.path.splitext(self.name)[1]

        if not self._try_read_compressed(self.name, magic, mode, ext=ext):
            self._file = fileobj_open(self.name, IO_FITS_MODES[mode])
            self.close_on_error = True

        # Make certain we're back at the beginning of the file
        # BZ2File does not support seek when the file is open for writing, but
        # when opening a file for write, bz2.BZ2File always truncates anyway.
        if not (isinstance(self._file, bz2.BZ2File) and mode == 'ostream'):
            self._file.seek(0)

    @classproperty(lazy=True)
    def _mmap_available(cls):
        """Tests that mmap, and specifically mmap.flush, works.  This may not
        be the case on some uncommon platforms (see
        https://github.com/astropy/astropy/issues/968).

        If mmap.flush is found not to work, ``self.memmap = False`` is set and
        a warning is issued.
        """
        tmpfd, tmpname = tempfile.mkstemp()
        try:
            # Windows does not allow mappings on empty files
            os.write(tmpfd, b' ')
            os.fsync(tmpfd)
            try:
                mm = mmap.mmap(tmpfd, 1, access=mmap.ACCESS_WRITE)
            except OSError as exc:
                warnings.warn('Failed to create mmap: {}; mmap use will be '
                              'disabled'.format(str(exc)), AstropyUserWarning)
                del exc
                return False
            try:
                mm.flush()
            except OSError:
                warnings.warn('mmap.flush is unavailable on this platform; '
                              'using mmap in writeable mode will be disabled',
                              AstropyUserWarning)
                return False
            finally:
                mm.close()
        finally:
            os.close(tmpfd)
            os.remove(tmpname)

        return True

    def _open_zipfile(self, fileobj, mode):
        """Limited support for zipfile.ZipFile objects containing a single
        file.  Allows reading only for now by extracting the file to a
        tempfile.
        """
        if mode in ('update', 'append'):
            raise OSError(
                  "Writing to zipped fits files is not currently "
                  "supported")

        if not isinstance(fileobj, zipfile.ZipFile):
            zfile = zipfile.ZipFile(fileobj)
            close = True
        else:
            zfile = fileobj
            close = False

        namelist = zfile.namelist()
        if len(namelist) != 1:
            raise OSError(
              "Zip files with multiple members are not supported.")
        self._file = tempfile.NamedTemporaryFile(suffix='.fits')
        self._file.write(zfile.read(namelist[0]))

        if close:
            zfile.close()
        # We just wrote the contents of the first file in the archive to a new
        # temp file, which now serves as our underlying file object. So it's
        # necessary to reset the position back to the beginning
        self._file.seek(0)
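

# A minimal sketch (illustration only; _File is internal and is normally
# reached through fits.open) of how the mode tables near the top of this
# module interact: OS-level modes are mapped through FILE_MODES to
# astropy.io.fits mode names, and text modes are rejected via TEXT_RE.
def _example_mode_normalization():
    """Exercise _normalize_fits_mode on a few representative inputs."""
    assert _normalize_fits_mode('rb+') == 'update'  # OS mode -> FITS mode
    assert _normalize_fits_mode('denywrite') == 'denywrite'  # already FITS
    try:
        _normalize_fits_mode('r+')  # text mode
    except ValueError:
        pass  # text modes are rejected with a descriptive error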
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os import re import warnings from collections import OrderedDict from astropy.io import registry as io_registry from astropy import units as u from astropy.table import Table, serialize, meta, Column, MaskedColumn from astropy.table.table import has_info_class from astropy.time import Time from astropy.utils.exceptions import AstropyUserWarning from astropy.utils.data_info import MixinInfo, serialize_context_as from . import HDUList, TableHDU, BinTableHDU, GroupsHDU from .column import KEYWORD_NAMES, _fortran_to_python_format from .convenience import table_to_hdu from .hdu.hdulist import fitsopen as fits_open from .util import first # FITS file signature as per RFC 4047 FITS_SIGNATURE = (b"\x53\x49\x4d\x50\x4c\x45\x20\x20\x3d\x20\x20\x20\x20\x20" b"\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20" b"\x20\x54") # Keywords to remove for all tables that are read in REMOVE_KEYWORDS = ['XTENSION', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'PCOUNT', 'GCOUNT', 'TFIELDS', 'THEAP'] # Column-specific keywords regex COLUMN_KEYWORD_REGEXP = '(' + '|'.join(KEYWORD_NAMES) + ')[0-9]+' def is_column_keyword(keyword): return re.match(COLUMN_KEYWORD_REGEXP, keyword) is not None def is_fits(origin, filepath, fileobj, *args, **kwargs): """ Determine whether `origin` is a FITS file. Parameters ---------- origin : str or readable file-like object Path or file object containing a potential FITS file. Returns ------- is_fits : bool Returns `True` if the given file is a FITS file. """ if fileobj is not None: pos = fileobj.tell() sig = fileobj.read(30) fileobj.seek(pos) return sig == FITS_SIGNATURE elif filepath is not None: if filepath.lower().endswith(('.fits', '.fits.gz', '.fit', '.fit.gz', '.fts', '.fts.gz')): return True elif isinstance(args[0], (HDUList, TableHDU, BinTableHDU, GroupsHDU)): return True else: return False def _decode_mixins(tbl): """Decode a Table ``tbl`` that has astropy Columns + appropriate meta-data into the corresponding table with mixin columns (as appropriate). """ # If available read in __serialized_columns__ meta info which is stored # in FITS COMMENTS between two sentinels. try: i0 = tbl.meta['comments'].index('--BEGIN-ASTROPY-SERIALIZED-COLUMNS--') i1 = tbl.meta['comments'].index('--END-ASTROPY-SERIALIZED-COLUMNS--') except (ValueError, KeyError): return tbl # The YAML data are split into COMMENT cards, with lines longer than 70 # characters being split with a continuation character \ (backslash). # Strip the backslashes and join together. continuation_line = False lines = [] for line in tbl.meta['comments'][i0 + 1:i1]: if continuation_line: lines[-1] = lines[-1] + line[:70] else: lines.append(line[:70]) continuation_line = len(line) == 71 del tbl.meta['comments'][i0:i1 + 1] if not tbl.meta['comments']: del tbl.meta['comments'] info = meta.get_header_from_yaml(lines) # Add serialized column information to table meta for use in constructing mixins tbl.meta['__serialized_columns__'] = info['meta']['__serialized_columns__'] # Use the `datatype` attribute info to update column attributes that are # NOT already handled via standard FITS column keys (name, dtype, unit). for col in info['datatype']: for attr in ['description', 'meta']: if attr in col: setattr(tbl[col['name']].info, attr, col[attr]) # Construct new table with mixins, using tbl.meta['__serialized_columns__'] # as guidance. 
tbl = serialize._construct_mixins_from_columns(tbl) return tbl def read_table_fits(input, hdu=None, astropy_native=False, memmap=False, character_as_bytes=True): """ Read a Table object from an FITS file If the ``astropy_native`` argument is ``True``, then input FITS columns which are representations of an astropy core object will be converted to that class and stored in the ``Table`` as "mixin columns". Currently this is limited to FITS columns which adhere to the FITS Time standard, in which case they will be converted to a `~astropy.time.Time` column in the output table. Parameters ---------- input : str or file-like object or compatible `astropy.io.fits` HDU object If a string, the filename to read the table from. If a file object, or a compatible HDU object, the object to extract the table from. The following `astropy.io.fits` HDU objects can be used as input: - :class:`~astropy.io.fits.hdu.table.TableHDU` - :class:`~astropy.io.fits.hdu.table.BinTableHDU` - :class:`~astropy.io.fits.hdu.table.GroupsHDU` - :class:`~astropy.io.fits.hdu.hdulist.HDUList` hdu : int or str, optional The HDU to read the table from. astropy_native : bool, optional Read in FITS columns as native astropy objects where possible instead of standard Table Column objects. Default is False. memmap : bool, optional Whether to use memory mapping, which accesses data on disk as needed. If you are only accessing part of the data, this is often more efficient. If you want to access all the values in the table, and you are able to fit the table in memory, you may be better off leaving memory mapping off. However, if your table would not fit in memory, you should set this to `True`. character_as_bytes : bool, optional If `True`, string columns are stored as Numpy byte arrays (dtype ``S``) and are converted on-the-fly to unicode strings when accessing individual elements. If you need to use Numpy unicode arrays (dtype ``U``) internally, you should set this to `False`, but note that this will use more memory. If set to `False`, string columns will not be memory-mapped even if ``memmap`` is `True`. """ if isinstance(input, HDUList): # Parse all table objects tables = OrderedDict() for ihdu, hdu_item in enumerate(input): if isinstance(hdu_item, (TableHDU, BinTableHDU, GroupsHDU)): tables[ihdu] = hdu_item if len(tables) > 1: if hdu is None: warnings.warn("hdu= was not specified but multiple tables" " are present, reading in first available" " table (hdu={0})".format(first(tables)), AstropyUserWarning) hdu = first(tables) # hdu might not be an integer, so we first need to convert it # to the correct HDU index hdu = input.index_of(hdu) if hdu in tables: table = tables[hdu] else: raise ValueError("No table found in hdu={0}".format(hdu)) elif len(tables) == 1: table = tables[first(tables)] else: raise ValueError("No table found") elif isinstance(input, (TableHDU, BinTableHDU, GroupsHDU)): table = input else: hdulist = fits_open(input, character_as_bytes=character_as_bytes, memmap=memmap) try: return read_table_fits(hdulist, hdu=hdu, astropy_native=astropy_native) finally: hdulist.close() # Check if table is masked masked = any(col.null is not None for col in table.columns) # TODO: in future, it may make more sense to do this column-by-column, # rather than via the structured array. # In the loop below we access the data using data[col.name] rather than # col.array to make sure that the data is scaled correctly if needed. 
data = table.data columns = [] for col in data.columns: # Set column data if masked: column = MaskedColumn(data=data[col.name], name=col.name, copy=False) if col.null is not None: column.set_fill_value(col.null) column.mask[column.data == col.null] = True else: column = Column(data=data[col.name], name=col.name, copy=False) # Copy over units if col.unit is not None: column.unit = u.Unit(col.unit, format='fits', parse_strict='silent') # Copy over display format if col.disp is not None: column.format = _fortran_to_python_format(col.disp) columns.append(column) # Create Table object t = Table(columns, masked=masked, copy=False) # TODO: deal properly with unsigned integers hdr = table.header if astropy_native: # Avoid circular imports, and also only import if necessary. from .fitstime import fits_to_time hdr = fits_to_time(hdr, t) for key, value, comment in hdr.cards: if key in ['COMMENT', 'HISTORY']: # Convert to io.ascii format if key == 'COMMENT': key = 'comments' if key in t.meta: t.meta[key].append(value) else: t.meta[key] = [value] elif key in t.meta: # key is duplicate if isinstance(t.meta[key], list): t.meta[key].append(value) else: t.meta[key] = [t.meta[key], value] elif is_column_keyword(key) or key in REMOVE_KEYWORDS: pass else: t.meta[key] = value # TODO: implement masking # Decode any mixin columns that have been stored as standard Columns. t = _decode_mixins(t) return t def _encode_mixins(tbl): """Encode a Table ``tbl`` that may have mixin columns to a Table with only astropy Columns + appropriate meta-data to allow subsequent decoding. """ # Determine if information will be lost without serializing meta. This is hardcoded # to the set difference between column info attributes and what FITS can store # natively (name, dtype, unit). See _get_col_attributes() in table/meta.py for where # this comes from. info_lost = any(any(getattr(col.info, attr, None) not in (None, {}) for attr in ('description', 'meta')) for col in tbl.itercols()) # If PyYAML is not available then check to see if there are any mixin cols # that *require* YAML serialization. FITS already has support for Time, # Quantity, so if those are the only mixins the proceed without doing the # YAML bit, for backward compatibility (i.e. not requiring YAML to write # Time or Quantity). In this case other mixin column meta (e.g. # description or meta) will be silently dropped, consistent with astropy <= # 2.0 behavior. try: import yaml # noqa except ImportError: for col in tbl.itercols(): if (has_info_class(col, MixinInfo) and col.__class__ not in (u.Quantity, Time)): raise TypeError("cannot write type {} column '{}' " "to FITS without PyYAML installed." .format(col.__class__.__name__, col.info.name)) else: if info_lost: warnings.warn("table contains column(s) with defined 'format'," " 'description', or 'meta' info attributes. These" " will be dropped unless you install PyYAML.", AstropyUserWarning) return tbl # Convert the table to one with no mixins, only Column objects. This adds # meta data which is extracted with meta.get_yaml_from_table. This ignores # Time-subclass columns and leave them in the table so that the downstream # FITS Time handling does the right thing. with serialize_context_as('fits'): encode_tbl = serialize.represent_mixins_as_columns( tbl, exclude_classes=(Time,)) # If the encoded table is unchanged then there were no mixins. But if there # is column metadata (format, description, meta) that would be lost, then # still go through the serialized columns machinery. 
if encode_tbl is tbl and not info_lost: return tbl # Get the YAML serialization of information describing the table columns. # This is re-using ECSV code that combined existing table.meta with with # the extra __serialized_columns__ key. For FITS the table.meta is handled # by the native FITS connect code, so don't include that in the YAML # output. ser_col = '__serialized_columns__' # encode_tbl might not have a __serialized_columns__ key if there were no mixins, # but machinery below expects it to be available, so just make an empty dict. encode_tbl.meta.setdefault(ser_col, {}) tbl_meta_copy = encode_tbl.meta.copy() try: encode_tbl.meta = {ser_col: encode_tbl.meta[ser_col]} meta_yaml_lines = meta.get_yaml_from_table(encode_tbl) finally: encode_tbl.meta = tbl_meta_copy del encode_tbl.meta[ser_col] if 'comments' not in encode_tbl.meta: encode_tbl.meta['comments'] = [] encode_tbl.meta['comments'].append('--BEGIN-ASTROPY-SERIALIZED-COLUMNS--') for line in meta_yaml_lines: if len(line) == 0: lines = [''] else: # Split line into 70 character chunks for COMMENT cards idxs = list(range(0, len(line) + 70, 70)) lines = [line[i0:i1] + '\\' for i0, i1 in zip(idxs[:-1], idxs[1:])] lines[-1] = lines[-1][:-1] encode_tbl.meta['comments'].extend(lines) encode_tbl.meta['comments'].append('--END-ASTROPY-SERIALIZED-COLUMNS--') return encode_tbl def write_table_fits(input, output, overwrite=False): """ Write a Table object to a FITS file Parameters ---------- input : Table The table to write out. output : str The filename to write the table to. overwrite : bool Whether to overwrite any existing file without warning. """ # Encode any mixin columns into standard Columns. input = _encode_mixins(input) table_hdu = table_to_hdu(input, character_as_bytes=True) # Check if output file already exists if isinstance(output, str) and os.path.exists(output): if overwrite: os.remove(output) else: raise OSError("File exists: {0}".format(output)) table_hdu.writeto(output) io_registry.register_reader('fits', Table, read_table_fits) io_registry.register_writer('fits', Table, write_table_fits) io_registry.register_identifier('fits', Table, is_fits)
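

# A minimal usage sketch (illustrative; the filename is an arbitrary
# assumption) of what the registrations above enable: Table.write and
# Table.read dispatch to write_table_fits and read_table_fits once the
# 'fits' format is given explicitly or identified by is_fits from the file
# signature or extension.
def _example_roundtrip(filename='example.fits'):
    """Write a small Table to FITS and read it back."""
    t = Table({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
    t.write(filename, format='fits', overwrite=True)
    return Table.read(filename, format='fits')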
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Facilities for diffing two FITS files.  Includes objects for diffing entire
FITS files, individual HDUs, FITS headers, or just FITS data.

Used to implement the fitsdiff program.
"""

import fnmatch
import glob
import io
import operator
import os.path
import textwrap
import warnings

from collections import defaultdict
from inspect import signature
from itertools import islice

import numpy as np

from astropy import __version__

from .card import Card, BLANK_CARD
from .header import Header
from astropy.utils.decorators import deprecated_renamed_argument
# HDUList is used in one of the doctests
from .hdu.hdulist import fitsopen, HDUList  # pylint: disable=W0611
from .hdu.table import _TableLikeHDU
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.diff import (report_diff_values, fixed_width_indent,
                                where_not_allclose, diff_values)

__all__ = ['FITSDiff', 'HDUDiff', 'HeaderDiff', 'ImageDataDiff', 'RawDataDiff',
           'TableDataDiff']

# Column attributes of interest for comparison
_COL_ATTRS = [('unit', 'units'), ('null', 'null values'),
              ('bscale', 'bscales'), ('bzero', 'bzeros'),
              ('disp', 'display formats'), ('dim', 'dimensions')]


class _BaseDiff:
    """
    Base class for all FITS diff objects.

    When instantiating a FITS diff object, the first two arguments are always
    the two objects to diff (two FITS files, two FITS headers, etc.).
    Instantiating a ``_BaseDiff`` also causes the diff itself to be executed.
    The returned ``_BaseDiff`` instance has a number of attributes that
    describe the results of the diff operation.

    The most basic attribute, present on all ``_BaseDiff`` instances, is
    ``.identical`` which is `True` if the two objects being compared are
    identical according to the diff method for objects of that type.
    """

    def __init__(self, a, b):
        """
        The ``_BaseDiff`` class does not implement a ``_diff`` method and
        should not be instantiated directly. Instead instantiate the
        appropriate subclass of ``_BaseDiff`` for the objects being compared
        (for example, use `HeaderDiff` to compare two `Header` objects).
        """

        self.a = a
        self.b = b

        # For internal use in report output
        self._fileobj = None
        self._indent = 0

        self._diff()

    def __bool__(self):
        """
        A ``_BaseDiff`` object acts as `True` in a boolean context if the two
        objects compared are identical.  Otherwise it acts as `False`.
        """

        return not self.identical

    @classmethod
    def fromdiff(cls, other, a, b):
        """
        Returns a new Diff object of a specific subclass from an existing diff
        object, passing on the values for any arguments they share in common
        (such as ignore_keywords).

        For example::

            >>> from astropy.io import fits
            >>> hdul1, hdul2 = fits.HDUList(), fits.HDUList()
            >>> headera, headerb = fits.Header(), fits.Header()
            >>> fd = fits.FITSDiff(hdul1, hdul2, ignore_keywords=['*'])
            >>> hd = fits.HeaderDiff.fromdiff(fd, headera, headerb)
            >>> list(hd.ignore_keywords)
            ['*']
        """

        sig = signature(cls.__init__)
        # The first 3 arguments of any Diff initializer are self, a, and b.
        kwargs = {}
        for arg in list(sig.parameters.keys())[3:]:
            if hasattr(other, arg):
                kwargs[arg] = getattr(other, arg)

        return cls(a, b, **kwargs)

    @property
    def identical(self):
        """
        `True` if all the ``.diff_*`` attributes on this diff instance are
        empty, implying that no differences were found.

        Any subclass of ``_BaseDiff`` must have at least one ``.diff_*``
        attribute, which contains a non-empty value if and only if some
        difference was found between the two objects being compared.
""" return not any(getattr(self, attr) for attr in self.__dict__ if attr.startswith('diff_')) @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def report(self, fileobj=None, indent=0, overwrite=False): """ Generates a text report on the differences (if any) between two objects, and either returns it as a string or writes it to a file-like object. Parameters ---------- fileobj : file-like object, string, or None (optional) If `None`, this method returns the report as a string. Otherwise it returns `None` and writes the report to the given file-like object (which must have a ``.write()`` method at a minimum), or to a new file at the path specified. indent : int The number of 4 space tabs to indent the report. overwrite : bool, optional If ``True``, overwrite the output file if it exists. Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. .. versionchanged:: 1.3 ``overwrite`` replaces the deprecated ``clobber`` argument. Returns ------- report : str or None """ return_string = False filepath = None if isinstance(fileobj, str): if os.path.exists(fileobj) and not overwrite: raise OSError("File {0} exists, aborting (pass in " "overwrite=True to overwrite)".format(fileobj)) else: filepath = fileobj fileobj = open(filepath, 'w') elif fileobj is None: fileobj = io.StringIO() return_string = True self._fileobj = fileobj self._indent = indent # This is used internally by _writeln try: self._report() finally: if filepath: fileobj.close() if return_string: return fileobj.getvalue() def _writeln(self, text): self._fileobj.write(fixed_width_indent(text, self._indent) + '\n') def _diff(self): raise NotImplementedError def _report(self): raise NotImplementedError class FITSDiff(_BaseDiff): """Diff two FITS files by filename, or two `HDUList` objects. `FITSDiff` objects have the following diff attributes: - ``diff_hdu_count``: If the FITS files being compared have different numbers of HDUs, this contains a 2-tuple of the number of HDUs in each file. - ``diff_hdus``: If any HDUs with the same index are different, this contains a list of 2-tuples of the HDU index and the `HDUDiff` object representing the differences between the two HDUs. """ def __init__(self, a, b, ignore_hdus=[], ignore_keywords=[], ignore_comments=[], ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0, ignore_blanks=True, ignore_blank_cards=True, tolerance=None): """ Parameters ---------- a : str or `HDUList` The filename of a FITS file on disk, or an `HDUList` object. b : str or `HDUList` The filename of a FITS file on disk, or an `HDUList` object to compare to the first file. ignore_hdus : sequence, optional HDU names to ignore when comparing two FITS files or HDU lists; the presence of these HDUs and their contents are ignored. Wildcard strings may also be included in the list. ignore_keywords : sequence, optional Header keywords to ignore when comparing two headers; the presence of these keywords and their values are ignored. Wildcard strings may also be included in the list. ignore_comments : sequence, optional A list of header keywords whose comments should be ignored in the comparison. May contain wildcard strings as with ignore_keywords. ignore_fields : sequence, optional The (case-insensitive) names of any table columns to ignore if any table data is to be compared. numdiffs : int, optional The number of pixel/table values to output when reporting HDU data differences. 
Though the count of differences is the same either way, this allows controlling the number of different values that are kept in memory or output. If a negative value is given, then numdiffs is treated as unlimited (default: 10). rtol : float, optional The relative difference to allow when comparing two float values either in header values, image arrays, or table columns (default: 0.0). Values which satisfy the expression .. math:: \\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right| are considered to be different. The underlying function used for comparison is `numpy.allclose`. .. versionchanged:: 2.0 ``rtol`` replaces the deprecated ``tolerance`` argument. atol : float, optional The allowed absolute difference. See also ``rtol`` parameter. .. versionadded:: 2.0 ignore_blanks : bool, optional Ignore extra whitespace at the end of string values either in headers or data. Extra leading whitespace is not ignored (default: True). ignore_blank_cards : bool, optional Ignore all cards that are blank, i.e. they only contain whitespace (default: True). """ if isinstance(a, str): try: a = fitsopen(a) except Exception as exc: raise OSError("error opening file a ({}): {}: {}".format( a, exc.__class__.__name__, exc.args[0])) close_a = True else: close_a = False if isinstance(b, str): try: b = fitsopen(b) except Exception as exc: raise OSError("error opening file b ({}): {}: {}".format( b, exc.__class__.__name__, exc.args[0])) close_b = True else: close_b = False # Normalize keywords/fields to ignore to upper case self.ignore_hdus = set(k.upper() for k in ignore_hdus) self.ignore_keywords = set(k.upper() for k in ignore_keywords) self.ignore_comments = set(k.upper() for k in ignore_comments) self.ignore_fields = set(k.upper() for k in ignore_fields) self.numdiffs = numdiffs self.rtol = rtol self.atol = atol if tolerance is not None: # This should be removed in the next astropy version warnings.warn( '"tolerance" was deprecated in version 2.0 and will be removed in ' 'a future version. Use argument "rtol" instead.', AstropyDeprecationWarning) self.rtol = tolerance # when tolerance is provided *always* ignore `rtol` # during the transition/deprecation period self.ignore_blanks = ignore_blanks self.ignore_blank_cards = ignore_blank_cards # Some hdu names may be pattern wildcards. Find them. 
self.ignore_hdu_patterns = set() for name in list(self.ignore_hdus): if name != '*' and glob.has_magic(name): self.ignore_hdus.remove(name) self.ignore_hdu_patterns.add(name) self.diff_hdu_count = () self.diff_hdus = [] try: super().__init__(a, b) finally: if close_a: a.close() if close_b: b.close() def _diff(self): if len(self.a) != len(self.b): self.diff_hdu_count = (len(self.a), len(self.b)) # Record filenames for use later in _report self.filenamea = self.a.filename() if not self.filenamea: self.filenamea = '<{} object at {:#x}>'.format( self.a.__class__.__name__, id(self.a)) self.filenameb = self.b.filename() if not self.filenameb: self.filenameb = '<{} object at {:#x}>'.format( self.b.__class__.__name__, id(self.b)) if self.ignore_hdus: self.a = HDUList([h for h in self.a if h.name not in self.ignore_hdus]) self.b = HDUList([h for h in self.b if h.name not in self.ignore_hdus]) if self.ignore_hdu_patterns: a_names = [hdu.name for hdu in self.a] b_names = [hdu.name for hdu in self.b] for pattern in self.ignore_hdu_patterns: self.a = HDUList([h for h in self.a if h.name not in fnmatch.filter( a_names, pattern)]) self.b = HDUList([h for h in self.b if h.name not in fnmatch.filter( b_names, pattern)]) # For now, just compare the extensions one by one in order. # Might allow some more sophisticated types of diffing later. # TODO: Somehow or another simplify the passing around of diff # options--this will become important as the number of options grows for idx in range(min(len(self.a), len(self.b))): hdu_diff = HDUDiff.fromdiff(self, self.a[idx], self.b[idx]) if not hdu_diff.identical: self.diff_hdus.append((idx, hdu_diff)) def _report(self): wrapper = textwrap.TextWrapper(initial_indent=' ', subsequent_indent=' ') self._fileobj.write('\n') self._writeln(' fitsdiff: {}'.format(__version__)) self._writeln(' a: {}\n b: {}'.format(self.filenamea, self.filenameb)) if self.ignore_hdus: ignore_hdus = ' '.join(sorted(self.ignore_hdus)) self._writeln(' HDU(s) not to be compared:\n{}' .format(wrapper.fill(ignore_hdus))) if self.ignore_hdu_patterns: ignore_hdu_patterns = ' '.join(sorted(self.ignore_hdu_patterns)) self._writeln(' HDU(s) not to be compared:\n{}' .format(wrapper.fill(ignore_hdu_patterns))) if self.ignore_keywords: ignore_keywords = ' '.join(sorted(self.ignore_keywords)) self._writeln(' Keyword(s) not to be compared:\n{}' .format(wrapper.fill(ignore_keywords))) if self.ignore_comments: ignore_comments = ' '.join(sorted(self.ignore_comments)) self._writeln(' Keyword(s) whose comments are not to be compared' ':\n{}'.format(wrapper.fill(ignore_comments))) if self.ignore_fields: ignore_fields = ' '.join(sorted(self.ignore_fields)) self._writeln(' Table column(s) not to be compared:\n{}' .format(wrapper.fill(ignore_fields))) self._writeln(' Maximum number of different data values to be ' 'reported: {}'.format(self.numdiffs)) self._writeln(' Relative tolerance: {}, Absolute tolerance: {}' .format(self.rtol, self.atol)) if self.diff_hdu_count: self._fileobj.write('\n') self._writeln('Files contain different numbers of HDUs:') self._writeln(' a: {}'.format(self.diff_hdu_count[0])) self._writeln(' b: {}'.format(self.diff_hdu_count[1])) if not self.diff_hdus: self._writeln('No differences found between common HDUs.') return elif not self.diff_hdus: self._fileobj.write('\n') self._writeln('No differences found.') return for idx, hdu_diff in self.diff_hdus: # print out the extension heading if idx == 0: self._fileobj.write('\n') self._writeln('Primary HDU:') else: self._fileobj.write('\n') 
                self._writeln('Extension HDU {}:'.format(idx))

            hdu_diff.report(self._fileobj, indent=self._indent + 1)


class HDUDiff(_BaseDiff):
    """
    Diff two HDU objects, including their headers and their data (but only if
    both HDUs contain the same type of data: image, table, or unknown).

    `HDUDiff` objects have the following diff attributes:

    - ``diff_extnames``: If the two HDUs have different EXTNAME values, this
      contains a 2-tuple of the different extension names.

    - ``diff_extvers``: If the two HDUs have different EXTVER values, this
      contains a 2-tuple of the different extension versions.

    - ``diff_extlevels``: If the two HDUs have different EXTLEVEL values, this
      contains a 2-tuple of the different extension levels.

    - ``diff_extension_types``: If the two HDUs have different XTENSION values,
      this contains a 2-tuple of the different extension types.

    - ``diff_headers``: Contains a `HeaderDiff` object for the headers of the
      two HDUs. This will always contain an object--it may be determined
      whether the headers are different through ``diff_headers.identical``.

    - ``diff_data``: Contains either a `ImageDataDiff`, `TableDataDiff`, or
      `RawDataDiff` as appropriate for the data in the HDUs, and only if the
      two HDUs have non-empty data of the same type (`RawDataDiff` is used for
      HDUs containing non-empty data of an indeterminate type).
    """

    def __init__(self, a, b, ignore_keywords=[], ignore_comments=[],
                 ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0,
                 ignore_blanks=True, ignore_blank_cards=True,
                 tolerance=None):
        """
        Parameters
        ----------
        a : HDU object
            An HDU object.

        b : HDU object
            An HDU object to compare to the first HDU object.

        ignore_keywords : sequence, optional
            Header keywords to ignore when comparing two headers; the presence
            of these keywords and their values are ignored.  Wildcard strings
            may also be included in the list.

        ignore_comments : sequence, optional
            A list of header keywords whose comments should be ignored in the
            comparison.  May contain wildcard strings as with ignore_keywords.

        ignore_fields : sequence, optional
            The (case-insensitive) names of any table columns to ignore if any
            table data is to be compared.

        numdiffs : int, optional
            The number of pixel/table values to output when reporting HDU data
            differences.  Though the count of differences is the same either
            way, this allows controlling the number of different values that
            are kept in memory or output.  If a negative value is given, then
            numdiffs is treated as unlimited (default: 10).

        rtol : float, optional
            The relative difference to allow when comparing two float values
            either in header values, image arrays, or table columns
            (default: 0.0). Values which satisfy the expression

            .. math::

                \\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|

            are considered to be different.
            The underlying function used for comparison is `numpy.allclose`.

            .. versionchanged:: 2.0
               ``rtol`` replaces the deprecated ``tolerance`` argument.

        atol : float, optional
            The allowed absolute difference. See also ``rtol`` parameter.

            .. versionadded:: 2.0

        ignore_blanks : bool, optional
            Ignore extra whitespace at the end of string values either in
            headers or data. Extra leading whitespace is not ignored
            (default: True).

        ignore_blank_cards : bool, optional
            Ignore all cards that are blank, i.e. they only contain
            whitespace (default: True).
""" self.ignore_keywords = {k.upper() for k in ignore_keywords} self.ignore_comments = {k.upper() for k in ignore_comments} self.ignore_fields = {k.upper() for k in ignore_fields} self.rtol = rtol self.atol = atol if tolerance is not None: # This should be removed in the next astropy version warnings.warn( '"tolerance" was deprecated in version 2.0 and will be removed in ' 'a future version. Use argument "rtol" instead.', AstropyDeprecationWarning) self.rtol = tolerance # when tolerance is provided *always* ignore `rtol` # during the transition/deprecation period self.numdiffs = numdiffs self.ignore_blanks = ignore_blanks self.diff_extnames = () self.diff_extvers = () self.diff_extlevels = () self.diff_extension_types = () self.diff_headers = None self.diff_data = None super().__init__(a, b) def _diff(self): if self.a.name != self.b.name: self.diff_extnames = (self.a.name, self.b.name) if self.a.ver != self.b.ver: self.diff_extvers = (self.a.ver, self.b.ver) if self.a.level != self.b.level: self.diff_extlevels = (self.a.level, self.b.level) if self.a.header.get('XTENSION') != self.b.header.get('XTENSION'): self.diff_extension_types = (self.a.header.get('XTENSION'), self.b.header.get('XTENSION')) self.diff_headers = HeaderDiff.fromdiff(self, self.a.header.copy(), self.b.header.copy()) if self.a.data is None or self.b.data is None: # TODO: Perhaps have some means of marking this case pass elif self.a.is_image and self.b.is_image: self.diff_data = ImageDataDiff.fromdiff(self, self.a.data, self.b.data) elif (isinstance(self.a, _TableLikeHDU) and isinstance(self.b, _TableLikeHDU)): # TODO: Replace this if/when _BaseHDU grows a .is_table property self.diff_data = TableDataDiff.fromdiff(self, self.a.data, self.b.data) elif not self.diff_extension_types: # Don't diff the data for unequal extension types that are not # recognized image or table types self.diff_data = RawDataDiff.fromdiff(self, self.a.data, self.b.data) def _report(self): if self.identical: self._writeln(" No differences found.") if self.diff_extension_types: self._writeln(" Extension types differ:\n a: {}\n " "b: {}".format(*self.diff_extension_types)) if self.diff_extnames: self._writeln(" Extension names differ:\n a: {}\n " "b: {}".format(*self.diff_extnames)) if self.diff_extvers: self._writeln(" Extension versions differ:\n a: {}\n " "b: {}".format(*self.diff_extvers)) if self.diff_extlevels: self._writeln(" Extension levels differ:\n a: {}\n " "b: {}".format(*self.diff_extlevels)) if not self.diff_headers.identical: self._fileobj.write('\n') self._writeln(" Headers contain differences:") self.diff_headers.report(self._fileobj, indent=self._indent + 1) if self.diff_data is not None and not self.diff_data.identical: self._fileobj.write('\n') self._writeln(" Data contains differences:") self.diff_data.report(self._fileobj, indent=self._indent + 1) class HeaderDiff(_BaseDiff): """ Diff two `Header` objects. `HeaderDiff` objects have the following diff attributes: - ``diff_keyword_count``: If the two headers contain a different number of keywords, this contains a 2-tuple of the keyword count for each header. - ``diff_keywords``: If either header contains one or more keywords that don't appear at all in the other header, this contains a 2-tuple consisting of a list of the keywords only appearing in header a, and a list of the keywords only appearing in header b. 
    - ``diff_duplicate_keywords``: If a keyword appears in both headers at
      least once, but contains a different number of duplicates (for example, a
      different number of HISTORY cards in each header), an item is added to
      this dict with the keyword as the key, and a 2-tuple of the different
      counts of that keyword as the value.  For example::

          {'HISTORY': (20, 19)}

      means that header a contains 20 HISTORY cards, while header b contains
      only 19 HISTORY cards.

    - ``diff_keyword_values``: If any of the common keywords between the two
      headers have different values, they appear in this dict.  It has a
      structure similar to ``diff_duplicate_keywords``, with the keyword as the
      key, and a 2-tuple of the different values as the value.  For example::

          {'NAXIS': (2, 3)}

      means that the NAXIS keyword has a value of 2 in header a, and a value of
      3 in header b.  This excludes any keywords matched by the
      ``ignore_keywords`` list.

    - ``diff_keyword_comments``: Like ``diff_keyword_values``, but contains
      differences between keyword comments.

    `HeaderDiff` objects also have a ``common_keywords`` attribute that lists
    all keywords that appear in both headers.
    """

    def __init__(self, a, b, ignore_keywords=[], ignore_comments=[],
                 rtol=0.0, atol=0.0, ignore_blanks=True,
                 ignore_blank_cards=True, tolerance=None):
        """
        Parameters
        ----------
        a : `Header` or string
            A `Header` object or a string containing a FITS header.

        b : `Header` or string
            A `Header` object or a string containing a FITS header, to
            compare to the first header.

        ignore_keywords : sequence, optional
            Header keywords to ignore when comparing two headers; the presence
            of these keywords and their values are ignored.  Wildcard strings
            may also be included in the list.

        ignore_comments : sequence, optional
            A list of header keywords whose comments should be ignored in the
            comparison.  May contain wildcard strings as with ignore_keywords.

        rtol : float, optional
            The relative difference to allow when comparing two float values
            either in header values, image arrays, or table columns
            (default: 0.0). Values which satisfy the expression

            .. math::

                \\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|

            are considered to be different.
            The underlying function used for comparison is `numpy.allclose`.

            .. versionchanged:: 2.0
               ``rtol`` replaces the deprecated ``tolerance`` argument.

        atol : float, optional
            The allowed absolute difference. See also ``rtol`` parameter.

            .. versionadded:: 2.0

        ignore_blanks : bool, optional
            Ignore extra whitespace at the end of string values either in
            headers or data. Extra leading whitespace is not ignored
            (default: True).

        ignore_blank_cards : bool, optional
            Ignore all cards that are blank, i.e. they only contain
            whitespace (default: True).
        """

        self.ignore_keywords = {k.upper() for k in ignore_keywords}
        self.ignore_comments = {k.upper() for k in ignore_comments}

        self.rtol = rtol
        self.atol = atol

        if tolerance is not None:  # This should be removed in the next astropy version
            warnings.warn(
                '"tolerance" was deprecated in version 2.0 and will be removed in '
                'a future version. Use argument "rtol" instead.',
                AstropyDeprecationWarning)
            self.rtol = tolerance  # when tolerance is provided *always* ignore `rtol`
            # during the transition/deprecation period

        self.ignore_blanks = ignore_blanks
        self.ignore_blank_cards = ignore_blank_cards

        self.ignore_keyword_patterns = set()
        self.ignore_comment_patterns = set()
        for keyword in list(self.ignore_keywords):
            keyword = keyword.upper()
            if keyword != '*' and glob.has_magic(keyword):
                self.ignore_keywords.remove(keyword)
                self.ignore_keyword_patterns.add(keyword)

        for keyword in list(self.ignore_comments):
            keyword = keyword.upper()
            if keyword != '*' and glob.has_magic(keyword):
                self.ignore_comments.remove(keyword)
                self.ignore_comment_patterns.add(keyword)

        # Keywords appearing in each header
        self.common_keywords = []

        # Set to the number of keywords in each header if the counts differ
        self.diff_keyword_count = ()

        # Set if the keywords common to each header (excluding ignore_keywords)
        # appear in different positions within the header
        # TODO: Implement this
        self.diff_keyword_positions = ()

        # Keywords unique to each header (excluding keywords in
        # ignore_keywords)
        self.diff_keywords = ()

        # Keywords that have different numbers of duplicates in each header
        # (excluding keywords in ignore_keywords)
        self.diff_duplicate_keywords = {}

        # Keywords common to each header but having different values (excluding
        # keywords in ignore_keywords)
        self.diff_keyword_values = defaultdict(list)

        # Keywords common to each header but having different comments
        # (excluding keywords in ignore_keywords or in ignore_comments)
        self.diff_keyword_comments = defaultdict(list)

        if isinstance(a, str):
            a = Header.fromstring(a)
        if isinstance(b, str):
            b = Header.fromstring(b)

        if not (isinstance(a, Header) and isinstance(b, Header)):
            raise TypeError('HeaderDiff can only diff astropy.io.fits.Header '
                            'objects or strings containing FITS headers.')

        super().__init__(a, b)

    # TODO: This doesn't pay much attention to the *order* of the keywords,
    # except in the case of duplicate keywords.  The order should be checked
    # too, or at least it should be an option.
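    # A minimal usage sketch (added for illustration; not part of the original
    # module). The header contents below are hypothetical:
    #
    #     >>> from astropy.io import fits
    #     >>> ha = fits.Header([('NAXIS', 2), ('OBSERVER', 'X')])
    #     >>> hb = fits.Header([('NAXIS', 3), ('OBSERVER', 'X')])
    #     >>> diff = fits.HeaderDiff(ha, hb)
    #     >>> diff.identical
    #     False
    #     >>> diff.diff_keyword_values['NAXIS']
    #     [(2, 3)]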
    def _diff(self):
        if self.ignore_blank_cards:
            cardsa = [c for c in self.a.cards if str(c) != BLANK_CARD]
            cardsb = [c for c in self.b.cards if str(c) != BLANK_CARD]
        else:
            cardsa = list(self.a.cards)
            cardsb = list(self.b.cards)

        # build dictionaries of keyword values and comments
        def get_header_values_comments(cards):
            values = {}
            comments = {}
            for card in cards:
                value = card.value
                if self.ignore_blanks and isinstance(value, str):
                    value = value.rstrip()
                values.setdefault(card.keyword, []).append(value)
                comments.setdefault(card.keyword, []).append(card.comment)
            return values, comments

        valuesa, commentsa = get_header_values_comments(cardsa)
        valuesb, commentsb = get_header_values_comments(cardsb)

        # Normalize all keywords to upper-case for comparison's sake;
        # TODO: HIERARCH keywords should be handled case-sensitively I think
        keywordsa = {k.upper() for k in valuesa}
        keywordsb = {k.upper() for k in valuesb}

        self.common_keywords = sorted(keywordsa.intersection(keywordsb))
        if len(cardsa) != len(cardsb):
            self.diff_keyword_count = (len(cardsa), len(cardsb))

        # Any other diff attributes should exclude ignored keywords
        keywordsa = keywordsa.difference(self.ignore_keywords)
        keywordsb = keywordsb.difference(self.ignore_keywords)
        if self.ignore_keyword_patterns:
            for pattern in self.ignore_keyword_patterns:
                keywordsa = keywordsa.difference(fnmatch.filter(keywordsa,
                                                                pattern))
                keywordsb = keywordsb.difference(fnmatch.filter(keywordsb,
                                                                pattern))

        if '*' in self.ignore_keywords:
            # Any other differences between keywords are to be ignored
            return

        left_only_keywords = sorted(keywordsa.difference(keywordsb))
        right_only_keywords = sorted(keywordsb.difference(keywordsa))

        if left_only_keywords or right_only_keywords:
            self.diff_keywords = (left_only_keywords, right_only_keywords)

        # Compare count of each common keyword
        for keyword in self.common_keywords:
            if keyword in self.ignore_keywords:
                continue
            if self.ignore_keyword_patterns:
                skip = False
                for pattern in self.ignore_keyword_patterns:
                    if fnmatch.fnmatch(keyword, pattern):
                        skip = True
                        break
                if skip:
                    continue

            counta = len(valuesa[keyword])
            countb = len(valuesb[keyword])
            if counta != countb:
                self.diff_duplicate_keywords[keyword] = (counta, countb)

            # Compare keywords' values and comments
            for a, b in zip(valuesa[keyword], valuesb[keyword]):
                if diff_values(a, b, rtol=self.rtol, atol=self.atol):
                    self.diff_keyword_values[keyword].append((a, b))
                else:
                    # If there are duplicate keywords we need to be able to
                    # index each duplicate; if the values of a duplicate
                    # are identical use None here
                    self.diff_keyword_values[keyword].append(None)

            if not any(self.diff_keyword_values[keyword]):
                # No differences found; delete the array of Nones
                del self.diff_keyword_values[keyword]

            if '*' in self.ignore_comments or keyword in self.ignore_comments:
                continue
            if self.ignore_comment_patterns:
                skip = False
                for pattern in self.ignore_comment_patterns:
                    if fnmatch.fnmatch(keyword, pattern):
                        skip = True
                        break
                if skip:
                    continue

            for a, b in zip(commentsa[keyword], commentsb[keyword]):
                if diff_values(a, b):
                    self.diff_keyword_comments[keyword].append((a, b))
                else:
                    self.diff_keyword_comments[keyword].append(None)

            if not any(self.diff_keyword_comments[keyword]):
                del self.diff_keyword_comments[keyword]

    def _report(self):
        if self.diff_keyword_count:
            self._writeln(' Headers have different number of cards:')
            self._writeln(' a: {}'.format(self.diff_keyword_count[0]))
            self._writeln(' b: {}'.format(self.diff_keyword_count[1]))
        if self.diff_keywords:
            for keyword in self.diff_keywords[0]:
                if keyword in Card._commentary_keywords:
                    val = self.a[keyword][0]
                else:
                    val = self.a[keyword]
                self._writeln(' Extra keyword {!r:8} in a: {!r}'.format(
                    keyword, val))
            for keyword in self.diff_keywords[1]:
                if keyword in Card._commentary_keywords:
                    val = self.b[keyword][0]
                else:
                    val = self.b[keyword]
                self._writeln(' Extra keyword {!r:8} in b: {!r}'.format(
                    keyword, val))

        if self.diff_duplicate_keywords:
            for keyword, count in sorted(self.diff_duplicate_keywords.items()):
                self._writeln(' Inconsistent duplicates of keyword {!r:8}:'
                              .format(keyword))
                self._writeln(' Occurs {} time(s) in a, {} time(s) in b'
                              .format(*count))

        if self.diff_keyword_values or self.diff_keyword_comments:
            for keyword in self.common_keywords:
                report_diff_keyword_attr(self._fileobj, 'values',
                                         self.diff_keyword_values, keyword,
                                         ind=self._indent)
                report_diff_keyword_attr(self._fileobj, 'comments',
                                         self.diff_keyword_comments, keyword,
                                         ind=self._indent)


# TODO: It might be good if there was also a threshold option for percentage of
# different pixels: For example ignore if only 1% of the pixels are different
# within some threshold.  There are lots of possibilities here, but hold off
# for now until specific cases come up.

class ImageDataDiff(_BaseDiff):
    """
    Diff two image data arrays (really any array from a PRIMARY HDU or an
    IMAGE extension HDU, though the data unit is assumed to be "pixels").

    `ImageDataDiff` objects have the following diff attributes:

    - ``diff_dimensions``: If the two arrays contain either a different number
      of dimensions or different sizes in any dimension, this contains a
      2-tuple of the shapes of each array.  Currently no further comparison is
      performed on images that don't have the exact same dimensions.

    - ``diff_pixels``: If the two images contain any different pixels, this
      contains a list of 2-tuples of the array index where the difference was
      found, and another 2-tuple containing the different values.  For example,
      if the pixel at (0, 0) contains different values this would look like::

          [(0, 0), (1.1, 2.2)]

      where 1.1 and 2.2 are the values of that pixel in each array.  This
      array only contains up to ``self.numdiffs`` differences, for storage
      efficiency.

    - ``diff_total``: The total number of different pixels found between the
      arrays.  Although ``diff_pixels`` does not necessarily contain all the
      different pixel values, this can be used to get a count of the total
      number of differences found.

    - ``diff_ratio``: Contains the ratio of ``diff_total`` to the total number
      of pixels in the arrays.
    """

    def __init__(self, a, b, numdiffs=10, rtol=0.0, atol=0.0, tolerance=None):
        """
        Parameters
        ----------
        a : ndarray
            An array of image data.

        b : ndarray
            An array of image data to compare to the first array.

        numdiffs : int, optional
            The number of pixel/table values to output when reporting HDU data
            differences.  Though the count of differences is the same either
            way, this allows controlling the number of different values that
            are kept in memory or output.  If a negative value is given, then
            numdiffs is treated as unlimited (default: 10).

        rtol : float, optional
            The relative difference to allow when comparing two float values
            either in header values, image arrays, or table columns
            (default: 0.0). Values which satisfy the expression

            .. math::

                \\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|

            are considered to be different.
            The underlying function used for comparison is `numpy.allclose`.

            .. versionchanged:: 2.0
               ``rtol`` replaces the deprecated ``tolerance`` argument.
        atol : float, optional
            The allowed absolute difference. See also ``rtol`` parameter.

            .. versionadded:: 2.0
        """

        self.numdiffs = numdiffs
        self.rtol = rtol
        self.atol = atol

        if tolerance is not None:  # This should be removed in the next astropy version
            warnings.warn(
                '"tolerance" was deprecated in version 2.0 and will be removed in '
                'a future version. Use argument "rtol" instead.',
                AstropyDeprecationWarning)
            self.rtol = tolerance  # when tolerance is provided *always* ignore `rtol`
            # during the transition/deprecation period

        self.diff_dimensions = ()
        self.diff_pixels = []
        self.diff_ratio = 0

        # self.diff_pixels only holds up to numdiffs differing pixels, while
        # self.diff_total stores the total count of differences between the
        # images, but not the differing values themselves
        self.diff_total = 0

        super().__init__(a, b)

    def _diff(self):
        if self.a.shape != self.b.shape:
            self.diff_dimensions = (self.a.shape, self.b.shape)
            # Don't do any further comparison if the dimensions differ
            # TODO: Perhaps we could, however, diff just the intersection
            # between the two images
            return

        # Find the indices where the values are not equal
        # If neither a nor b are floating point (or complex), ignore rtol and
        # atol
        if not (np.issubdtype(self.a.dtype, np.inexact) or
                np.issubdtype(self.b.dtype, np.inexact)):
            rtol = 0
            atol = 0
        else:
            rtol = self.rtol
            atol = self.atol

        diffs = where_not_allclose(self.a, self.b, atol=atol, rtol=rtol)

        self.diff_total = len(diffs[0])

        if self.diff_total == 0:
            # Then we're done
            return

        if self.numdiffs < 0:
            numdiffs = self.diff_total
        else:
            numdiffs = self.numdiffs

        self.diff_pixels = [(idx, (self.a[idx], self.b[idx]))
                            for idx in islice(zip(*diffs), 0, numdiffs)]
        self.diff_ratio = float(self.diff_total) / float(len(self.a.flat))

    def _report(self):
        if self.diff_dimensions:
            dimsa = ' x '.join(str(d) for d in
                               reversed(self.diff_dimensions[0]))
            dimsb = ' x '.join(str(d) for d in
                               reversed(self.diff_dimensions[1]))
            self._writeln(' Data dimensions differ:')
            self._writeln(' a: {}'.format(dimsa))
            self._writeln(' b: {}'.format(dimsb))
            # For now we don't do any further comparison if the dimensions
            # differ; though in the future it might be nice to be able to
            # compare at least where the images intersect
            self._writeln(' No further data comparison performed.')
            return

        if not self.diff_pixels:
            return

        for index, values in self.diff_pixels:
            index = [x + 1 for x in reversed(index)]
            self._writeln(' Data differs at {}:'.format(index))
            report_diff_values(values[0], values[1], fileobj=self._fileobj,
                               indent_width=self._indent + 1)

        if self.diff_total > self.numdiffs:
            self._writeln(' ...')
        self._writeln(' {} different pixels found ({:.2%} different).'
                      .format(self.diff_total, self.diff_ratio))


class RawDataDiff(ImageDataDiff):
    """
    `RawDataDiff` is just a special case of `ImageDataDiff` where the images
    are one-dimensional, and the data is treated as a 1-dimensional array of
    bytes instead of pixel values.  This is used to compare the data of two
    non-standard extension HDUs that were not recognized as containing image or
    table data.

    `RawDataDiff` objects have the following diff attributes:

    - ``diff_dimensions``: Same as the ``diff_dimensions`` attribute of
      `ImageDataDiff` objects. Though the "dimension" of each array is just an
      integer representing the number of bytes in the data.

    - ``diff_bytes``: Like the ``diff_pixels`` attribute of `ImageDataDiff`
      objects, but renamed to reflect the minor semantic difference that these
      are raw bytes and not pixel values.  Also the indices are integers
      instead of tuples.
    - ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
    """

    def __init__(self, a, b, numdiffs=10):
        """
        Parameters
        ----------
        a : ndarray
            A 1-dimensional array of raw bytes.

        b : ndarray
            A 1-dimensional array of raw bytes to compare to the first array.

        numdiffs : int, optional
            The number of pixel/table values to output when reporting HDU data
            differences.  Though the count of differences is the same either
            way, this allows controlling the number of different values that
            are kept in memory or output.  If a negative value is given, then
            numdiffs is treated as unlimited (default: 10).
        """

        self.diff_dimensions = ()
        self.diff_bytes = []

        super().__init__(a, b, numdiffs=numdiffs)

    def _diff(self):
        super()._diff()
        if self.diff_dimensions:
            self.diff_dimensions = (self.diff_dimensions[0][0],
                                    self.diff_dimensions[1][0])

        self.diff_bytes = [(x[0], y) for x, y in self.diff_pixels]
        del self.diff_pixels

    def _report(self):
        if self.diff_dimensions:
            self._writeln(' Data sizes differ:')
            self._writeln(' a: {} bytes'.format(self.diff_dimensions[0]))
            self._writeln(' b: {} bytes'.format(self.diff_dimensions[1]))
            # For now we don't do any further comparison if the dimensions
            # differ; though in the future it might be nice to be able to
            # compare at least where the images intersect
            self._writeln(' No further data comparison performed.')
            return

        if not self.diff_bytes:
            return

        for index, values in self.diff_bytes:
            self._writeln(' Data differs at byte {}:'.format(index))
            report_diff_values(values[0], values[1], fileobj=self._fileobj,
                               indent_width=self._indent + 1)

        self._writeln(' ...')
        self._writeln(' {} different bytes found ({:.2%} different).'
                      .format(self.diff_total, self.diff_ratio))


class TableDataDiff(_BaseDiff):
    """
    Diff two table data arrays. It doesn't matter whether the data originally
    came from a binary or ASCII table--the data should be passed in as a
    recarray.

    `TableDataDiff` objects have the following diff attributes:

    - ``diff_column_count``: If the tables being compared have different
      numbers of columns, this contains a 2-tuple of the column count in each
      table.  Even if the tables have different column counts, an attempt is
      still made to compare any columns they have in common.

    - ``diff_columns``: If either table contains columns unique to that table,
      either in name or format, this contains a 2-tuple of lists. The first
      element is a list of columns (these are full `Column` objects) that
      appear only in table a.  The second element is a list of columns that
      appear only in table b.  This only lists columns with different column
      definitions, and has nothing to do with the data in those columns.

    - ``diff_column_names``: This is like ``diff_columns``, but lists only the
      names of columns unique to either table, rather than the full `Column`
      objects.

    - ``diff_column_attributes``: Lists columns that are in both tables but
      have different secondary attributes, such as TUNIT or TDISP.  The format
      is a list of 2-tuples: The first a tuple of the column name and the
      attribute, the second a tuple of the different values.

    - ``diff_values``: `TableDataDiff` compares the data in each table on a
      column-by-column basis.  If any different data is found, it is added to
      this list.  The format of this list is similar to the ``diff_pixels``
      attribute on `ImageDataDiff` objects, though the "index" consists of a
      (column_name, row) tuple.  For example::

          [('TARGET', 0), ('NGC1001', 'NGC1002')]

      shows that the tables contain different values in the 0-th row of the
      'TARGET' column.

    - ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
    `TableDataDiff` objects also have a ``common_columns`` attribute that
    lists the `Column` objects for columns that are identical in both tables,
    and a ``common_column_names`` attribute which contains a set of the names
    of those columns.
    """

    def __init__(self, a, b, ignore_fields=[], numdiffs=10, rtol=0.0,
                 atol=0.0, tolerance=None):
        """
        Parameters
        ----------
        a : recarray
            A table data array.

        b : recarray
            A table data array to compare to the first table data array.

        ignore_fields : sequence, optional
            The (case-insensitive) names of any table columns to ignore if any
            table data is to be compared.

        numdiffs : int, optional
            The number of pixel/table values to output when reporting HDU data
            differences.  Though the count of differences is the same either
            way, this allows controlling the number of different values that
            are kept in memory or output.  If a negative value is given, then
            numdiffs is treated as unlimited (default: 10).

        rtol : float, optional
            The relative difference to allow when comparing two float values
            either in header values, image arrays, or table columns
            (default: 0.0). Values which satisfy the expression

            .. math::

                \\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|

            are considered to be different.
            The underlying function used for comparison is `numpy.allclose`.

            .. versionchanged:: 2.0
               ``rtol`` replaces the deprecated ``tolerance`` argument.

        atol : float, optional
            The allowed absolute difference. See also ``rtol`` parameter.

            .. versionadded:: 2.0
        """

        self.ignore_fields = set(ignore_fields)
        self.numdiffs = numdiffs
        self.rtol = rtol
        self.atol = atol

        if tolerance is not None:  # This should be removed in the next astropy version
            warnings.warn(
                '"tolerance" was deprecated in version 2.0 and will be removed in '
                'a future version. Use argument "rtol" instead.',
                AstropyDeprecationWarning)
            self.rtol = tolerance  # when tolerance is provided *always* ignore `rtol`
            # during the transition/deprecation period

        self.common_columns = []
        self.common_column_names = set()

        # self.diff_columns contains columns with different column definitions,
        # but not different column data. Column data is only compared in
        # columns that have the same definitions
        self.diff_rows = ()
        self.diff_column_count = ()
        self.diff_columns = ()

        # If two columns have the same name+format, but other attributes are
        # different (such as TUNIT or such) they are listed here
        self.diff_column_attributes = []

        # Like self.diff_columns, but just contains a list of the column names
        # unique to each table, and in the order they appear in the tables
        self.diff_column_names = ()
        self.diff_values = []

        self.diff_ratio = 0
        self.diff_total = 0

        super().__init__(a, b)

    def _diff(self):
        # Much of the code for comparing columns is similar to the code for
        # comparing headers--consider refactoring
        colsa = self.a.columns
        colsb = self.b.columns

        if len(colsa) != len(colsb):
            self.diff_column_count = (len(colsa), len(colsb))

        # Even if the number of columns are unequal, we still do comparison of
        # any common columns
        colsa = {c.name.lower(): c for c in colsa}
        colsb = {c.name.lower(): c for c in colsb}

        if '*' in self.ignore_fields:
            # If all columns are to be ignored, ignore any further differences
            # between the columns
            return

        # Keep the user's original ignore_fields list for reporting purposes,
        # but internally use a case-insensitive version
        ignore_fields = {f.lower() for f in self.ignore_fields}

        # It might be nice if there were a cleaner way to do this, but for now
        # it'll do
        for fieldname in ignore_fields:
            fieldname = fieldname.lower()
            if fieldname in colsa:
                del colsa[fieldname]
            if fieldname in colsb:
                del colsb[fieldname]

        colsa_set = set(colsa.values())
        colsb_set = set(colsb.values())
        self.common_columns = sorted(colsa_set.intersection(colsb_set),
                                     key=operator.attrgetter('name'))

        self.common_column_names = {col.name.lower()
                                    for col in self.common_columns}

        left_only_columns = {col.name.lower(): col
                             for col in colsa_set.difference(colsb_set)}
        right_only_columns = {col.name.lower(): col
                              for col in colsb_set.difference(colsa_set)}

        if left_only_columns or right_only_columns:
            self.diff_columns = (left_only_columns, right_only_columns)
            self.diff_column_names = ([], [])

        if left_only_columns:
            for col in self.a.columns:
                if col.name.lower() in left_only_columns:
                    self.diff_column_names[0].append(col.name)

        if right_only_columns:
            for col in self.b.columns:
                if col.name.lower() in right_only_columns:
                    self.diff_column_names[1].append(col.name)

        # If the tables have a different number of rows, we don't compare the
        # columns right now.
        # TODO: It might be nice to optionally compare the first n rows where n
        # is the minimum of the row counts between the two tables.
        if len(self.a) != len(self.b):
            self.diff_rows = (len(self.a), len(self.b))
            return

        # If the tables contain no rows there's no data to compare, so we're
        # done at this point. (See ticket #178)
        if len(self.a) == len(self.b) == 0:
            return

        # Like in the old fitsdiff, compare tables on a column by column basis
        # The difficulty here is that, while FITS column names are meant to be
        # case-insensitive, Astropy still allows, for the sake of flexibility,
        # two columns with the same name but different case.  When columns are
        # accessed in FITS tables, a case-sensitive match is tried first, and
        # failing that a case-insensitive match is made.
        # It's conceivable that the same column could appear in both tables
        # being compared, but with different case.
        # Though it *may* lead to inconsistencies in these rare cases, this
        # just assumes that there are no duplicated column names in either
        # table, and that the column names can be treated case-insensitively.
        for col in self.common_columns:
            name_lower = col.name.lower()
            if name_lower in ignore_fields:
                continue

            cola = colsa[name_lower]
            colb = colsb[name_lower]

            for attr, _ in _COL_ATTRS:
                vala = getattr(cola, attr, None)
                valb = getattr(colb, attr, None)
                if diff_values(vala, valb):
                    self.diff_column_attributes.append(
                        ((col.name.upper(), attr), (vala, valb)))

            arra = self.a[col.name]
            arrb = self.b[col.name]

            if (np.issubdtype(arra.dtype, np.floating) and
                    np.issubdtype(arrb.dtype, np.floating)):
                diffs = where_not_allclose(arra, arrb,
                                           rtol=self.rtol,
                                           atol=self.atol)
            elif 'P' in col.format:
                diffs = ([idx for idx in range(len(arra))
                          if not np.allclose(arra[idx], arrb[idx],
                                             rtol=self.rtol,
                                             atol=self.atol)],)
            else:
                diffs = np.where(arra != arrb)

            self.diff_total += len(set(diffs[0]))

            if self.numdiffs >= 0:
                if len(self.diff_values) >= self.numdiffs:
                    # Don't save any more diff values
                    continue

                # Add no more diff'd values than this
                max_diffs = self.numdiffs - len(self.diff_values)
            else:
                max_diffs = len(diffs[0])

            last_seen_idx = None
            for idx in islice(diffs[0], 0, max_diffs):
                if idx == last_seen_idx:
                    # Skip duplicate indices, which may occur when the column
                    # data contains multi-dimensional values; we're only
                    # interested in storing row-by-row differences
                    continue
                last_seen_idx = idx
                self.diff_values.append(((col.name, idx),
                                         (arra[idx], arrb[idx])))

        total_values = len(self.a) * len(self.a.dtype.fields)
        self.diff_ratio = float(self.diff_total) / float(total_values)

    def _report(self):
        if self.diff_column_count:
            self._writeln(' Tables have different number of columns:')
            self._writeln(' a: {}'.format(self.diff_column_count[0]))
            self._writeln(' b: {}'.format(self.diff_column_count[1]))

        if self.diff_column_names:
            # Show columns with names unique to either table
            for name in self.diff_column_names[0]:
                format = self.diff_columns[0][name.lower()].format
                self._writeln(' Extra column {} of format {} in a'.format(
                    name, format))
            for name in self.diff_column_names[1]:
                format = self.diff_columns[1][name.lower()].format
                self._writeln(' Extra column {} of format {} in b'.format(
                    name, format))

        col_attrs = dict(_COL_ATTRS)
        # Now go through each table again and show columns with common
        # names but other property differences...
        for col_attr, vals in self.diff_column_attributes:
            name, attr = col_attr
            self._writeln(' Column {} has different {}:'.format(
                name, col_attrs[attr]))
            report_diff_values(vals[0], vals[1], fileobj=self._fileobj,
                               indent_width=self._indent + 1)

        if self.diff_rows:
            self._writeln(' Table rows differ:')
            self._writeln(' a: {}'.format(self.diff_rows[0]))
            self._writeln(' b: {}'.format(self.diff_rows[1]))
            self._writeln(' No further data comparison performed.')
            return

        if not self.diff_values:
            return

        # Finally, let's go through and report column data differences:
        for indx, values in self.diff_values:
            self._writeln(' Column {} data differs in row {}:'.format(*indx))
            report_diff_values(values[0], values[1], fileobj=self._fileobj,
                               indent_width=self._indent + 1)

        if self.diff_values and self.numdiffs < self.diff_total:
            self._writeln(' ...{} additional difference(s) found.'.format(
                self.diff_total - self.numdiffs))

        if self.diff_total > self.numdiffs:
            self._writeln(' ...')

        self._writeln(' {} different table data element(s) found '
                      '({:.2%} different).'
                      .format(self.diff_total, self.diff_ratio))


def report_diff_keyword_attr(fileobj, attr, diffs, keyword, ind=0):
    """
    Write a diff between two header keyword values or comments to the
    specified file-like object.
""" if keyword in diffs: vals = diffs[keyword] for idx, val in enumerate(vals): if val is None: continue if idx == 0: dup = '' else: dup = '[{}]'.format(idx + 1) fileobj.write( fixed_width_indent(' Keyword {:8}{} has different {}:\n' .format(keyword, dup, attr), ind)) report_diff_values(val[0], val[1], fileobj=fileobj, indent_width=ind + 1)
# Licensed under a 3-clause BSD style license - see PYFITS.rst
"""
A package for reading and writing FITS files and manipulating their contents.

A module for reading and writing Flexible Image Transport System
(FITS) files.  This file format was endorsed by the International
Astronomical Union in 1999 and mandated by NASA as the standard format
for storing high energy astrophysics data.  For details of the FITS
standard, see the NASA/Science Office of Standards and Technology
publication, NOST 100-2.0.
"""

from astropy import config as _config

# Set module-global boolean variables
# TODO: Make it possible to set these variables via environment variables
# again, once support for that is added to Astropy


class Conf(_config.ConfigNamespace):
    """
    Configuration parameters for `astropy.io.fits`.
    """

    enable_record_valued_keyword_cards = _config.ConfigItem(
        True,
        'If True, enable support for record-valued keywords as described by '
        'FITS WCS distortion paper. Otherwise they are treated as normal '
        'keywords.',
        aliases=['astropy.io.fits.enabled_record_valued_keyword_cards'])

    extension_name_case_sensitive = _config.ConfigItem(
        False,
        'If True, extension names (i.e. the ``EXTNAME`` keyword) should be '
        'treated as case-sensitive.')

    strip_header_whitespace = _config.ConfigItem(
        True,
        'If True, automatically remove trailing whitespace for string values in '
        'headers.  Otherwise the values are returned verbatim, with all '
        'whitespace intact.')

    use_memmap = _config.ConfigItem(
        True,
        'If True, use memory-mapped file access to read/write the data in '
        'FITS files. This generally provides better performance, especially '
        'for large files, but may affect performance in I/O-heavy '
        'applications.')

    lazy_load_hdus = _config.ConfigItem(
        True,
        'If True, use lazy loading of HDUs when opening FITS files by '
        'default; that is fits.open() will only seek for and read HDUs on '
        'demand rather than reading all HDUs at once.  See the documentation '
        'for fits.open() for more details.')

    enable_uint = _config.ConfigItem(
        True,
        'If True, default to recognizing the convention for representing '
        'unsigned integers in FITS--if an array has BITPIX > 0, BSCALE = 1, '
        'and BZERO = 2**BITPIX, represent the data as unsigned integers '
        'per this convention.')


conf = Conf()


# Public API compatibility imports
# These need to come after the global config variables, as some of the
# submodules use them
from . import card
from . import column
from . import convenience
from . import hdu
from .card import *
from .column import *
from .convenience import *
from .diff import *
from .fitsrec import FITS_record, FITS_rec
from .hdu import *
from .hdu.groups import GroupData
from .hdu.hdulist import fitsopen as open
from .hdu.image import Section
from .header import Header
from .verify import VerifyError

__all__ = (['Conf', 'conf'] + card.__all__ + column.__all__ +
           convenience.__all__ + hdu.__all__ +
           ['FITS_record', 'FITS_rec', 'GroupData', 'open', 'Section',
            'Header', 'VerifyError', 'conf'])
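# Usage sketch (added for illustration; not part of the original module).
# The configuration items defined in ``Conf`` above can be inspected or
# temporarily overridden through the ``conf`` instance; ``set_temp`` is the
# context manager provided by astropy's ConfigNamespace:
#
#     >>> from astropy.io import fits
#     >>> fits.conf.use_memmap
#     True
#     >>> with fits.conf.set_temp('lazy_load_hdus', False):
#     ...     pass  # fits.open() would read all HDUs eagerly in this block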
# Licensed under a 3-clause BSD style license - see PYFITS.rst import copy import operator import re import sys import warnings import weakref import numbers from functools import reduce from collections import OrderedDict from contextlib import suppress import numpy as np from numpy import char as chararray from .card import Card, CARD_LENGTH from .util import (pairwise, _is_int, _convert_array, encode_ascii, cmp, NotifierMixin) from .verify import VerifyError, VerifyWarning from astropy.utils import lazyproperty, isiterable, indent from astropy.utils.exceptions import AstropyUserWarning __all__ = ['Column', 'ColDefs', 'Delayed'] # mapping from TFORM data type to numpy data type (code) # L: Logical (Boolean) # B: Unsigned Byte # I: 16-bit Integer # J: 32-bit Integer # K: 64-bit Integer # E: Single-precision Floating Point # D: Double-precision Floating Point # C: Single-precision Complex # M: Double-precision Complex # A: Character FITS2NUMPY = {'L': 'i1', 'B': 'u1', 'I': 'i2', 'J': 'i4', 'K': 'i8', 'E': 'f4', 'D': 'f8', 'C': 'c8', 'M': 'c16', 'A': 'a'} # the inverse dictionary of the above NUMPY2FITS = {val: key for key, val in FITS2NUMPY.items()} # Normally booleans are represented as ints in Astropy, but if passed in a numpy # boolean array, that should be supported NUMPY2FITS['b1'] = 'L' # Add unsigned types, which will be stored as signed ints with a TZERO card. NUMPY2FITS['u2'] = 'I' NUMPY2FITS['u4'] = 'J' NUMPY2FITS['u8'] = 'K' # Add half precision floating point numbers which will be up-converted to # single precision. NUMPY2FITS['f2'] = 'E' # This is the order in which values are converted to FITS types # Note that only double precision floating point/complex are supported FORMATORDER = ['L', 'B', 'I', 'J', 'K', 'D', 'M', 'A'] # Convert single precision floating point/complex to double precision. FITSUPCONVERTERS = {'E': 'D', 'C': 'M'} # mapping from ASCII table TFORM data type to numpy data type # A: Character # I: Integer (32-bit) # J: Integer (64-bit; non-standard) # F: Float (64-bit; fixed decimal notation) # E: Float (64-bit; exponential notation) # D: Float (64-bit; exponential notation, always 64-bit by convention) ASCII2NUMPY = {'A': 'a', 'I': 'i4', 'J': 'i8', 'F': 'f8', 'E': 'f8', 'D': 'f8'} # Maps FITS ASCII column format codes to the appropriate Python string # formatting codes for that type. ASCII2STR = {'A': '', 'I': 'd', 'J': 'd', 'F': 'f', 'E': 'E', 'D': 'E'} # For each ASCII table format code, provides a default width (and decimal # precision) for when one isn't given explicitly in the column format ASCII_DEFAULT_WIDTHS = {'A': (1, 0), 'I': (10, 0), 'J': (15, 0), 'E': (15, 7), 'F': (16, 7), 'D': (25, 17)} # TDISPn for both ASCII and Binary tables TDISP_RE_DICT = {} TDISP_RE_DICT['F'] = re.compile(r'(?:(?P<formatc>[F])(?:(?P<width>[0-9]+)\.{1}' r'(?P<precision>[0-9])+)+)|') TDISP_RE_DICT['A'] = TDISP_RE_DICT['L'] = \ re.compile(r'(?:(?P<formatc>[AL])(?P<width>[0-9]+)+)|') TDISP_RE_DICT['I'] = TDISP_RE_DICT['B'] = \ TDISP_RE_DICT['O'] = TDISP_RE_DICT['Z'] = \ re.compile(r'(?:(?P<formatc>[IBOZ])(?:(?P<width>[0-9]+)' r'(?:\.{0,1}(?P<precision>[0-9]+))?))|') TDISP_RE_DICT['E'] = TDISP_RE_DICT['G'] = \ TDISP_RE_DICT['D'] = \ re.compile(r'(?:(?P<formatc>[EGD])(?:(?P<width>[0-9]+)\.' 
               r'(?P<precision>[0-9]+))+)'
               r'(?:E{0,1}(?P<exponential>[0-9]+)?)|')
TDISP_RE_DICT['EN'] = TDISP_RE_DICT['ES'] = \
    re.compile(r'(?:(?P<formatc>E[NS])(?:(?P<width>[0-9]+)\.{1}'
               r'(?P<precision>[0-9])+)+)')

# mapping from TDISP format to python format
# A: Character
# L: Logical (Boolean)
# I: 16-bit Integer
#    Can't predefine zero padding and space padding before hand without
#    knowing the value being formatted, so grabbing precision and using that
#    to zero pad, ignoring width. Same with B, O, and Z
# B: Binary Integer
# O: Octal Integer
# Z: Hexadecimal Integer
# F: Float (64-bit; fixed decimal notation)
# EN: Float (engineering fortran format, exponential multiple of three)
# ES: Float (scientific, same as EN but non-zero leading digit)
# E: Float, exponential notation
#    Can't get exponential restriction to work without knowing value
#    before hand, so just using width and precision, same with D, G, EN, and
#    ES formats
# D: Double-precision Floating Point with exponential
#    (E but for double precision)
# G: Double-precision Floating Point, may or may not show exponent
TDISP_FMT_DICT = {'I': '{{:{width}d}}',
                  'B': '{{:{width}b}}',
                  'O': '{{:{width}o}}',
                  'Z': '{{:{width}x}}',
                  'F': '{{:{width}.{precision}f}}',
                  'G': '{{:{width}.{precision}g}}'}
TDISP_FMT_DICT['A'] = TDISP_FMT_DICT['L'] = '{{:>{width}}}'
TDISP_FMT_DICT['E'] = TDISP_FMT_DICT['D'] = \
    TDISP_FMT_DICT['EN'] = TDISP_FMT_DICT['ES'] = '{{:{width}.{precision}e}}'

# tuple of column/field definition common names and keyword names, make
# sure to preserve the one-to-one correspondence when updating the list(s).
# Use lists, instead of dictionaries so the names can be displayed in a
# preferred order.
KEYWORD_NAMES = ('TTYPE', 'TFORM', 'TUNIT', 'TNULL', 'TSCAL', 'TZERO',
                 'TDISP', 'TBCOL', 'TDIM', 'TCTYP', 'TCUNI', 'TCRPX',
                 'TCRVL', 'TCDLT', 'TRPOS')
KEYWORD_ATTRIBUTES = ('name', 'format', 'unit', 'null', 'bscale', 'bzero',
                      'disp', 'start', 'dim', 'coord_type', 'coord_unit',
                      'coord_ref_point', 'coord_ref_value', 'coord_inc',
                      'time_ref_pos')
"""This is a list of the attributes that can be set on `Column` objects."""

KEYWORD_TO_ATTRIBUTE = OrderedDict(zip(KEYWORD_NAMES, KEYWORD_ATTRIBUTES))
ATTRIBUTE_TO_KEYWORD = OrderedDict(zip(KEYWORD_ATTRIBUTES, KEYWORD_NAMES))

# TODO: Define a list of default comments to associate with each table keyword

# TFORMn regular expression
TFORMAT_RE = re.compile(r'(?P<repeat>^[0-9]*)(?P<format>[LXBIJKAEDCMPQ])'
                        r'(?P<option>[!-~]*)', re.I)

# TFORMn for ASCII tables; two different versions depending on whether
# the format is floating-point or not; allows empty values for width
# in which case defaults are used
TFORMAT_ASCII_RE = re.compile(r'(?:(?P<format>[AIJ])(?P<width>[0-9]+)?)|'
                              r'(?:(?P<formatf>[FED])'
                              r'(?:(?P<widthf>[0-9]+)\.'
                              r'(?P<precision>[0-9]+))?)')

TTYPE_RE = re.compile(r'[0-9a-zA-Z_]+')
"""
Regular expression for valid table column names.  See FITS Standard v3.0
section 7.2.2.
"""

# table definition keyword regular expression
TDEF_RE = re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)')

# table dimension keyword regular expression (fairly flexible with whitespace)
TDIM_RE = re.compile(r'\(\s*(?P<dims>(?:\d+,\s*)+\s*\d+)\s*\)\s*')

# value for ASCII table cell with value = TNULL
# this can be reset by user.
ASCIITNULL = 0 # The default placeholder to use for NULL values in ASCII tables when # converting from binary to ASCII tables DEFAULT_ASCII_TNULL = '---' class Delayed: """Delayed file-reading data.""" def __init__(self, hdu=None, field=None): self.hdu = weakref.proxy(hdu) self.field = field def __getitem__(self, key): # This forces the data for the HDU to be read, which will replace # the corresponding Delayed objects in the Tables Columns to be # transformed into ndarrays. It will also return the value of the # requested data element. return self.hdu.data[key][self.field] class _BaseColumnFormat(str): """ Base class for binary table column formats (just called _ColumnFormat) and ASCII table column formats (_AsciiColumnFormat). """ def __eq__(self, other): if not other: return False if isinstance(other, str): if not isinstance(other, self.__class__): try: other = self.__class__(other) except ValueError: return False else: return False return self.canonical == other.canonical def __hash__(self): return hash(self.canonical) @lazyproperty def dtype(self): """ The Numpy dtype object created from the format's associated recformat. """ return np.dtype(self.recformat) @classmethod def from_column_format(cls, format): """Creates a column format object from another column format object regardless of their type. That is, this can convert a _ColumnFormat to an _AsciiColumnFormat or vice versa at least in cases where a direct translation is possible. """ return cls.from_recformat(format.recformat) class _ColumnFormat(_BaseColumnFormat): """ Represents a FITS binary table column format. This is an enhancement over using a normal string for the format, since the repeat count, format code, and option are available as separate attributes, and smart comparison is used. For example 1J == J. """ def __new__(cls, format): self = super().__new__(cls, format) self.repeat, self.format, self.option = _parse_tformat(format) self.format = self.format.upper() if self.format in ('P', 'Q'): # TODO: There should be a generic factory that returns either # _FormatP or _FormatQ as appropriate for a given TFORMn if self.format == 'P': recformat = _FormatP.from_tform(format) else: recformat = _FormatQ.from_tform(format) # Format of variable length arrays self.p_format = recformat.format else: self.p_format = None return self @classmethod def from_recformat(cls, recformat): """Creates a column format from a Numpy record dtype format.""" return cls(_convert_format(recformat, reverse=True)) @lazyproperty def recformat(self): """Returns the equivalent Numpy record format string.""" return _convert_format(self) @lazyproperty def canonical(self): """ Returns a 'canonical' string representation of this format. This is in the proper form of rTa where T is the single character data type code, a is the optional part, and r is the repeat. If repeat == 1 (the default) it is left out of this representation. """ if self.repeat == 1: repeat = '' else: repeat = str(self.repeat) return '{}{}{}'.format(repeat, self.format, self.option) class _AsciiColumnFormat(_BaseColumnFormat): """Similar to _ColumnFormat but specifically for columns in ASCII tables. The formats of ASCII table columns and binary table columns are inherently incompatible in FITS. They don't support the same ranges and types of values, and even reuse format codes in subtly different ways. 
For example the format code 'Iw' in ASCII columns refers to any integer whose string representation is at most w characters wide, so 'I' can represent effectively any integer that will fit in a FITS columns. Whereas for binary tables 'I' very explicitly refers to a 16-bit signed integer. Conversions between the two column formats can be performed using the ``to/from_binary`` methods on this class, or the ``to/from_ascii`` methods on the `_ColumnFormat` class. But again, not all conversions are possible and may result in a `ValueError`. """ def __new__(cls, format, strict=False): self = super().__new__(cls, format) self.format, self.width, self.precision = \ _parse_ascii_tformat(format, strict) # This is to support handling logical (boolean) data from binary tables # in an ASCII table self._pseudo_logical = False return self @classmethod def from_column_format(cls, format): inst = cls.from_recformat(format.recformat) # Hack if format.format == 'L': inst._pseudo_logical = True return inst @classmethod def from_recformat(cls, recformat): """Creates a column format from a Numpy record dtype format.""" return cls(_convert_ascii_format(recformat, reverse=True)) @lazyproperty def recformat(self): """Returns the equivalent Numpy record format string.""" return _convert_ascii_format(self) @lazyproperty def canonical(self): """ Returns a 'canonical' string representation of this format. This is in the proper form of Tw.d where T is the single character data type code, w is the width in characters for this field, and d is the number of digits after the decimal place (for format codes 'E', 'F', and 'D' only). """ if self.format in ('E', 'F', 'D'): return '{}{}.{}'.format(self.format, self.width, self.precision) return '{}{}'.format(self.format, self.width) class _FormatX(str): """For X format in binary tables.""" def __new__(cls, repeat=1): nbytes = ((repeat - 1) // 8) + 1 # use an array, even if it is only ONE u1 (i.e. 
use tuple always) obj = super().__new__(cls, repr((nbytes,)) + 'u1') obj.repeat = repeat return obj def __getnewargs__(self): return (self.repeat,) @property def tform(self): return '{}X'.format(self.repeat) # TODO: Table column formats need to be verified upon first reading the file; # as it is, an invalid P format will raise a VerifyError from some deep, # unexpected place class _FormatP(str): """For P format in variable length table.""" # As far as I can tell from my reading of the FITS standard, a type code is # *required* for P and Q formats; there is no default _format_re_template = (r'(?P<repeat>\d+)?{}(?P<dtype>[LXBIJKAEDCM])' r'(?:\((?P<max>\d*)\))?') _format_code = 'P' _format_re = re.compile(_format_re_template.format(_format_code)) _descriptor_format = '2i4' def __new__(cls, dtype, repeat=None, max=None): obj = super().__new__(cls, cls._descriptor_format) obj.format = NUMPY2FITS[dtype] obj.dtype = dtype obj.repeat = repeat obj.max = max return obj def __getnewargs__(self): return (self.dtype, self.repeat, self.max) @classmethod def from_tform(cls, format): m = cls._format_re.match(format) if not m or m.group('dtype') not in FITS2NUMPY: raise VerifyError('Invalid column format: {}'.format(format)) repeat = m.group('repeat') array_dtype = m.group('dtype') max = m.group('max') if not max: max = None return cls(FITS2NUMPY[array_dtype], repeat=repeat, max=max) @property def tform(self): repeat = '' if self.repeat is None else self.repeat max = '' if self.max is None else self.max return '{}{}{}({})'.format(repeat, self._format_code, self.format, max) class _FormatQ(_FormatP): """Carries type description of the Q format for variable length arrays. The Q format is like the P format but uses 64-bit integers in the array descriptors, allowing for heaps stored beyond 2GB into a file. """ _format_code = 'Q' _format_re = re.compile(_FormatP._format_re_template.format(_format_code)) _descriptor_format = '2i8' class ColumnAttribute: """ Descriptor for attributes of `Column` that are associated with keywords in the FITS header and describe properties of the column as specified in the FITS standard. Each `ColumnAttribute` may have a ``validator`` method defined on it. This validates values set on this attribute to ensure that they meet the FITS standard. Invalid values will raise a warning and will not be used in formatting the column. The validator should take two arguments--the `Column` it is being assigned to, and the new value for the attribute, and it must raise an `AssertionError` if the value is invalid. The `ColumnAttribute` itself is a decorator that can be used to define the ``validator`` for each column attribute. For example:: @ColumnAttribute('TTYPE') def name(col, name): if not isinstance(name, str): raise AssertionError The actual object returned by this decorator is the `ColumnAttribute` instance though, not the ``name`` function. As such ``name`` is not a method of the class it is defined in. The setter for `ColumnAttribute` also updates the header of any table HDU this column is attached to in order to reflect the change. The ``validator`` should ensure that the value is valid for inclusion in a FITS header. """ def __init__(self, keyword): self._keyword = keyword self._validator = None # The name of the attribute associated with this keyword is currently # determined from the KEYWORD_NAMES/ATTRIBUTES lists. This could be # make more flexible in the future, for example, to support custom # column attributes. 
self._attr = '_' + KEYWORD_TO_ATTRIBUTE[self._keyword] def __get__(self, obj, objtype=None): if obj is None: return self else: return getattr(obj, self._attr) def __set__(self, obj, value): if self._validator is not None: self._validator(obj, value) old_value = getattr(obj, self._attr, None) setattr(obj, self._attr, value) obj._notify('column_attribute_changed', obj, self._attr[1:], old_value, value) def __call__(self, func): """ Set the validator for this column attribute. Returns ``self`` so that this can be used as a decorator, as described in the docs for this class. """ self._validator = func return self def __repr__(self): return "{0}('{1}')".format(self.__class__.__name__, self._keyword) class Column(NotifierMixin): """ Class which contains the definition of one column, e.g. ``ttype``, ``tform``, etc. and the array containing values for the column. """ def __init__(self, name=None, format=None, unit=None, null=None, bscale=None, bzero=None, disp=None, start=None, dim=None, array=None, ascii=None, coord_type=None, coord_unit=None, coord_ref_point=None, coord_ref_value=None, coord_inc=None, time_ref_pos=None): """ Construct a `Column` by specifying attributes. All attributes except ``format`` can be optional; see :ref:`column_creation` and :ref:`creating_ascii_table` for more information regarding ``TFORM`` keyword. Parameters ---------- name : str, optional column name, corresponding to ``TTYPE`` keyword format : str column format, corresponding to ``TFORM`` keyword unit : str, optional column unit, corresponding to ``TUNIT`` keyword null : str, optional null value, corresponding to ``TNULL`` keyword bscale : int-like, optional bscale value, corresponding to ``TSCAL`` keyword bzero : int-like, optional bzero value, corresponding to ``TZERO`` keyword disp : str, optional display format, corresponding to ``TDISP`` keyword start : int, optional column starting position (ASCII table only), corresponding to ``TBCOL`` keyword dim : str, optional column dimension corresponding to ``TDIM`` keyword array : iterable, optional a `list`, `numpy.ndarray` (or other iterable that can be used to initialize an ndarray) providing initial data for this column. The array will be automatically converted, if possible, to the data format of the column. In the case were non-trivial ``bscale`` and/or ``bzero`` arguments are given, the values in the array must be the *physical* values--that is, the values of column as if the scaling has already been applied (the array stored on the column object will then be converted back to its storage values). 
ascii : bool, optional set `True` if this describes a column for an ASCII table; this may be required to disambiguate the column format coord_type : str, optional coordinate/axis type corresponding to ``TCTYP`` keyword coord_unit : str, optional coordinate/axis unit corresponding to ``TCUNI`` keyword coord_ref_point : int-like, optional pixel coordinate of the reference point corresponding to ``TCRPX`` keyword coord_ref_value : int-like, optional coordinate value at reference point corresponding to ``TCRVL`` keyword coord_inc : int-like, optional coordinate increment at reference point corresponding to ``TCDLT`` keyword time_ref_pos : str, optional reference position for a time coordinate column corresponding to ``TRPOS`` keyword """ if format is None: raise ValueError('Must specify format to construct Column.') # any of the input argument (except array) can be a Card or just # a number/string kwargs = {'ascii': ascii} for attr in KEYWORD_ATTRIBUTES: value = locals()[attr] # get the argument's value if isinstance(value, Card): value = value.value kwargs[attr] = value valid_kwargs, invalid_kwargs = self._verify_keywords(**kwargs) if invalid_kwargs: msg = ['The following keyword arguments to Column were invalid:'] for val in invalid_kwargs.values(): msg.append(indent(val[1])) raise VerifyError('\n'.join(msg)) for attr in KEYWORD_ATTRIBUTES: setattr(self, attr, valid_kwargs.get(attr)) # TODO: Try to eliminate the following two special cases # for recformat and dim: # This is not actually stored as an attribute on columns for some # reason recformat = valid_kwargs['recformat'] # The 'dim' keyword's original value is stored in self.dim, while # *only* the tuple form is stored in self._dims. self._dims = self.dim self.dim = dim # Awful hack to use for now to keep track of whether the column holds # pseudo-unsigned int data self._pseudo_unsigned_ints = False # if the column data is not ndarray, make it to be one, i.e. # input arrays can be just list or tuple, not required to be ndarray # does not include Object array because there is no guarantee # the elements in the object array are consistent. if not isinstance(array, (np.ndarray, chararray.chararray, Delayed)): try: # try to convert to a ndarray first if array is not None: array = np.array(array) except Exception: try: # then try to convert it to a strings array itemsize = int(recformat[1:]) array = chararray.array(array, itemsize=itemsize) except ValueError: # then try variable length array # Note: This includes _FormatQ by inheritance if isinstance(recformat, _FormatP): array = _VLF(array, dtype=recformat.dtype) else: raise ValueError('Data is inconsistent with the ' 'format `{}`.'.format(format)) array = self._convert_to_valid_data_type(array) # We have required (through documentation) that arrays passed in to # this constructor are already in their physical values, so we make # note of that here if isinstance(array, np.ndarray): self._physical_values = True else: self._physical_values = False self._parent_fits_rec = None self.array = array def __repr__(self): text = '' for attr in KEYWORD_ATTRIBUTES: value = getattr(self, attr) if value is not None: text += attr + ' = ' + repr(value) + '; ' return text[:-2] def __eq__(self, other): """ Two columns are equal if their name and format are the same. Other attributes aren't taken into account at this time. 
""" # According to the FITS standard column names must be case-insensitive a = (self.name.lower(), self.format) b = (other.name.lower(), other.format) return a == b def __hash__(self): """ Like __eq__, the hash of a column should be based on the unique column name and format, and be case-insensitive with respect to the column name. """ return hash((self.name.lower(), self.format)) @property def array(self): """ The Numpy `~numpy.ndarray` associated with this `Column`. If the column was instantiated with an array passed to the ``array`` argument, this will return that array. However, if the column is later added to a table, such as via `BinTableHDU.from_columns` as is typically the case, this attribute will be updated to reference the associated field in the table, which may no longer be the same array. """ # Ideally the .array attribute never would have existed in the first # place, or would have been internal-only. This is a legacy of the # older design from Astropy that needs to have continued support, for # now. # One of the main problems with this design was that it created a # reference cycle. When the .array attribute was updated after # creating a FITS_rec from the column (as explained in the docstring) a # reference cycle was created. This is because the code in BinTableHDU # (and a few other places) does essentially the following: # # data._coldefs = columns # The ColDefs object holding this Column # for col in columns: # col.array = data.field(col.name) # # This way each columns .array attribute now points to the field in the # table data. It's actually a pretty confusing interface (since it # replaces the array originally pointed to by .array), but it's the way # things have been for a long, long time. # # However, this results, in *many* cases, in a reference cycle. # Because the array returned by data.field(col.name), while sometimes # an array that owns its own data, is usually like a slice of the # original data. It has the original FITS_rec as the array .base. # This results in the following reference cycle (for the n-th column): # # data -> data._coldefs -> data._coldefs[n] -> # data._coldefs[n].array -> data._coldefs[n].array.base -> data # # Because ndarray objects do not handled by Python's garbage collector # the reference cycle cannot be broken. Therefore the FITS_rec's # refcount never goes to zero, its __del__ is never called, and its # memory is never freed. This didn't occur in *all* cases, but it did # occur in many cases. # # To get around this, Column.array is no longer a simple attribute # like it was previously. Now each Column has a ._parent_fits_rec # attribute which is a weakref to a FITS_rec object. Code that # previously assigned each col.array to field in a FITS_rec (as in # the example a few paragraphs above) is still used, however now # array.setter checks if a reference cycle will be created. And if # so, instead of saving directly to the Column's __dict__, it creates # the ._prent_fits_rec weakref, and all lookups of the column's .array # go through that instead. # # This alone does not fully solve the problem. Because # _parent_fits_rec is a weakref, if the user ever holds a reference to # the Column, but deletes all references to the underlying FITS_rec, # the .array attribute would suddenly start returning None instead of # the array data. This problem is resolved on FITS_rec's end. See the # note in the FITS_rec._coldefs property for the rest of the story. 
# If the Columns's array is not a reference to an existing FITS_rec, # then it is just stored in self.__dict__; otherwise check the # _parent_fits_rec reference if it 's still available. if 'array' in self.__dict__: return self.__dict__['array'] elif self._parent_fits_rec is not None: parent = self._parent_fits_rec() if parent is not None: return parent[self.name] else: return None @array.setter def array(self, array): # The following looks over the bases of the given array to check if it # has a ._coldefs attribute (i.e. is a FITS_rec) and that that _coldefs # contains this Column itself, and would create a reference cycle if we # stored the array directly in self.__dict__. # In this case it instead sets up the _parent_fits_rec weakref to the # underlying FITS_rec, so that array.getter can return arrays through # self._parent_fits_rec().field(self.name), rather than storing a # hard reference to the field like it used to. base = array while True: if (hasattr(base, '_coldefs') and isinstance(base._coldefs, ColDefs)): for col in base._coldefs: if col is self and self._parent_fits_rec is None: self._parent_fits_rec = weakref.ref(base) # Just in case the user already set .array to their own # array. if 'array' in self.__dict__: del self.__dict__['array'] return if getattr(base, 'base', None) is not None: base = base.base else: break self.__dict__['array'] = array @array.deleter def array(self): try: del self.__dict__['array'] except KeyError: pass self._parent_fits_rec = None @ColumnAttribute('TTYPE') def name(col, name): if name is None: # Allow None to indicate deleting the name, or to just indicate an # unspecified name (when creating a new Column). return # Check that the name meets the recommended standard--other column # names are *allowed*, but will be discouraged if isinstance(name, str) and not TTYPE_RE.match(name): warnings.warn( 'It is strongly recommended that column names contain only ' 'upper and lower-case ASCII letters, digits, or underscores ' 'for maximum compatibility with other software ' '(got {0!r}).'.format(name), VerifyWarning) # This ensures that the new name can fit into a single FITS card # without any special extension like CONTINUE cards or the like. 
if (not isinstance(name, str) or len(str(Card('TTYPE', name))) != CARD_LENGTH): raise AssertionError( 'Column name must be a string able to fit in a single ' 'FITS card--typically this means a maximum of 68 ' 'characters, though it may be fewer if the string ' 'contains special characters like quotes.') @ColumnAttribute('TCTYP') def coord_type(col, coord_type): if coord_type is None: return if (not isinstance(coord_type, str) or len(coord_type) > 8): raise AssertionError( 'Coordinate/axis type must be a string of atmost 8 ' 'characters.') @ColumnAttribute('TCUNI') def coord_unit(col, coord_unit): if (coord_unit is not None and not isinstance(coord_unit, str)): raise AssertionError( 'Coordinate/axis unit must be a string.') @ColumnAttribute('TCRPX') def coord_ref_point(col, coord_ref_point): if (coord_ref_point is not None and not isinstance(coord_ref_point, numbers.Real)): raise AssertionError( 'Pixel coordinate of the reference point must be ' 'real floating type.') @ColumnAttribute('TCRVL') def coord_ref_value(col, coord_ref_value): if (coord_ref_value is not None and not isinstance(coord_ref_value, numbers.Real)): raise AssertionError( 'Coordinate value at reference point must be real ' 'floating type.') @ColumnAttribute('TCDLT') def coord_inc(col, coord_inc): if (coord_inc is not None and not isinstance(coord_inc, numbers.Real)): raise AssertionError( 'Coordinate increment must be real floating type.') @ColumnAttribute('TRPOS') def time_ref_pos(col, time_ref_pos): if (time_ref_pos is not None and not isinstance(time_ref_pos, str)): raise AssertionError( 'Time reference position must be a string.') format = ColumnAttribute('TFORM') unit = ColumnAttribute('TUNIT') null = ColumnAttribute('TNULL') bscale = ColumnAttribute('TSCAL') bzero = ColumnAttribute('TZERO') disp = ColumnAttribute('TDISP') start = ColumnAttribute('TBCOL') dim = ColumnAttribute('TDIM') @lazyproperty def ascii(self): """Whether this `Column` represents a column in an ASCII table.""" return isinstance(self.format, _AsciiColumnFormat) @lazyproperty def dtype(self): return self.format.dtype def copy(self): """ Return a copy of this `Column`. """ tmp = Column(format='I') # just use a throw-away format tmp.__dict__ = self.__dict__.copy() return tmp @staticmethod def _convert_format(format, cls): """The format argument to this class's initializer may come in many forms. This uses the given column format class ``cls`` to convert to a format of that type. TODO: There should be an abc base class for column format classes """ # Short circuit in case we're already a _BaseColumnFormat--there is at # least one case in which this can happen if isinstance(format, _BaseColumnFormat): return format, format.recformat if format in NUMPY2FITS: with suppress(VerifyError): # legit recarray format? recformat = format format = cls.from_recformat(format) try: # legit FITS format? format = cls(format) recformat = format.recformat except VerifyError: raise VerifyError('Illegal format `{}`.'.format(format)) return format, recformat @classmethod def _verify_keywords(cls, name=None, format=None, unit=None, null=None, bscale=None, bzero=None, disp=None, start=None, dim=None, ascii=None, coord_type=None, coord_unit=None, coord_ref_point=None, coord_ref_value=None, coord_inc=None, time_ref_pos=None): """ Given the keyword arguments used to initialize a Column, specifically those that typically read from a FITS header (so excluding array), verify that each keyword has a valid value. Returns a 2-tuple of dicts. 
The first maps valid keywords to their values. The second maps invalid keywords to a 2-tuple of their value, and a message explaining why they were found invalid. """ valid = {} invalid = {} format, recformat = cls._determine_formats(format, start, dim, ascii) valid.update(format=format, recformat=recformat) # Currently we don't have any validation for name, unit, bscale, or # bzero so include those by default # TODO: Add validation for these keywords, obviously for k, v in [('name', name), ('unit', unit), ('bscale', bscale), ('bzero', bzero)]: if v is not None and v != '': valid[k] = v # Validate null option # Note: Enough code exists that thinks empty strings are sensible # inputs for these options that we need to treat '' as None if null is not None and null != '': msg = None if isinstance(format, _AsciiColumnFormat): null = str(null) if len(null) > format.width: msg = ( "ASCII table null option (TNULLn) is longer than " "the column's character width and will be truncated " "(got {!r}).".format(null)) else: tnull_formats = ('B', 'I', 'J', 'K') if not _is_int(null): # Make this an exception instead of a warning, since any # non-int value is meaningless msg = ( 'Column null option (TNULLn) must be an integer for ' 'binary table columns (got {!r}). The invalid value ' 'will be ignored for the purpose of formatting ' 'the data in this column.'.format(null)) elif not (format.format in tnull_formats or (format.format in ('P', 'Q') and format.p_format in tnull_formats)): # TODO: We should also check that TNULLn's integer value # is in the range allowed by the column's format msg = ( 'Column null option (TNULLn) is invalid for binary ' 'table columns of type {!r} (got {!r}). The invalid ' 'value will be ignored for the purpose of formatting ' 'the data in this column.'.format(format, null)) if msg is None: valid['null'] = null else: invalid['null'] = (null, msg) # Validate the disp option # TODO: Add full parsing and validation of TDISPn keywords if disp is not None and disp != '': msg = None if not isinstance(disp, str): msg = ( 'Column disp option (TDISPn) must be a string (got {!r}).' 'The invalid value will be ignored for the purpose of ' 'formatting the data in this column.'.format(disp)) elif (isinstance(format, _AsciiColumnFormat) and disp[0].upper() == 'L'): # disp is at least one character long and has the 'L' format # which is not recognized for ASCII tables msg = ( "Column disp option (TDISPn) may not use the 'L' format " "with ASCII table columns. The invalid value will be " "ignored for the purpose of formatting the data in this " "column.") if msg is None: valid['disp'] = disp else: invalid['disp'] = (disp, msg) # Validate the start option if start is not None and start != '': msg = None if not isinstance(format, _AsciiColumnFormat): # The 'start' option only applies to ASCII columns msg = ( 'Column start option (TBCOLn) is not allowed for binary ' 'table columns (got {!r}). The invalid keyword will be ' 'ignored for the purpose of formatting the data in this ' 'column.'.format(start)) else: try: start = int(start) except (TypeError, ValueError): pass if not _is_int(start) or start < 1: msg = ( 'Column start option (TBCOLn) must be a positive integer ' '(got {!r}). 
The invalid value will be ignored for the ' 'purpose of formatting the data in this column.'.format(start)) if msg is None: valid['start'] = start else: invalid['start'] = (start, msg) # Process TDIMn options # ASCII table columns can't have a TDIMn keyword associated with it; # for now we just issue a warning and ignore it. # TODO: This should be checked by the FITS verification code if dim is not None and dim != '': msg = None dims_tuple = tuple() # NOTE: If valid, the dim keyword's value in the the valid dict is # a tuple, not the original string; if invalid just the original # string is returned if isinstance(format, _AsciiColumnFormat): msg = ( 'Column dim option (TDIMn) is not allowed for ASCII table ' 'columns (got {!r}). The invalid keyword will be ignored ' 'for the purpose of formatting this column.'.format(dim)) elif isinstance(dim, str): dims_tuple = _parse_tdim(dim) elif isinstance(dim, tuple): dims_tuple = dim else: msg = ( "`dim` argument must be a string containing a valid value " "for the TDIMn header keyword associated with this column, " "or a tuple containing the C-order dimensions for the " "column. The invalid value will be ignored for the purpose " "of formatting this column.") if dims_tuple: if reduce(operator.mul, dims_tuple) > format.repeat: msg = ( "The repeat count of the column format {!r} for column {!r} " "is fewer than the number of elements per the TDIM " "argument {!r}. The invalid TDIMn value will be ignored " "for the purpose of formatting this column.".format( name, format, dim)) if msg is None: valid['dim'] = dims_tuple else: invalid['dim'] = (dim, msg) if coord_type is not None and coord_type != '': msg = None if not isinstance(coord_type, str): msg = ( "Coordinate/axis type option (TCTYPn) must be a string " "(got {!r}). The invalid keyword will be ignored for the " "purpose of formatting this column.".format(coord_type)) elif len(coord_type) > 8: msg = ( "Coordinate/axis type option (TCTYPn) must be a string " "of atmost 8 characters (got {!r}). The invalid keyword " "will be ignored for the purpose of formatting this " "column.".format(coord_type)) if msg is None: valid['coord_type'] = coord_type else: invalid['coord_type'] = (coord_type, msg) if coord_unit is not None and coord_unit != '': msg = None if not isinstance(coord_unit, str): msg = ( "Coordinate/axis unit option (TCUNIn) must be a string " "(got {!r}). The invalid keyword will be ignored for the " "purpose of formatting this column.".format(coord_unit)) if msg is None: valid['coord_unit'] = coord_unit else: invalid['coord_unit'] = (coord_unit, msg) for k, v in [('coord_ref_point', coord_ref_point), ('coord_ref_value', coord_ref_value), ('coord_inc', coord_inc)]: if v is not None and v != '': msg = None if not isinstance(v, numbers.Real): msg = ( "Column {} option ({}n) must be a real floating type (got {!r}). " "The invalid value will be ignored for the purpose of formatting " "the data in this column.".format(k, ATTRIBUTE_TO_KEYWORD[k], v)) if msg is None: valid[k] = v else: invalid[k] = (v, msg) if time_ref_pos is not None and time_ref_pos != '': msg=None if not isinstance(time_ref_pos, str): msg = ( "Time coordinate reference position option (TRPOSn) must be " "a string (got {!r}). 
The invalid keyword will be ignored for " "the purpose of formatting this column.".format(time_ref_pos)) if msg is None: valid['time_ref_pos'] = time_ref_pos else: invalid['time_ref_pos'] = (time_ref_pos, msg) return valid, invalid @classmethod def _determine_formats(cls, format, start, dim, ascii): """ Given a format string and whether or not the Column is for an ASCII table (ascii=None means unspecified, but lean toward binary table where ambiguous) create an appropriate _BaseColumnFormat instance for the column's format, and determine the appropriate recarray format. The values of the start and dim keyword arguments are also useful, as the former is only valid for ASCII tables and the latter only for BINARY tables. """ # If the given format string is unambiguously a Numpy dtype or one of # the Numpy record format type specifiers supported by Astropy then that # should take priority--otherwise assume it is a FITS format if isinstance(format, np.dtype): format, _, _ = _dtype_to_recformat(format) # check format if ascii is None and not isinstance(format, _BaseColumnFormat): # We're just give a string which could be either a Numpy format # code, or a format for a binary column array *or* a format for an # ASCII column array--there may be many ambiguities here. Try our # best to guess what the user intended. format, recformat = cls._guess_format(format, start, dim) elif not ascii and not isinstance(format, _BaseColumnFormat): format, recformat = cls._convert_format(format, _ColumnFormat) elif ascii and not isinstance(format, _AsciiColumnFormat): format, recformat = cls._convert_format(format, _AsciiColumnFormat) else: # The format is already acceptable and unambiguous recformat = format.recformat return format, recformat @classmethod def _guess_format(cls, format, start, dim): if start and dim: # This is impossible; this can't be a valid FITS column raise ValueError( 'Columns cannot have both a start (TCOLn) and dim ' '(TDIMn) option, since the former is only applies to ' 'ASCII tables, and the latter is only valid for binary ' 'tables.') elif start: # Only ASCII table columns can have a 'start' option guess_format = _AsciiColumnFormat elif dim: # Only binary tables can have a dim option guess_format = _ColumnFormat else: # If the format is *technically* a valid binary column format # (i.e. it has a valid format code followed by arbitrary # "optional" codes), but it is also strictly a valid ASCII # table format, then assume an ASCII table column was being # requested (the more likely case, after all). 
with suppress(VerifyError): format = _AsciiColumnFormat(format, strict=True) # A safe guess which reflects the existing behavior of previous # Astropy versions guess_format = _ColumnFormat try: format, recformat = cls._convert_format(format, guess_format) except VerifyError: # For whatever reason our guess was wrong (for example if we got # just 'F' that's not a valid binary format, but it an ASCII format # code albeit with the width/precision omitted guess_format = (_AsciiColumnFormat if guess_format is _ColumnFormat else _ColumnFormat) # If this fails too we're out of options--it is truly an invalid # format, or at least not supported format, recformat = cls._convert_format(format, guess_format) return format, recformat def _convert_to_valid_data_type(self, array): # Convert the format to a type we understand if isinstance(array, Delayed): return array elif array is None: return array else: format = self.format dims = self._dims if dims: shape = dims[:-1] if 'A' in format else dims shape = (len(array),) + shape array = array.reshape(shape) if 'P' in format or 'Q' in format: return array elif 'A' in format: if array.dtype.char in 'SU': if dims: # The 'last' dimension (first in the order given # in the TDIMn keyword itself) is the number of # characters in each string fsize = dims[-1] else: fsize = np.dtype(format.recformat).itemsize return chararray.array(array, itemsize=fsize, copy=False) else: return _convert_array(array, np.dtype(format.recformat)) elif 'L' in format: # boolean needs to be scaled back to storage values ('T', 'F') if array.dtype == np.dtype('bool'): return np.where(array == np.False_, ord('F'), ord('T')) else: return np.where(array == 0, ord('F'), ord('T')) elif 'X' in format: return _convert_array(array, np.dtype('uint8')) else: # Preserve byte order of the original array for now; see #77 numpy_format = array.dtype.byteorder + format.recformat # Handle arrays passed in as unsigned ints as pseudo-unsigned # int arrays; blatantly tacked in here for now--we need columns # to have explicit knowledge of whether they treated as # pseudo-unsigned bzeros = {2: np.uint16(2**15), 4: np.uint32(2**31), 8: np.uint64(2**63)} if (array.dtype.kind == 'u' and array.dtype.itemsize in bzeros and self.bscale in (1, None, '') and self.bzero == bzeros[array.dtype.itemsize]): # Basically the array is uint, has scale == 1.0, and the # bzero is the appropriate value for a pseudo-unsigned # integer of the input dtype, then go ahead and assume that # uint is assumed numpy_format = numpy_format.replace('i', 'u') self._pseudo_unsigned_ints = True # The .base here means we're dropping the shape information, # which is only used to format recarray fields, and is not # useful for converting input arrays to the correct data type dtype = np.dtype(numpy_format).base return _convert_array(array, dtype) class ColDefs(NotifierMixin): """ Column definitions class. It has attributes corresponding to the `Column` attributes (e.g. `ColDefs` has the attribute ``names`` while `Column` has ``name``). Each attribute in `ColDefs` is a list of corresponding attribute values from all `Column` objects. 
""" _padding_byte = '\x00' _col_format_cls = _ColumnFormat def __new__(cls, input, ascii=False): klass = cls if (hasattr(input, '_columns_type') and issubclass(input._columns_type, ColDefs)): klass = input._columns_type elif (hasattr(input, '_col_format_cls') and issubclass(input._col_format_cls, _AsciiColumnFormat)): klass = _AsciiColDefs if ascii: # force ASCII if this has been explicitly requested klass = _AsciiColDefs return object.__new__(klass) def __getnewargs__(self): return (self._arrays,) def __init__(self, input, ascii=False): """ Parameters ---------- input : sequence of `Column`, `ColDefs`, other An existing table HDU, an existing `ColDefs`, or any multi-field Numpy array or `numpy.recarray`. ascii : bool Use True to ensure that ASCII table columns are used. """ from .hdu.table import _TableBaseHDU from .fitsrec import FITS_rec if isinstance(input, ColDefs): self._init_from_coldefs(input) elif (isinstance(input, FITS_rec) and hasattr(input, '_coldefs') and input._coldefs): # If given a FITS_rec object we can directly copy its columns, but # only if its columns have already been defined, otherwise this # will loop back in on itself and blow up self._init_from_coldefs(input._coldefs) elif isinstance(input, np.ndarray) and input.dtype.fields is not None: # Construct columns from the fields of a record array self._init_from_array(input) elif isiterable(input): # if the input is a list of Columns self._init_from_sequence(input) elif isinstance(input, _TableBaseHDU): # Construct columns from fields in an HDU header self._init_from_table(input) else: raise TypeError('Input to ColDefs must be a table HDU, a list ' 'of Columns, or a record/field array.') # Listen for changes on all columns for col in self.columns: col._add_listener(self) def _init_from_coldefs(self, coldefs): """Initialize from an existing ColDefs object (just copy the columns and convert their formats if necessary). """ self.columns = [self._copy_column(col) for col in coldefs] def _init_from_sequence(self, columns): for idx, col in enumerate(columns): if not isinstance(col, Column): raise TypeError('Element {} in the ColDefs input is not a ' 'Column.'.format(idx)) self._init_from_coldefs(columns) def _init_from_array(self, array): self.columns = [] for idx in range(len(array.dtype)): cname = array.dtype.names[idx] ftype = array.dtype.fields[cname][0] format = self._col_format_cls.from_recformat(ftype) # Determine the appropriate dimensions for items in the column # (typically just 1D) dim = array.dtype[idx].shape[::-1] if dim and (len(dim) > 1 or 'A' in format): if 'A' in format: # n x m string arrays must include the max string # length in their dimensions (e.g. l x n x m) dim = (array.dtype[idx].base.itemsize,) + dim dim = repr(dim).replace(' ', '') else: dim = None # Check for unsigned ints. 
bzero = None if ftype.base.kind == 'u': if 'I' in format: bzero = np.uint16(2**15) elif 'J' in format: bzero = np.uint32(2**31) elif 'K' in format: bzero = np.uint64(2**63) c = Column(name=cname, format=format, array=array.view(np.ndarray)[cname], bzero=bzero, dim=dim) self.columns.append(c) def _init_from_table(self, table): hdr = table._header nfields = hdr['TFIELDS'] # go through header keywords to pick out column definition keywords # definition dictionaries for each field col_keywords = [{} for i in range(nfields)] for keyword, value in hdr.items(): key = TDEF_RE.match(keyword) try: keyword = key.group('label') except Exception: continue # skip if there is no match if keyword in KEYWORD_NAMES: col = int(key.group('num')) if 0 < col <= nfields: attr = KEYWORD_TO_ATTRIBUTE[keyword] if attr == 'format': # Go ahead and convert the format value to the # appropriate ColumnFormat container now value = self._col_format_cls(value) col_keywords[col - 1][attr] = value # Verify the column keywords and display any warnings if necessary; # we only want to pass on the valid keywords for idx, kwargs in enumerate(col_keywords): valid_kwargs, invalid_kwargs = Column._verify_keywords(**kwargs) for val in invalid_kwargs.values(): warnings.warn( 'Invalid keyword for column {}: {}'.format(idx + 1, val[1]), VerifyWarning) # Special cases for recformat and dim # TODO: Try to eliminate the need for these special cases del valid_kwargs['recformat'] if 'dim' in valid_kwargs: valid_kwargs['dim'] = kwargs['dim'] col_keywords[idx] = valid_kwargs # data reading will be delayed for col in range(nfields): col_keywords[col]['array'] = Delayed(table, col) # now build the columns self.columns = [Column(**attrs) for attrs in col_keywords] # Add the table HDU is a listener to changes to the columns # (either changes to individual columns, or changes to the set of # columns (add/remove/etc.)) self._add_listener(table) def __copy__(self): return self.__class__(self) def __deepcopy__(self, memo): return self.__class__([copy.deepcopy(c, memo) for c in self.columns]) def _copy_column(self, column): """Utility function used currently only by _init_from_coldefs to help convert columns from binary format to ASCII format or vice versa if necessary (otherwise performs a straight copy). """ if isinstance(column.format, self._col_format_cls): # This column has a FITS format compatible with this column # definitions class (that is ascii or binary) return column.copy() new_column = column.copy() # Try to use the Numpy recformat as the equivalency between the # two formats; if that conversion can't be made then these # columns can't be transferred # TODO: Catch exceptions here and raise an explicit error about # column format conversion new_column.format = self._col_format_cls.from_column_format( column.format) # Handle a few special cases of column format options that are not # compatible between ASCII an binary tables # TODO: This is sort of hacked in right now; we really need # separate classes for ASCII and Binary table Columns, and they # should handle formatting issues like these if not isinstance(new_column.format, _AsciiColumnFormat): # the column is a binary table column... new_column.start = None if new_column.null is not None: # We can't just "guess" a value to represent null # values in the new column, so just disable this for # now; users may modify it later new_column.null = None else: # the column is an ASCII table column... 
if new_column.null is not None: new_column.null = DEFAULT_ASCII_TNULL if (new_column.disp is not None and new_column.disp.upper().startswith('L')): # ASCII columns may not use the logical data display format; # for now just drop the TDISPn option for this column as we # don't have a systematic conversion of boolean data to ASCII # tables yet new_column.disp = None return new_column def __getattr__(self, name): """ Automatically returns the values for the given keyword attribute for all `Column`s in this list. Implements for example self.units, self.formats, etc. """ cname = name[:-1] if cname in KEYWORD_ATTRIBUTES and name[-1] == 's': attr = [] for col in self.columns: val = getattr(col, cname) attr.append(val if val is not None else '') return attr raise AttributeError(name) @lazyproperty def dtype(self): # Note: This previously returned a dtype that just used the raw field # widths based on the format's repeat count, and did not incorporate # field *shapes* as provided by TDIMn keywords. # Now this incorporates TDIMn from the start, which makes *this* method # a little more complicated, but simplifies code elsewhere (for example # fields will have the correct shapes even in the raw recarray). formats = [] offsets = [0] for format_, dim in zip(self.formats, self._dims): dt = format_.dtype if len(offsets) < len(self.formats): # Note: the size of the *original* format_ may be greater than # one would expect from the number of elements determined by # dim. The FITS format allows this--the rest of the field is # filled with undefined values. offsets.append(offsets[-1] + dt.itemsize) if dim: if format_.format == 'A': dt = np.dtype((dt.char + str(dim[-1]), dim[:-1])) else: dt = np.dtype((dt.base, dim)) formats.append(dt) return np.dtype({'names': self.names, 'formats': formats, 'offsets': offsets}) @lazyproperty def names(self): return [col.name for col in self.columns] @lazyproperty def formats(self): return [col.format for col in self.columns] @lazyproperty def _arrays(self): return [col.array for col in self.columns] @lazyproperty def _recformats(self): return [fmt.recformat for fmt in self.formats] @lazyproperty def _dims(self): """Returns the values of the TDIMn keywords parsed into tuples.""" return [col._dims for col in self.columns] def __getitem__(self, key): if isinstance(key, str): key = _get_index(self.names, key) x = self.columns[key] if _is_int(key): return x else: return ColDefs(x) def __len__(self): return len(self.columns) def __repr__(self): rep = 'ColDefs(' if hasattr(self, 'columns') and self.columns: # The hasattr check is mostly just useful in debugging sessions # where self.columns may not be defined yet rep += '\n ' rep += '\n '.join([repr(c) for c in self.columns]) rep += '\n' rep += ')' return rep def __add__(self, other, option='left'): if isinstance(other, Column): b = [other] elif isinstance(other, ColDefs): b = list(other.columns) else: raise TypeError('Wrong type of input.') if option == 'left': tmp = list(self.columns) + b else: tmp = b + list(self.columns) return ColDefs(tmp) def __radd__(self, other): return self.__add__(other, 'right') def __sub__(self, other): if not isinstance(other, (list, tuple)): other = [other] _other = [_get_index(self.names, key) for key in other] indx = list(range(len(self))) for x in _other: indx.remove(x) tmp = [self[i] for i in indx] return ColDefs(tmp) def _update_column_attribute_changed(self, column, attr, old_value, new_value): """ Handle column attribute changed notifications from columns that are members of this `ColDefs`. 
`ColDefs` itself does not currently do anything with this, and just bubbles the notification up to any listening table HDUs that may need to update their headers, etc. However, this also informs the table of the numerical index of the column that changed. """ idx = 0 for idx, col in enumerate(self.columns): if col is column: break if attr == 'name': del self.names elif attr == 'format': del self.formats self._notify('column_attribute_changed', column, idx, attr, old_value, new_value) def add_col(self, column): """ Append one `Column` to the column definition. """ if not isinstance(column, Column): raise AssertionError self._arrays.append(column.array) # Obliterate caches of certain things del self.dtype del self._recformats del self._dims del self.names del self.formats self.columns.append(column) # Listen for changes on the new column column._add_listener(self) # If this ColDefs is being tracked by a Table, inform the # table that its data is now invalid. self._notify('column_added', self, column) return self def del_col(self, col_name): """ Delete (the definition of) one `Column`. col_name : str or int The column's name or index """ indx = _get_index(self.names, col_name) col = self.columns[indx] del self._arrays[indx] # Obliterate caches of certain things del self.dtype del self._recformats del self._dims del self.names del self.formats del self.columns[indx] col._remove_listener(self) # If this ColDefs is being tracked by a table HDU, inform the HDU (or # any other listeners) that the column has been removed # Just send a reference to self, and the index of the column that was # removed self._notify('column_removed', self, indx) return self def change_attrib(self, col_name, attrib, new_value): """ Change an attribute (in the ``KEYWORD_ATTRIBUTES`` list) of a `Column`. Parameters ---------- col_name : str or int The column name or index to change attrib : str The attribute name new_value : object The new value for the attribute """ setattr(self[col_name], attrib, new_value) def change_name(self, col_name, new_name): """ Change a `Column`'s name. Parameters ---------- col_name : str The current name of the column new_name : str The new name of the column """ if new_name != col_name and new_name in self.names: raise ValueError('New name {} already exists.'.format(new_name)) else: self.change_attrib(col_name, 'name', new_name) def change_unit(self, col_name, new_unit): """ Change a `Column`'s unit. Parameters ---------- col_name : str or int The column name or index new_unit : str The new unit for the column """ self.change_attrib(col_name, 'unit', new_unit) def info(self, attrib='all', output=None): """ Get attribute(s) information of the column definition. Parameters ---------- attrib : str Can be one or more of the attributes listed in ``astropy.io.fits.column.KEYWORD_ATTRIBUTES``. The default is ``"all"`` which will print out all attributes. It forgives plurals and blanks. If there are two or more attribute names, they must be separated by comma(s). output : file, optional File-like object to output to. Outputs to stdout by default. If `False`, returns the attributes as a `dict` instead. Notes ----- This function doesn't return anything by default; it just prints to stdout. 
""" if output is None: output = sys.stdout if attrib.strip().lower() in ['all', '']: lst = KEYWORD_ATTRIBUTES else: lst = attrib.split(',') for idx in range(len(lst)): lst[idx] = lst[idx].strip().lower() if lst[idx][-1] == 's': lst[idx] = list[idx][:-1] ret = {} for attr in lst: if output: if attr not in KEYWORD_ATTRIBUTES: output.write("'{}' is not an attribute of the column " "definitions.\n".format(attr)) continue output.write("{}:\n".format(attr)) output.write(' {}\n'.format(getattr(self, attr + 's'))) else: ret[attr] = getattr(self, attr + 's') if not output: return ret class _AsciiColDefs(ColDefs): """ColDefs implementation for ASCII tables.""" _padding_byte = ' ' _col_format_cls = _AsciiColumnFormat def __init__(self, input, ascii=True): super().__init__(input) # if the format of an ASCII column has no width, add one if not isinstance(input, _AsciiColDefs): self._update_field_metrics() else: for idx, s in enumerate(input.starts): self.columns[idx].start = s self._spans = input.spans self._width = input._width @lazyproperty def dtype(self): dtype = {} for j in range(len(self)): data_type = 'S' + str(self.spans[j]) dtype[self.names[j]] = (data_type, self.starts[j] - 1) return np.dtype(dtype) @property def spans(self): """A list of the widths of each field in the table.""" return self._spans @lazyproperty def _recformats(self): if len(self) == 1: widths = [] else: widths = [y - x for x, y in pairwise(self.starts)] # Widths is the width of each field *including* any space between # fields; this is so that we can map the fields to string records in a # Numpy recarray widths.append(self._width - self.starts[-1] + 1) return ['a' + str(w) for w in widths] def add_col(self, column): super().add_col(column) self._update_field_metrics() def del_col(self, col_name): super().del_col(col_name) self._update_field_metrics() def _update_field_metrics(self): """ Updates the list of the start columns, the list of the widths of each field, and the total width of each record in the table. """ spans = [0] * len(self.columns) end_col = 0 # Refers to the ASCII text column, not the table col for idx, col in enumerate(self.columns): width = col.format.width # Update the start columns and column span widths taking into # account the case that the starting column of a field may not # be the column immediately after the previous field if not col.start: col.start = end_col + 1 end_col = col.start + width - 1 spans[idx] = width self._spans = spans self._width = end_col # Utilities class _VLF(np.ndarray): """Variable length field object.""" def __new__(cls, input, dtype='a'): """ Parameters ---------- input a sequence of variable-sized elements. """ if dtype == 'a': try: # this handles ['abc'] and [['a','b','c']] # equally, beautiful! input = [chararray.array(x, itemsize=1) for x in input] except Exception: raise ValueError( 'Inconsistent input data array: {0}'.format(input)) a = np.array(input, dtype=object) self = np.ndarray.__new__(cls, shape=(len(input),), buffer=a, dtype=object) self.max = 0 self.element_dtype = dtype return self def __array_finalize__(self, obj): if obj is None: return self.max = obj.max self.element_dtype = obj.element_dtype def __setitem__(self, key, value): """ To make sure the new item has consistent data type to avoid misalignment. 
""" if isinstance(value, np.ndarray) and value.dtype == self.dtype: pass elif isinstance(value, chararray.chararray) and value.itemsize == 1: pass elif self.element_dtype == 'a': value = chararray.array(value, itemsize=1) else: value = np.array(value, dtype=self.element_dtype) np.ndarray.__setitem__(self, key, value) self.max = max(self.max, len(value)) def _get_index(names, key): """ Get the index of the ``key`` in the ``names`` list. The ``key`` can be an integer or string. If integer, it is the index in the list. If string, a. Field (column) names are case sensitive: you can have two different columns called 'abc' and 'ABC' respectively. b. When you *refer* to a field (presumably with the field method), it will try to match the exact name first, so in the example in (a), field('abc') will get the first field, and field('ABC') will get the second field. If there is no exact name matched, it will try to match the name with case insensitivity. So, in the last example, field('Abc') will cause an exception since there is no unique mapping. If there is a field named "XYZ" and no other field name is a case variant of "XYZ", then field('xyz'), field('Xyz'), etc. will get this field. """ if _is_int(key): indx = int(key) elif isinstance(key, str): # try to find exact match first try: indx = names.index(key.rstrip()) except ValueError: # try to match case-insentively, _key = key.lower().rstrip() names = [n.lower().rstrip() for n in names] count = names.count(_key) # occurrence of _key in names if count == 1: indx = names.index(_key) elif count == 0: raise KeyError("Key '{}' does not exist.".format(key)) else: # multiple match raise KeyError("Ambiguous key name '{}'.".format(key)) else: raise KeyError("Illegal key '{!r}'.".format(key)) return indx def _unwrapx(input, output, repeat): """ Unwrap the X format column into a Boolean array. Parameters ---------- input input ``Uint8`` array of shape (`s`, `nbytes`) output output Boolean array of shape (`s`, `repeat`) repeat number of bits """ pow2 = np.array([128, 64, 32, 16, 8, 4, 2, 1], dtype='uint8') nbytes = ((repeat - 1) // 8) + 1 for i in range(nbytes): _min = i * 8 _max = min((i + 1) * 8, repeat) for j in range(_min, _max): output[..., j] = np.bitwise_and(input[..., i], pow2[j - i * 8]) def _wrapx(input, output, repeat): """ Wrap the X format column Boolean array into an ``UInt8`` array. Parameters ---------- input input Boolean array of shape (`s`, `repeat`) output output ``Uint8`` array of shape (`s`, `nbytes`) repeat number of bits """ output[...] = 0 # reset the output nbytes = ((repeat - 1) // 8) + 1 unused = nbytes * 8 - repeat for i in range(nbytes): _min = i * 8 _max = min((i + 1) * 8, repeat) for j in range(_min, _max): if j != _min: np.left_shift(output[..., i], 1, output[..., i]) np.add(output[..., i], input[..., j], output[..., i]) # shift the unused bits np.left_shift(output[..., i], unused, output[..., i]) def _makep(array, descr_output, format, nrows=None): """ Construct the P (or Q) format column array, both the data descriptors and the data. It returns the output "data" array of data type `dtype`. The descriptor location will have a zero offset for all columns after this call. The final offset will be calculated when the file is written. 
Parameters ---------- array input object array descr_output output "descriptor" array of data type int32 (for P format arrays) or int64 (for Q format arrays)--must be nrows long in its first dimension format the _FormatP object representing the format of the variable array nrows : int, optional number of rows to create in the column; defaults to the number of rows in the input array """ # TODO: A great deal of this is redundant with FITS_rec._convert_p; see if # we can merge the two somehow. _offset = 0 if not nrows: nrows = len(array) data_output = _VLF([None] * nrows, dtype=format.dtype) if format.dtype == 'a': _nbytes = 1 else: _nbytes = np.array([], dtype=format.dtype).itemsize for idx in range(nrows): if idx < len(array): rowval = array[idx] else: if format.dtype == 'a': rowval = ' ' * data_output.max else: rowval = [0] * data_output.max if format.dtype == 'a': data_output[idx] = chararray.array(encode_ascii(rowval), itemsize=1) else: data_output[idx] = np.array(rowval, dtype=format.dtype) descr_output[idx, 0] = len(data_output[idx]) descr_output[idx, 1] = _offset _offset += len(data_output[idx]) * _nbytes return data_output def _parse_tformat(tform): """Parse ``TFORMn`` keyword for a binary table into a ``(repeat, format, option)`` tuple. """ try: (repeat, format, option) = TFORMAT_RE.match(tform.strip()).groups() except Exception: # TODO: Maybe catch this error use a default type (bytes, maybe?) for # unrecognized column types. As long as we can determine the correct # byte width somehow.. raise VerifyError('Format {!r} is not recognized.'.format(tform)) if repeat == '': repeat = 1 else: repeat = int(repeat) return (repeat, format.upper(), option) def _parse_ascii_tformat(tform, strict=False): """ Parse the ``TFORMn`` keywords for ASCII tables into a ``(format, width, precision)`` tuple (the latter is always zero unless format is one of 'E', 'F', or 'D'). 
""" match = TFORMAT_ASCII_RE.match(tform.strip()) if not match: raise VerifyError('Format {!r} is not recognized.'.format(tform)) # Be flexible on case format = match.group('format') if format is None: # Floating point format format = match.group('formatf').upper() width = match.group('widthf') precision = match.group('precision') if width is None or precision is None: if strict: raise VerifyError('Format {!r} is not unambiguously an ASCII ' 'table format.') else: width = 0 if width is None else width precision = 1 if precision is None else precision else: format = format.upper() width = match.group('width') if width is None: if strict: raise VerifyError('Format {!r} is not unambiguously an ASCII ' 'table format.') else: # Just use a default width of 0 if unspecified width = 0 precision = 0 def convert_int(val): msg = ('Format {!r} is not valid--field width and decimal precision ' 'must be integers.') try: val = int(val) except (ValueError, TypeError): raise VerifyError(msg.format(tform)) return val if width and precision: # This should only be the case for floating-point formats width, precision = convert_int(width), convert_int(precision) elif width: # Just for integer/string formats; ignore precision width = convert_int(width) else: # For any format, if width was unspecified use the set defaults width, precision = ASCII_DEFAULT_WIDTHS[format] if width <= 0: raise VerifyError("Format {!r} not valid--field width must be a " "positive integeter.".format(tform)) if precision >= width: raise VerifyError("Format {!r} not valid--the number of decimal digits " "must be less than the format's total " "width {}.".format(tform, width)) return format, width, precision def _parse_tdim(tdim): """Parse the ``TDIM`` value into a tuple (may return an empty tuple if the value ``TDIM`` value is empty or invalid). """ m = tdim and TDIM_RE.match(tdim) if m: dims = m.group('dims') return tuple(int(d.strip()) for d in dims.split(','))[::-1] # Ignore any dim values that don't specify a multidimensional column return tuple() def _scalar_to_format(value): """ Given a scalar value or string, returns the minimum FITS column format that can represent that value. 'minimum' is defined by the order given in FORMATORDER. """ # First, if value is a string, try to convert to the appropriate scalar # value for type_ in (int, float, complex): try: value = type_(value) break except ValueError: continue numpy_dtype_str = np.min_scalar_type(value).str numpy_dtype_str = numpy_dtype_str[1:] # Strip endianness try: fits_format = NUMPY2FITS[numpy_dtype_str] return FITSUPCONVERTERS.get(fits_format, fits_format) except KeyError: return "A" + str(len(value)) def _cmp_recformats(f1, f2): """ Compares two numpy recformats using the ordering given by FORMATORDER. """ if f1[0] == 'a' and f2[0] == 'a': return cmp(int(f1[1:]), int(f2[1:])) else: f1, f2 = NUMPY2FITS[f1], NUMPY2FITS[f2] return cmp(FORMATORDER.index(f1), FORMATORDER.index(f2)) def _convert_fits2record(format): """ Convert FITS format spec to record format spec. """ repeat, dtype, option = _parse_tformat(format) if dtype in FITS2NUMPY: if dtype == 'A': output_format = FITS2NUMPY[dtype] + str(repeat) # to accommodate both the ASCII table and binary table column # format spec, i.e. A7 in ASCII table is the same as 7A in # binary table, so both will produce 'a7'. 
            # Technically the FITS standard does not allow this but it's a
            # very common mistake
            if format.lstrip()[0] == 'A' and option != '':
                # make sure option is integer
                output_format = FITS2NUMPY[dtype] + str(int(option))
        else:
            repeat_str = ''
            if repeat != 1:
                repeat_str = str(repeat)
            output_format = repeat_str + FITS2NUMPY[dtype]
    elif dtype == 'X':
        output_format = _FormatX(repeat)
    elif dtype == 'P':
        output_format = _FormatP.from_tform(format)
    elif dtype == 'Q':
        output_format = _FormatQ.from_tform(format)
    elif dtype == 'F':
        output_format = 'f8'
    else:
        raise ValueError('Illegal format `{}`.'.format(format))

    return output_format


def _convert_record2fits(format):
    """
    Convert record format spec to FITS format spec.
    """

    recformat, kind, dtype = _dtype_to_recformat(format)
    shape = dtype.shape
    itemsize = dtype.base.itemsize
    if dtype.char == 'U':
        # Unicode dtype--itemsize is 4 times actual ASCII character length,
        # which is what matters for FITS column formats
        # Use dtype.base--dtype may be a multi-dimensional dtype
        itemsize = itemsize // 4

    option = str(itemsize)

    ndims = len(shape)
    repeat = 1
    if ndims > 0:
        nel = np.array(shape, dtype='i8').prod()
        if nel > 1:
            repeat = nel

    if kind == 'a':
        # This is a kludge that will place string arrays into a
        # single field, so at least we won't lose data.  Need to
        # use a TDIM keyword to fix this, declaring as (slength,
        # dim1, dim2, ...) as mwrfits does

        ntot = int(repeat) * int(option)

        output_format = str(ntot) + 'A'
    elif recformat in NUMPY2FITS:  # record format
        if repeat != 1:
            repeat = str(repeat)
        else:
            repeat = ''
        output_format = repeat + NUMPY2FITS[recformat]
    else:
        raise ValueError('Illegal format `{}`.'.format(format))

    return output_format


def _dtype_to_recformat(dtype):
    """
    Utility function for converting a dtype object or string that instantiates
    a dtype (e.g. 'float32') into one of the two character Numpy format codes
    that have been traditionally used by Astropy.

    In particular, use of 'a' to refer to character data is long since
    deprecated in Numpy, but Astropy remains heavily invested in its use
    (something to try to get away from sooner rather than later).
    """

    if not isinstance(dtype, np.dtype):
        dtype = np.dtype(dtype)

    kind = dtype.base.kind
    if kind in ('U', 'S'):
        recformat = kind = 'a'
    else:
        itemsize = dtype.base.itemsize
        recformat = kind + str(itemsize)

    return recformat, kind, dtype


def _convert_format(format, reverse=False):
    """
    Convert FITS format spec to record format spec.  Do the opposite if
    reverse=True.
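
    For instance (an illustrative sketch, assuming the standard mappings in
    ``FITS2NUMPY``/``NUMPY2FITS``)::

        _convert_format('4J')                 # -> '4i4'
        _convert_format('4i4', reverse=True)  # -> '4J'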
""" if reverse: return _convert_record2fits(format) else: return _convert_fits2record(format) def _convert_ascii_format(format, reverse=False): """Convert ASCII table format spec to record format spec.""" if reverse: recformat, kind, dtype = _dtype_to_recformat(format) itemsize = dtype.itemsize if kind == 'a': return 'A' + str(itemsize) elif NUMPY2FITS.get(recformat) == 'L': # Special case for logical/boolean types--for ASCII tables we # represent these as single character columns containing 'T' or 'F' # (a la the storage format for Logical columns in binary tables) return 'A1' elif kind == 'i': # Use for the width the maximum required to represent integers # of that byte size plus 1 for signs, but use a minimum of the # default width (to keep with existing behavior) width = 1 + len(str(2 ** (itemsize * 8))) width = max(width, ASCII_DEFAULT_WIDTHS['I'][0]) return 'I' + str(width) elif kind == 'f': # This is tricky, but go ahead and use D if float-64, and E # if float-32 with their default widths if itemsize >= 8: format = 'D' else: format = 'E' width = '.'.join(str(w) for w in ASCII_DEFAULT_WIDTHS[format]) return format + width # TODO: There may be reasonable ways to represent other Numpy types so # let's see what other possibilities there are besides just 'a', 'i', # and 'f'. If it doesn't have a reasonable ASCII representation then # raise an exception else: format, width, precision = _parse_ascii_tformat(format) # This gives a sensible "default" dtype for a given ASCII # format code recformat = ASCII2NUMPY[format] # The following logic is taken from CFITSIO: # For integers, if the width <= 4 we can safely use 16-bit ints for all # values [for the non-standard J format code just always force 64-bit] if format == 'I' and width <= 4: recformat = 'i2' elif format == 'A': recformat += str(width) return recformat def _parse_tdisp_format(tdisp): """ Parse the ``TDISPn`` keywords for ASCII and binary tables into a ``(format, width, precision, exponential)`` tuple (the TDISP values for ASCII and binary are identical except for 'Lw', which is only present in BINTABLE extensions Parameters ---------- tdisp: str TDISPn FITS Header keyword. Used to specify display formatting. Returns ------- formatc: str The format characters from TDISPn width: str The width int value from TDISPn precision: str The precision int value from TDISPn exponential: str The exponential int value from TDISPn """ # Use appropriate regex for format type tdisp = tdisp.strip() fmt_key = tdisp[0] if tdisp[0] !='E' or tdisp[1] not in 'NS' else tdisp[:2] try: tdisp_re = TDISP_RE_DICT[fmt_key] except KeyError: raise VerifyError('Format {} is not recognized.'.format(tdisp)) match = tdisp_re.match(tdisp.strip()) if not match or match.group('formatc') is None: raise VerifyError('Format {} is not recognized.'.format(tdisp)) formatc = match.group('formatc') width = match.group('width') precision = None exponential = None # Some formats have precision and exponential if tdisp[0] in ('I', 'B', 'O', 'Z', 'F', 'E', 'G', 'D'): precision = match.group('precision') if precision is None: precision = 1 if tdisp[0] in ('E', 'D', 'G') and tdisp[1] not in ('N', 'S'): exponential = match.group('exponential') if exponential is None: exponential = 1 # Once parsed, check format dict to do conversion to a formatting string return formatc, width, precision, exponential def _fortran_to_python_format(tdisp): """ Turn the TDISPn fortran format pieces into a final Python format string. See the format_type definitions above the TDISP_FMT_DICT. 
    If the codes are later changed to take advantage of the exponential
    specification, it will need to be added as another input parameter.

    Parameters
    ----------
    tdisp : str
        TDISPn FITS Header keyword.  Used to specify display formatting.

    Returns
    -------
    format_string : str
        The TDISPn keyword string translated into a Python format string.
    """
    format_type, width, precision, exponential = _parse_tdisp_format(tdisp)

    try:
        fmt = TDISP_FMT_DICT[format_type]
        return fmt.format(width=width, precision=precision)
    except KeyError:
        raise VerifyError('Format {} is not recognized.'.format(format_type))


def python_to_tdisp(format_string, logical_dtype=False):
    """
    Turn a Python format string into a TDISP FITS compliant format string.
    Not all formats convert; these will cause a Warning and return None.

    Parameters
    ----------
    format_string : str
        Python format string to translate into a TDISPn keyword value.
    logical_dtype : bool
        True if this format type should be a logical type, 'L'.  Needs
        special handling.

    Returns
    -------
    tdisp_string : str
        The TDISPn keyword string translated from the Python format string.
    """

    fmt_to_tdisp = {'a': 'A', 's': 'A', 'd': 'I', 'b': 'B', 'o': 'O',
                    'x': 'Z', 'X': 'Z', 'f': 'F', 'F': 'F', 'g': 'G',
                    'G': 'G', 'e': 'E', 'E': 'E'}

    if format_string in [None, "", "{}"]:
        return None

    # Strip out extra format characters that aren't a type or width/precision
    if format_string[0] == '{' and format_string != "{}":
        fmt_str = format_string.lstrip("{:").rstrip('}')
    elif format_string[0] == '%':
        fmt_str = format_string.lstrip("%")
    else:
        fmt_str = format_string

    precision, sep = '', ''

    # Character format; only translate right-aligned, and don't take zero
    # fills
    if fmt_str[-1].isdigit() and fmt_str[0] == '>' and fmt_str[1] != '0':
        ftype = fmt_to_tdisp['a']
        width = fmt_str[1:]

    elif fmt_str[-1] == 's' and fmt_str != 's':
        ftype = fmt_to_tdisp['a']
        width = fmt_str[:-1].lstrip('0')

    # Number formats; don't take zero fills
    elif fmt_str[-1].isalpha() and len(fmt_str) > 1 and fmt_str[0] != '0':
        ftype = fmt_to_tdisp[fmt_str[-1]]
        fmt_str = fmt_str[:-1]

        # If the format has a "." split out the width and precision
        if '.' in fmt_str:
            width, precision = fmt_str.split('.')
            sep = '.'
            if width == "":
                ascii_key = ftype if ftype != 'G' else 'F'
                width = str(int(precision) +
                            (ASCII_DEFAULT_WIDTHS[ascii_key][0] -
                             ASCII_DEFAULT_WIDTHS[ascii_key][1]))
        # Otherwise we just have a width
        else:
            width = fmt_str

    else:
        warnings.warn('Format {} cannot be mapped to the accepted TDISPn '
                      'keyword values.  Format will not be moved into TDISPn '
                      'keyword.'.format(format_string), AstropyUserWarning)
        return None

    # Catch logical data type; set the format type back to L in this case
    if logical_dtype:
        ftype = 'L'

    return ftype + width + sep + precision
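
# Illustrative sketch of the round trip between the two helpers above (not a
# doctest; the exact output depends on the TDISP_FMT_DICT mappings):
#
#     python_to_tdisp('{:8.3f}')         # -> 'F8.3'
#     _fortran_to_python_format('F8.3')  # -> '{:8.3f}'
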
# Licensed under a 3-clause BSD style license - see PYFITS.rst import collections import copy import itertools import re import warnings from .card import Card, _pad, KEYWORD_LENGTH, UNDEFINED from .file import _File from .util import encode_ascii, decode_ascii, fileobj_closed, fileobj_is_binary from ._utils import parse_header from astropy.utils import isiterable from astropy.utils.exceptions import AstropyUserWarning from astropy.utils.decorators import deprecated_renamed_argument BLOCK_SIZE = 2880 # the FITS block size # This regular expression can match a *valid* END card which just consists of # the string 'END' followed by all spaces, or an *invalid* end card which # consists of END, followed by any character that is *not* a valid character # for a valid FITS keyword (that is, this is not a keyword like 'ENDER' which # starts with 'END' but is not 'END'), followed by any arbitrary bytes. An # invalid end card may also consist of just 'END' with no trailing bytes. HEADER_END_RE = re.compile(encode_ascii( r'(?:(?P<valid>END {77}) *)|(?P<invalid>END$|END {0,76}[^A-Z0-9_-])')) # According to the FITS standard the only characters that may appear in a # header record are the restricted ASCII chars from 0x20 through 0x7E. VALID_HEADER_CHARS = set(map(chr, range(0x20, 0x7F))) END_CARD = 'END' + ' ' * 77 __doctest_skip__ = ['Header', 'Header.*'] class Header: """ FITS header class. This class exposes both a dict-like interface and a list-like interface to FITS headers. The header may be indexed by keyword and, like a dict, the associated value will be returned. When the header contains cards with duplicate keywords, only the value of the first card with the given keyword will be returned. It is also possible to use a 2-tuple as the index in the form (keyword, n)--this returns the n-th value with that keyword, in the case where there are duplicate keywords. For example:: >>> header['NAXIS'] 0 >>> header[('FOO', 1)] # Return the value of the second FOO keyword 'foo' The header may also be indexed by card number:: >>> header[0] # Return the value of the first card in the header 'T' Commentary keywords such as HISTORY and COMMENT are special cases: When indexing the Header object with either 'HISTORY' or 'COMMENT' a list of all the HISTORY/COMMENT values is returned:: >>> header['HISTORY'] This is the first history entry in this header. This is the second history entry in this header. ... See the Astropy documentation for more details on working with headers. """ def __init__(self, cards=[], copy=False): """ Construct a `Header` from an iterable and/or text file. Parameters ---------- cards : A list of `Card` objects, optional The cards to initialize the header with. Also allowed are other `Header` (or `dict`-like) objects. .. versionchanged:: 1.2 Allowed ``cards`` to be a `dict`-like object. copy : bool, optional If ``True`` copies the ``cards`` if they were another `Header` instance. Default is ``False``. .. versionadded:: 1.3 """ self.clear() if isinstance(cards, Header): if copy: cards = cards.copy() cards = cards.cards elif isinstance(cards, dict): cards = cards.items() for card in cards: self.append(card, end=True) self._modified = False def __len__(self): return len(self._cards) def __iter__(self): for card in self._cards: yield card.keyword def __contains__(self, keyword): if keyword in self._keyword_indices or keyword in self._rvkc_indices: # For the most common case (single, standard form keyword lookup) # this will work and is an O(1) check. 
If it fails that doesn't # guarantee absence, just that we have to perform the full set of # checks in self._cardindex return True try: self._cardindex(keyword) except (KeyError, IndexError): return False return True def __getitem__(self, key): if isinstance(key, slice): return Header([copy.copy(c) for c in self._cards[key]]) elif self._haswildcard(key): return Header([copy.copy(self._cards[idx]) for idx in self._wildcardmatch(key)]) elif (isinstance(key, str) and key.upper() in Card._commentary_keywords): key = key.upper() # Special case for commentary cards return _HeaderCommentaryCards(self, key) if isinstance(key, tuple): keyword = key[0] else: keyword = key card = self._cards[self._cardindex(key)] if card.field_specifier is not None and keyword == card.rawkeyword: # This is RVKC; if only the top-level keyword was specified return # the raw value, not the parsed out float value return card.rawvalue value = card.value if value == UNDEFINED: return None return value def __setitem__(self, key, value): if self._set_slice(key, value, self): return if isinstance(value, tuple): if not (0 < len(value) <= 2): raise ValueError( 'A Header item may be set with either a scalar value, ' 'a 1-tuple containing a scalar value, or a 2-tuple ' 'containing a scalar value and comment string.') if len(value) == 1: value, comment = value[0], None if value is None: value = UNDEFINED elif len(value) == 2: value, comment = value if value is None: value = UNDEFINED if comment is None: comment = '' else: comment = None card = None if isinstance(key, int): card = self._cards[key] elif isinstance(key, tuple): card = self._cards[self._cardindex(key)] if value is None: value = UNDEFINED if card: card.value = value if comment is not None: card.comment = comment if card._modified: self._modified = True else: # If we get an IndexError that should be raised; we don't allow # assignment to non-existing indices self._update((key, value, comment)) def __delitem__(self, key): if isinstance(key, slice) or self._haswildcard(key): # This is very inefficient but it's not a commonly used feature. # If someone out there complains that they make heavy use of slice # deletions and it's too slow, well, we can worry about it then # [the solution is not too complicated--it would be wait 'til all # the cards are deleted before updating _keyword_indices rather # than updating it once for each card that gets deleted] if isinstance(key, slice): indices = range(*key.indices(len(self))) # If the slice step is backwards we want to reverse it, because # it will be reversed in a few lines... if key.step and key.step < 0: indices = reversed(indices) else: indices = self._wildcardmatch(key) for idx in reversed(indices): del self[idx] return elif isinstance(key, str): # delete ALL cards with the same keyword name key = Card.normalize_keyword(key) indices = self._keyword_indices if key not in self._keyword_indices: indices = self._rvkc_indices if key not in indices: # if keyword is not present raise KeyError. 
# To delete keyword without caring if they were present, # Header.remove(Keyword) can be used with optional argument ignore_missing as True raise KeyError("Keyword '{}' not found.".format(key)) for idx in reversed(indices[key]): # Have to copy the indices list since it will be modified below del self[idx] return idx = self._cardindex(key) card = self._cards[idx] keyword = card.keyword del self._cards[idx] keyword = Card.normalize_keyword(keyword) indices = self._keyword_indices[keyword] indices.remove(idx) if not indices: del self._keyword_indices[keyword] # Also update RVKC indices if necessary :/ if card.field_specifier is not None: indices = self._rvkc_indices[card.rawkeyword] indices.remove(idx) if not indices: del self._rvkc_indices[card.rawkeyword] # We also need to update all other indices self._updateindices(idx, increment=False) self._modified = True def __repr__(self): return self.tostring(sep='\n', endcard=False, padding=False) def __str__(self): return self.tostring() def __eq__(self, other): """ Two Headers are equal only if they have the exact same string representation. """ return str(self) == str(other) def __add__(self, other): temp = self.copy(strip=False) temp.extend(other) return temp def __iadd__(self, other): self.extend(other) return self def _ipython_key_completions_(self): return self.__iter__() @property def cards(self): """ The underlying physical cards that make up this Header; it can be looked at, but it should not be modified directly. """ return _CardAccessor(self) @property def comments(self): """ View the comments associated with each keyword, if any. For example, to see the comment on the NAXIS keyword: >>> header.comments['NAXIS'] number of data axes Comments can also be updated through this interface: >>> header.comments['NAXIS'] = 'Number of data axes' """ return _HeaderComments(self) @property def _modified(self): """ Whether or not the header has been modified; this is a property so that it can also check each card for modifications--cards may have been modified directly without the header containing it otherwise knowing. """ modified_cards = any(c._modified for c in self._cards) if modified_cards: # If any cards were modified then by definition the header was # modified self.__dict__['_modified'] = True return self.__dict__['_modified'] @_modified.setter def _modified(self, val): self.__dict__['_modified'] = val @classmethod def fromstring(cls, data, sep=''): """ Creates an HDU header from a byte string containing the entire header data. Parameters ---------- data : str String containing the entire header. sep : str, optional The string separating cards from each other, such as a newline. By default there is no card separator (as is the case in a raw FITS file). Returns ------- header A new `Header` instance. """ cards = [] # If the card separator contains characters that may validly appear in # a card, the only way to unambiguously distinguish between cards is to # require that they be Card.length long. 
However, if the separator # contains non-valid characters (namely \n) the cards may be split # immediately at the separator require_full_cardlength = set(sep).issubset(VALID_HEADER_CHARS) # Split the header into individual cards idx = 0 image = [] while idx < len(data): if require_full_cardlength: end_idx = idx + Card.length else: try: end_idx = data.index(sep, idx) except ValueError: end_idx = len(data) next_image = data[idx:end_idx] idx = end_idx + len(sep) if image: if next_image[:8] == 'CONTINUE': image.append(next_image) continue cards.append(Card.fromstring(''.join(image))) if require_full_cardlength: if next_image == END_CARD: image = [] break else: if next_image.split(sep)[0].rstrip() == 'END': image = [] break image = [next_image] # Add the last image that was found before the end, if any if image: cards.append(Card.fromstring(''.join(image))) return cls._fromcards(cards) @classmethod def fromfile(cls, fileobj, sep='', endcard=True, padding=True): """ Similar to :meth:`Header.fromstring`, but reads the header string from a given file-like object or filename. Parameters ---------- fileobj : str, file-like A filename or an open file-like object from which a FITS header is to be read. For open file handles the file pointer must be at the beginning of the header. sep : str, optional The string separating cards from each other, such as a newline. By default there is no card separator (as is the case in a raw FITS file). endcard : bool, optional If True (the default) the header must end with an END card in order to be considered valid. If an END card is not found an `OSError` is raised. padding : bool, optional If True (the default) the header will be required to be padded out to a multiple of 2880, the FITS header block size. Otherwise any padding, or lack thereof, is ignored. Returns ------- header A new `Header` instance. """ close_file = False if isinstance(fileobj, str): # Open in text mode by default to support newline handling; if a # binary-mode file object is passed in, the user is on their own # with respect to newline handling fileobj = open(fileobj, 'r') close_file = True try: is_binary = fileobj_is_binary(fileobj) def block_iter(nbytes): while True: data = fileobj.read(nbytes) if data: yield data else: break return cls._from_blocks(block_iter, is_binary, sep, endcard, padding)[1] finally: if close_file: fileobj.close() @classmethod def _fromcards(cls, cards): header = cls() for idx, card in enumerate(cards): header._cards.append(card) keyword = Card.normalize_keyword(card.keyword) header._keyword_indices[keyword].append(idx) if card.field_specifier is not None: header._rvkc_indices[card.rawkeyword].append(idx) header._modified = False return header @classmethod def _from_blocks(cls, block_iter, is_binary, sep, endcard, padding): """ The meat of `Header.fromfile`; in a separate method so that `Header.fromfile` itself is just responsible for wrapping file handling. Also used by `_BaseHDU.fromstring`. ``block_iter`` should be a callable which, given a block size n (typically 2880 bytes as used by the FITS standard) returns an iterator of byte strings of that block size. ``is_binary`` specifies whether the returned blocks are bytes or text Returns both the entire header *string*, and the `Header` object returned by Header.fromstring on that string. """ actual_block_size = _block_size(sep) clen = Card.length + len(sep) blocks = block_iter(actual_block_size) # Read the first header block. 
try: block = next(blocks) except StopIteration: raise EOFError() if not is_binary: # TODO: There needs to be error handling at *this* level for # non-ASCII characters; maybe at this stage decoding latin-1 might # be safer block = encode_ascii(block) read_blocks = [] is_eof = False end_found = False # continue reading header blocks until END card or EOF is reached while True: # find the END card end_found, block = cls._find_end_card(block, clen) read_blocks.append(decode_ascii(block)) if end_found: break try: block = next(blocks) except StopIteration: is_eof = True break if not block: is_eof = True break if not is_binary: block = encode_ascii(block) if not end_found and is_eof and endcard: # TODO: Pass this error to validation framework as an ERROR, # rather than raising an exception raise OSError('Header missing END card.') header_str = ''.join(read_blocks) _check_padding(header_str, actual_block_size, is_eof, check_block_size=padding) return header_str, cls.fromstring(header_str, sep=sep) @classmethod def _find_end_card(cls, block, card_len): """ Utility method to search a header block for the END card and handle invalid END cards. This method can also returned a modified copy of the input header block in case an invalid end card needs to be sanitized. """ for mo in HEADER_END_RE.finditer(block): # Ensure the END card was found, and it started on the # boundary of a new card (see ticket #142) if mo.start() % card_len != 0: continue # This must be the last header block, otherwise the # file is malformatted if mo.group('invalid'): offset = mo.start() trailing = block[offset + 3:offset + card_len - 3].rstrip() if trailing: trailing = repr(trailing).lstrip('ub') # TODO: Pass this warning up to the validation framework warnings.warn( 'Unexpected bytes trailing END keyword: {0}; these ' 'bytes will be replaced with spaces on write.'.format( trailing), AstropyUserWarning) else: # TODO: Pass this warning up to the validation framework warnings.warn( 'Missing padding to end of the FITS block after the ' 'END keyword; additional spaces will be appended to ' 'the file upon writing to pad out to {0} ' 'bytes.'.format(BLOCK_SIZE), AstropyUserWarning) # Sanitize out invalid END card now that the appropriate # warnings have been issued block = (block[:offset] + encode_ascii(END_CARD) + block[offset + len(END_CARD):]) return True, block return False, block def tostring(self, sep='', endcard=True, padding=True): r""" Returns a string representation of the header. By default this uses no separator between cards, adds the END card, and pads the string with spaces to the next multiple of 2880 bytes. That is, it returns the header exactly as it would appear in a FITS file. Parameters ---------- sep : str, optional The character or string with which to separate cards. By default there is no separator, but one could use ``'\\n'``, for example, to separate each card with a new line endcard : bool, optional If True (default) adds the END card to the end of the header string padding : bool, optional If True (default) pads the string with spaces out to the next multiple of 2880 characters Returns ------- s : str A string representing a FITS header. 
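
        Examples
        --------
        A minimal sketch (``header`` is any existing `Header`; the output is
        abbreviated)::

            >>> print(header.tostring(sep='\\n', endcard=False, padding=False))
            SIMPLE  =                    T / conforms to FITS standard
            BITPIX  =                    8 / array data type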
""" lines = [] for card in self._cards: s = str(card) # Cards with CONTINUE cards may be longer than 80 chars; so break # them into multiple lines while s: lines.append(s[:Card.length]) s = s[Card.length:] s = sep.join(lines) if endcard: s += sep + _pad('END') if padding: s += ' ' * _pad_length(len(s)) return s @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def tofile(self, fileobj, sep='', endcard=True, padding=True, overwrite=False): r""" Writes the header to file or file-like object. By default this writes the header exactly as it would be written to a FITS file, with the END card included and padding to the next multiple of 2880 bytes. However, aspects of this may be controlled. Parameters ---------- fileobj : str, file, optional Either the pathname of a file, or an open file handle or file-like object sep : str, optional The character or string with which to separate cards. By default there is no separator, but one could use ``'\\n'``, for example, to separate each card with a new line endcard : bool, optional If `True` (default) adds the END card to the end of the header string padding : bool, optional If `True` (default) pads the string with spaces out to the next multiple of 2880 characters overwrite : bool, optional If ``True``, overwrite the output file if it exists. Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. .. versionchanged:: 1.3 ``overwrite`` replaces the deprecated ``clobber`` argument. """ close_file = fileobj_closed(fileobj) if not isinstance(fileobj, _File): fileobj = _File(fileobj, mode='ostream', overwrite=overwrite) try: blocks = self.tostring(sep=sep, endcard=endcard, padding=padding) actual_block_size = _block_size(sep) if padding and len(blocks) % actual_block_size != 0: raise OSError( 'Header size ({}) is not a multiple of block ' 'size ({}).'.format( len(blocks) - actual_block_size + BLOCK_SIZE, BLOCK_SIZE)) if not fileobj.simulateonly: fileobj.flush() try: offset = fileobj.tell() except (AttributeError, OSError): offset = 0 fileobj.write(blocks.encode('ascii')) fileobj.flush() finally: if close_file: fileobj.close() @classmethod def fromtextfile(cls, fileobj, endcard=False): """ Read a header from a simple text file or file-like object. Equivalent to:: >>> Header.fromfile(fileobj, sep='\\n', endcard=False, ... padding=False) See Also -------- fromfile """ return cls.fromfile(fileobj, sep='\n', endcard=endcard, padding=False) @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def totextfile(self, fileobj, endcard=False, overwrite=False): """ Write the header as text to a file or a file-like object. Equivalent to:: >>> Header.tofile(fileobj, sep='\\n', endcard=False, ... padding=False, overwrite=overwrite) .. versionchanged:: 1.3 ``overwrite`` replaces the deprecated ``clobber`` argument. See Also -------- tofile """ self.tofile(fileobj, sep='\n', endcard=endcard, padding=False, overwrite=overwrite) def clear(self): """ Remove all cards from the header. """ self._cards = [] self._keyword_indices = collections.defaultdict(list) self._rvkc_indices = collections.defaultdict(list) def copy(self, strip=False): """ Make a copy of the :class:`Header`. .. versionchanged:: 1.3 `copy.copy` and `copy.deepcopy` on a `Header` will call this method. Parameters ---------- strip : bool, optional If `True`, strip any headers that are specific to one of the standard HDU types, so that this header can be used in a different HDU. Returns ------- header A new :class:`Header` instance. 
""" tmp = Header((copy.copy(card) for card in self._cards)) if strip: tmp._strip() return tmp def __copy__(self): return self.copy() def __deepcopy__(self, *args, **kwargs): return self.copy() @classmethod def fromkeys(cls, iterable, value=None): """ Similar to :meth:`dict.fromkeys`--creates a new `Header` from an iterable of keywords and an optional default value. This method is not likely to be particularly useful for creating real world FITS headers, but it is useful for testing. Parameters ---------- iterable Any iterable that returns strings representing FITS keywords. value : optional A default value to assign to each keyword; must be a valid type for FITS keywords. Returns ------- header A new `Header` instance. """ d = cls() if not isinstance(value, tuple): value = (value,) for key in iterable: d.append((key,) + value) return d def get(self, key, default=None): """ Similar to :meth:`dict.get`--returns the value associated with keyword in the header, or a default value if the keyword is not found. Parameters ---------- key : str A keyword that may or may not be in the header. default : optional A default value to return if the keyword is not found in the header. Returns ------- value The value associated with the given keyword, or the default value if the keyword is not in the header. """ try: return self[key] except (KeyError, IndexError): return default def set(self, keyword, value=None, comment=None, before=None, after=None): """ Set the value and/or comment and/or position of a specified keyword. If the keyword does not already exist in the header, a new keyword is created in the specified position, or appended to the end of the header if no position is specified. This method is similar to :meth:`Header.update` prior to Astropy v0.1. .. note:: It should be noted that ``header.set(keyword, value)`` and ``header.set(keyword, value, comment)`` are equivalent to ``header[keyword] = value`` and ``header[keyword] = (value, comment)`` respectively. New keywords can also be inserted relative to existing keywords using, for example:: >>> header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes')) to insert before an existing keyword, or:: >>> header.insert('NAXIS', ('NAXIS1', 4096), after=True) to insert after an existing keyword. The only advantage of using :meth:`Header.set` is that it easily replaces the old usage of :meth:`Header.update` both conceptually and in terms of function signature. Parameters ---------- keyword : str A header keyword value : str, optional The value to set for the given keyword; if None the existing value is kept, but '' may be used to set a blank value comment : str, optional The comment to set for the given keyword; if None the existing comment is kept, but ``''`` may be used to set a blank comment before : str, int, optional Name of the keyword, or index of the `Card` before which this card should be located in the header. The argument ``before`` takes precedence over ``after`` if both specified. after : str, int, optional Name of the keyword, or index of the `Card` after which this card should be located in the header. """ # Create a temporary card that looks like the one being set; if the # temporary card turns out to be a RVKC this will make it easier to # deal with the idiosyncrasies thereof # Don't try to make a temporary card though if they keyword looks like # it might be a HIERARCH card or is otherwise invalid--this step is # only for validating RVKCs. 
if (len(keyword) <= KEYWORD_LENGTH and Card._keywd_FSC_RE.match(keyword) and keyword not in self._keyword_indices): new_card = Card(keyword, value, comment) new_keyword = new_card.keyword else: new_keyword = keyword if (new_keyword not in Card._commentary_keywords and new_keyword in self): if comment is None: comment = self.comments[keyword] if value is None: value = self[keyword] self[keyword] = (value, comment) if before is not None or after is not None: card = self._cards[self._cardindex(keyword)] self._relativeinsert(card, before=before, after=after, replace=True) elif before is not None or after is not None: self._relativeinsert((keyword, value, comment), before=before, after=after) else: self[keyword] = (value, comment) def items(self): """Like :meth:`dict.items`.""" for card in self._cards: yield (card.keyword, card.value) def keys(self): """ Like :meth:`dict.keys`--iterating directly over the `Header` instance has the same behavior. """ for card in self._cards: yield card.keyword def values(self): """Like :meth:`dict.values`.""" for card in self._cards: yield card.value def pop(self, *args): """ Works like :meth:`list.pop` if no arguments or an index argument are supplied; otherwise works like :meth:`dict.pop`. """ if len(args) > 2: raise TypeError('Header.pop expected at most 2 arguments, got ' '{}'.format(len(args))) if len(args) == 0: key = -1 else: key = args[0] try: value = self[key] except (KeyError, IndexError): if len(args) == 2: return args[1] raise del self[key] return value def popitem(self): """Similar to :meth:`dict.popitem`.""" try: k, v = next(self.items()) except StopIteration: raise KeyError('Header is empty') del self[k] return k, v def setdefault(self, key, default=None): """Similar to :meth:`dict.setdefault`.""" try: return self[key] except (KeyError, IndexError): self[key] = default return default def update(self, *args, **kwargs): """ Update the Header with new keyword values, updating the values of existing keywords and appending new keywords otherwise; similar to `dict.update`. `update` accepts either a dict-like object or an iterable. In the former case the keys must be header keywords and the values may be either scalar values or (value, comment) tuples. In the case of an iterable the items must be (keyword, value) tuples or (keyword, value, comment) tuples. Arbitrary arguments are also accepted, in which case the update() is called again with the kwargs dict as its only argument. That is, :: >>> header.update(NAXIS1=100, NAXIS2=100) is equivalent to:: header.update({'NAXIS1': 100, 'NAXIS2': 100}) .. warning:: As this method works similarly to `dict.update` it is very different from the ``Header.update()`` method in Astropy v0.1. Use of the old API was **deprecated** for a long time and is now removed. Most uses of the old API can be replaced as follows: * Replace :: header.update(keyword, value) with :: header[keyword] = value * Replace :: header.update(keyword, value, comment=comment) with :: header[keyword] = (value, comment) * Replace :: header.update(keyword, value, before=before_keyword) with :: header.insert(before_keyword, (keyword, value)) * Replace :: header.update(keyword, value, after=after_keyword) with :: header.insert(after_keyword, (keyword, value), after=True) See also :meth:`Header.set` which is a new method that provides an interface similar to the old ``Header.update()`` and may help make transition a little easier. 
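
        Examples
        --------
        An illustrative sketch of the accepted value forms::

            >>> header.update({'OBJECT': 'M31'})                   # plain value
            >>> header.update({'OBJECT': ('M31', 'Target name')})  # with comment
            >>> header.update([('OBSERVER', 'Hubble'), ('EXPTIME', 300.0)])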
""" if args: other = args[0] else: other = None def update_from_dict(k, v): if not isinstance(v, tuple): card = Card(k, v) elif 0 < len(v) <= 2: card = Card(*((k,) + v)) else: raise ValueError( 'Header update value for key %r is invalid; the ' 'value must be either a scalar, a 1-tuple ' 'containing the scalar value, or a 2-tuple ' 'containing the value and a comment string.' % k) self._update(card) if other is None: pass elif hasattr(other, 'items'): for k, v in other.items(): update_from_dict(k, v) elif hasattr(other, 'keys'): for k in other.keys(): update_from_dict(k, other[k]) else: for idx, card in enumerate(other): if isinstance(card, Card): self._update(card) elif isinstance(card, tuple) and (1 < len(card) <= 3): self._update(Card(*card)) else: raise ValueError( 'Header update sequence item #{} is invalid; ' 'the item must either be a 2-tuple containing ' 'a keyword and value, or a 3-tuple containing ' 'a keyword, value, and comment string.'.format(idx)) if kwargs: self.update(kwargs) def append(self, card=None, useblanks=True, bottom=False, end=False): """ Appends a new keyword+value card to the end of the Header, similar to `list.append`. By default if the last cards in the Header have commentary keywords, this will append the new keyword before the commentary (unless the new keyword is also commentary). Also differs from `list.append` in that it can be called with no arguments: In this case a blank card is appended to the end of the Header. In the case all the keyword arguments are ignored. Parameters ---------- card : str, tuple A keyword or a (keyword, value, [comment]) tuple representing a single header card; the comment is optional in which case a 2-tuple may be used useblanks : bool, optional If there are blank cards at the end of the Header, replace the first blank card so that the total number of cards in the Header does not increase. Otherwise preserve the number of blank cards. bottom : bool, optional If True, instead of appending after the last non-commentary card, append after the last non-blank card. end : bool, optional If True, ignore the useblanks and bottom options, and append at the very end of the Header. 
""" if isinstance(card, str): card = Card(card) elif isinstance(card, tuple): card = Card(*card) elif card is None: card = Card() elif not isinstance(card, Card): raise ValueError( 'The value appended to a Header must be either a keyword or ' '(keyword, value, [comment]) tuple; got: {!r}'.format(card)) if not end and card.is_blank: # Blank cards should always just be appended to the end end = True if end: self._cards.append(card) idx = len(self._cards) - 1 else: idx = len(self._cards) - 1 while idx >= 0 and self._cards[idx].is_blank: idx -= 1 if not bottom and card.keyword not in Card._commentary_keywords: while (idx >= 0 and self._cards[idx].keyword in Card._commentary_keywords): idx -= 1 idx += 1 self._cards.insert(idx, card) self._updateindices(idx) keyword = Card.normalize_keyword(card.keyword) self._keyword_indices[keyword].append(idx) if card.field_specifier is not None: self._rvkc_indices[card.rawkeyword].append(idx) if not end: # If the appended card was a commentary card, and it was appended # before existing cards with the same keyword, the indices for # cards with that keyword may have changed if not bottom and card.keyword in Card._commentary_keywords: self._keyword_indices[keyword].sort() # Finally, if useblanks, delete a blank cards from the end if useblanks and self._countblanks(): # Don't do this unless there is at least one blanks at the end # of the header; we need to convert the card to its string # image to see how long it is. In the vast majority of cases # this will just be 80 (Card.length) but it may be longer for # CONTINUE cards self._useblanks(len(str(card)) // Card.length) self._modified = True def extend(self, cards, strip=True, unique=False, update=False, update_first=False, useblanks=True, bottom=False, end=False): """ Appends multiple keyword+value cards to the end of the header, similar to `list.extend`. Parameters ---------- cards : iterable An iterable of (keyword, value, [comment]) tuples; see `Header.append`. strip : bool, optional Remove any keywords that have meaning only to specific types of HDUs, so that only more general keywords are added from extension Header or Card list (default: `True`). unique : bool, optional If `True`, ensures that no duplicate keywords are appended; keywords already in this header are simply discarded. The exception is commentary keywords (COMMENT, HISTORY, etc.): they are only treated as duplicates if their values match. update : bool, optional If `True`, update the current header with the values and comments from duplicate keywords in the input header. This supersedes the ``unique`` argument. Commentary keywords are treated the same as if ``unique=True``. update_first : bool, optional If the first keyword in the header is 'SIMPLE', and the first keyword in the input header is 'XTENSION', the 'SIMPLE' keyword is replaced by the 'XTENSION' keyword. Likewise if the first keyword in the header is 'XTENSION' and the first keyword in the input header is 'SIMPLE', the 'XTENSION' keyword is replaced by the 'SIMPLE' keyword. This behavior is otherwise dumb as to whether or not the resulting header is a valid primary or extension header. This is mostly provided to support backwards compatibility with the old ``Header.fromTxtFile`` method, and only applies if ``update=True``. useblanks, bottom, end : bool, optional These arguments are passed to :meth:`Header.append` while appending new cards to the header. 
""" temp = Header(cards) if strip: temp._strip() if len(self): first = self._cards[0].keyword else: first = None # We don't immediately modify the header, because first we need to sift # out any duplicates in the new header prior to adding them to the # existing header, but while *allowing* duplicates from the header # being extended from (see ticket #156) extend_cards = [] for idx, card in enumerate(temp.cards): keyword = card.keyword if keyword not in Card._commentary_keywords: if unique and not update and keyword in self: continue elif update: if idx == 0 and update_first: # Dumbly update the first keyword to either SIMPLE or # XTENSION as the case may be, as was in the case in # Header.fromTxtFile if ((keyword == 'SIMPLE' and first == 'XTENSION') or (keyword == 'XTENSION' and first == 'SIMPLE')): del self[0] self.insert(0, card) else: self[keyword] = (card.value, card.comment) elif keyword in self: self[keyword] = (card.value, card.comment) else: extend_cards.append(card) else: extend_cards.append(card) else: if (unique or update) and keyword in self: if card.is_blank: extend_cards.append(card) continue for value in self[keyword]: if value == card.value: break else: extend_cards.append(card) else: extend_cards.append(card) for card in extend_cards: self.append(card, useblanks=useblanks, bottom=bottom, end=end) def count(self, keyword): """ Returns the count of the given keyword in the header, similar to `list.count` if the Header object is treated as a list of keywords. Parameters ---------- keyword : str The keyword to count instances of in the header """ keyword = Card.normalize_keyword(keyword) # We have to look before we leap, since otherwise _keyword_indices, # being a defaultdict, will create an entry for the nonexistent keyword if keyword not in self._keyword_indices: raise KeyError("Keyword {!r} not found.".format(keyword)) return len(self._keyword_indices[keyword]) def index(self, keyword, start=None, stop=None): """ Returns the index if the first instance of the given keyword in the header, similar to `list.index` if the Header object is treated as a list of keywords. Parameters ---------- keyword : str The keyword to look up in the list of all keywords in the header start : int, optional The lower bound for the index stop : int, optional The upper bound for the index """ if start is None: start = 0 if stop is None: stop = len(self._cards) if stop < start: step = -1 else: step = 1 norm_keyword = Card.normalize_keyword(keyword) for idx in range(start, stop, step): if self._cards[idx].keyword.upper() == norm_keyword: return idx else: raise ValueError('The keyword {!r} is not in the ' ' header.'.format(keyword)) def insert(self, key, card, useblanks=True, after=False): """ Inserts a new keyword+value card into the Header at a given location, similar to `list.insert`. Parameters ---------- key : int, str, or tuple The index into the list of header keywords before which the new keyword should be inserted, or the name of a keyword before which the new keyword should be inserted. Can also accept a (keyword, index) tuple for inserting around duplicate keywords. card : str, tuple A keyword or a (keyword, value, [comment]) tuple; see `Header.append` useblanks : bool, optional If there are blank cards at the end of the Header, replace the first blank card so that the total number of cards in the Header does not increase. Otherwise preserve the number of blank cards. after : bool, optional If set to `True`, insert *after* the specified index or keyword, rather than before it. 
Defaults to `False`. """ if not isinstance(key, int): # Don't pass through ints to _cardindex because it will not take # kindly to indices outside the existing number of cards in the # header, which insert needs to be able to support (for example # when inserting into empty headers) idx = self._cardindex(key) else: idx = key if after: if idx == -1: idx = len(self._cards) else: idx += 1 if idx >= len(self._cards): # This is just an append (Though it must be an append absolutely to # the bottom, ignoring blanks, etc.--the point of the insert method # is that you get exactly what you asked for with no surprises) self.append(card, end=True) return if isinstance(card, str): card = Card(card) elif isinstance(card, tuple): card = Card(*card) elif not isinstance(card, Card): raise ValueError( 'The value inserted into a Header must be either a keyword or ' '(keyword, value, [comment]) tuple; got: {!r}'.format(card)) self._cards.insert(idx, card) keyword = card.keyword # If idx was < 0, determine the actual index according to the rules # used by list.insert() if idx < 0: idx += len(self._cards) - 1 if idx < 0: idx = 0 # All the keyword indices above the insertion point must be updated self._updateindices(idx) keyword = Card.normalize_keyword(keyword) self._keyword_indices[keyword].append(idx) count = len(self._keyword_indices[keyword]) if count > 1: # There were already keywords with this same name if keyword not in Card._commentary_keywords: warnings.warn( 'A {!r} keyword already exists in this header. Inserting ' 'duplicate keyword.'.format(keyword), AstropyUserWarning) self._keyword_indices[keyword].sort() if card.field_specifier is not None: # Update the index of RVKC as well rvkc_indices = self._rvkc_indices[card.rawkeyword] rvkc_indices.append(idx) rvkc_indices.sort() if useblanks: self._useblanks(len(str(card)) // Card.length) self._modified = True def remove(self, keyword, ignore_missing=False, remove_all=False): """ Removes the first instance of the given keyword from the header similar to `list.remove` if the Header object is treated as a list of keywords. Parameters ---------- keyword : str The keyword of which to remove the first instance in the header. ignore_missing : bool, optional When True, ignores missing keywords. Otherwise, if the keyword is not present in the header a KeyError is raised. remove_all : bool, optional When True, all instances of keyword will be removed. Otherwise only the first instance of the given keyword is removed. """ keyword = Card.normalize_keyword(keyword) if keyword in self._keyword_indices: del self[self._keyword_indices[keyword][0]] if remove_all: while keyword in self._keyword_indices: del self[self._keyword_indices[keyword][0]] elif not ignore_missing: raise KeyError("Keyword '{}' not found.".format(keyword)) def rename_keyword(self, oldkeyword, newkeyword, force=False): """ Rename a card's keyword in the header. Parameters ---------- oldkeyword : str or int Old keyword or card index newkeyword : str New keyword force : bool, optional When `True`, if the new keyword already exists in the header, force the creation of a duplicate keyword. Otherwise a `ValueError` is raised. 
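
        Examples
        --------
        An illustrative sketch::

            >>> header.rename_keyword('OBJCT', 'OBJECT')
            >>> header.rename_keyword('OBJECT', 'TARGET', force=True)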
""" oldkeyword = Card.normalize_keyword(oldkeyword) newkeyword = Card.normalize_keyword(newkeyword) if newkeyword == 'CONTINUE': raise ValueError('Can not rename to CONTINUE') if (newkeyword in Card._commentary_keywords or oldkeyword in Card._commentary_keywords): if not (newkeyword in Card._commentary_keywords and oldkeyword in Card._commentary_keywords): raise ValueError('Regular and commentary keys can not be ' 'renamed to each other.') elif not force and newkeyword in self: raise ValueError('Intended keyword {} already exists in header.' .format(newkeyword)) idx = self.index(oldkeyword) card = self._cards[idx] del self[idx] self.insert(idx, (newkeyword, card.value, card.comment)) def add_history(self, value, before=None, after=None): """ Add a ``HISTORY`` card. Parameters ---------- value : str History text to be added. before : str or int, optional Same as in `Header.update` after : str or int, optional Same as in `Header.update` """ self._add_commentary('HISTORY', value, before=before, after=after) def add_comment(self, value, before=None, after=None): """ Add a ``COMMENT`` card. Parameters ---------- value : str Text to be added. before : str or int, optional Same as in `Header.update` after : str or int, optional Same as in `Header.update` """ self._add_commentary('COMMENT', value, before=before, after=after) def add_blank(self, value='', before=None, after=None): """ Add a blank card. Parameters ---------- value : str, optional Text to be added. before : str or int, optional Same as in `Header.update` after : str or int, optional Same as in `Header.update` """ self._add_commentary('', value, before=before, after=after) def _update(self, card): """ The real update code. If keyword already exists, its value and/or comment will be updated. Otherwise a new card will be appended. This will not create a duplicate keyword except in the case of commentary cards. The only other way to force creation of a duplicate is to use the insert(), append(), or extend() methods. """ keyword, value, comment = card # Lookups for existing/known keywords are case-insensitive keyword = keyword.upper() if keyword.startswith('HIERARCH '): keyword = keyword[9:] if (keyword not in Card._commentary_keywords and keyword in self._keyword_indices): # Easy; just update the value/comment idx = self._keyword_indices[keyword][0] existing_card = self._cards[idx] existing_card.value = value if comment is not None: # '' should be used to explicitly blank a comment existing_card.comment = comment if existing_card._modified: self._modified = True elif keyword in Card._commentary_keywords: cards = self._splitcommentary(keyword, value) if keyword in self._keyword_indices: # Append after the last keyword of the same type idx = self.index(keyword, start=len(self) - 1, stop=-1) isblank = not (keyword or value or comment) for c in reversed(cards): self.insert(idx + 1, c, useblanks=(not isblank)) else: for c in cards: self.append(c, bottom=True) else: # A new keyword! 
self.append() will handle updating _modified self.append(card) def _cardindex(self, key): """Returns an index into the ._cards list given a valid lookup key.""" # This used to just set key = (key, 0) and then go on to act as if the # user passed in a tuple, but it's much more common to just be given a # string as the key, so optimize more for that case if isinstance(key, str): keyword = key n = 0 elif isinstance(key, int): # If < 0, determine the actual index if key < 0: key += len(self._cards) if key < 0 or key >= len(self._cards): raise IndexError('Header index out of range.') return key elif isinstance(key, slice): return key elif isinstance(key, tuple): if (len(key) != 2 or not isinstance(key[0], str) or not isinstance(key[1], int)): raise ValueError( 'Tuple indices must be 2-tuples consisting of a ' 'keyword string and an integer index.') keyword, n = key else: raise ValueError( 'Header indices must be either a string, a 2-tuple, or ' 'an integer.') keyword = Card.normalize_keyword(keyword) # Returns the index into _cards for the n-th card with the given # keyword (where n is 0-based) indices = self._keyword_indices.get(keyword, None) if keyword and not indices: if len(keyword) > KEYWORD_LENGTH or '.' in keyword: raise KeyError("Keyword {!r} not found.".format(keyword)) else: # Maybe it's a RVKC? indices = self._rvkc_indices.get(keyword, None) if not indices: raise KeyError("Keyword {!r} not found.".format(keyword)) try: return indices[n] except IndexError: raise IndexError('There are only {} {!r} cards in the ' 'header.'.format(len(indices), keyword)) def _keyword_from_index(self, idx): """ Given an integer index, return the (keyword, repeat) tuple that index refers to. For most keywords the repeat will always be zero, but it may be greater than zero for keywords that are duplicated (especially commentary keywords). In a sense this is the inverse of self.index, except that it also supports duplicates. """ if idx < 0: idx += len(self._cards) keyword = self._cards[idx].keyword keyword = Card.normalize_keyword(keyword) repeat = self._keyword_indices[keyword].index(idx) return keyword, repeat def _relativeinsert(self, card, before=None, after=None, replace=False): """ Inserts a new card before or after an existing card; used to implement support for the legacy before/after keyword arguments to Header.update(). If replace=True, move an existing card with the same keyword. """ if before is None: insertionkey = after else: insertionkey = before def get_insertion_idx(): if not (isinstance(insertionkey, int) and insertionkey >= len(self._cards)): idx = self._cardindex(insertionkey) else: idx = insertionkey if before is None: idx += 1 return idx if replace: # The card presumably already exists somewhere in the header. 
# Check whether or not we actually have to move it; if it does need # to be moved we just delete it and then it will be reinserted # below old_idx = self._cardindex(card.keyword) insertion_idx = get_insertion_idx() if (insertion_idx >= len(self._cards) and old_idx == len(self._cards) - 1): # The card would be appended to the end, but it's already at # the end return if before is not None: if old_idx == insertion_idx - 1: return elif after is not None and old_idx == insertion_idx: return del self[old_idx] # Even if replace=True, the insertion idx may have changed since the # old card was deleted idx = get_insertion_idx() if card[0] in Card._commentary_keywords: cards = reversed(self._splitcommentary(card[0], card[1])) else: cards = [card] for c in cards: self.insert(idx, c) def _updateindices(self, idx, increment=True): """ For all cards with index above idx, increment or decrement its index value in the keyword_indices dict. """ if idx > len(self._cards): # Save us some effort return increment = 1 if increment else -1 for index_sets in (self._keyword_indices, self._rvkc_indices): for indices in index_sets.values(): for jdx, keyword_index in enumerate(indices): if keyword_index >= idx: indices[jdx] += increment def _countblanks(self): """Returns the number of blank cards at the end of the Header.""" for idx in range(1, len(self._cards)): if not self._cards[-idx].is_blank: return idx - 1 return 0 def _useblanks(self, count): for _ in range(count): if self._cards[-1].is_blank: del self[-1] else: break def _haswildcard(self, keyword): """Return `True` if the input keyword contains a wildcard pattern.""" return (isinstance(keyword, str) and (keyword.endswith('...') or '*' in keyword or '?' in keyword)) def _wildcardmatch(self, pattern): """ Returns a list of indices of the cards matching the given wildcard pattern. * '*' matches 0 or more characters * '?' matches a single character * '...' matches 0 or more of any non-whitespace character """ pattern = pattern.replace('*', r'.*').replace('?', r'.') pattern = pattern.replace('...', r'\S*') + '$' pattern_re = re.compile(pattern, re.I) return [idx for idx, card in enumerate(self._cards) if pattern_re.match(card.keyword)] def _set_slice(self, key, value, target): """ Used to implement Header.__setitem__ and CardAccessor.__setitem__. """ if isinstance(key, slice) or self._haswildcard(key): if isinstance(key, slice): indices = range(*key.indices(len(target))) else: indices = self._wildcardmatch(key) if isinstance(value, str) or not isiterable(value): value = itertools.repeat(value, len(indices)) for idx, val in zip(indices, value): target[idx] = val return True return False def _splitcommentary(self, keyword, value): """ Given a commentary keyword and value, returns a list of the one or more cards needed to represent the full value. This is primarily used to create the multiple commentary cards needed to represent a long value that won't fit into a single commentary card. 
""" # The maximum value in each card can be the maximum card length minus # the maximum key length (which can include spaces if they key length # less than 8 maxlen = Card.length - KEYWORD_LENGTH valuestr = str(value) if len(valuestr) <= maxlen: # The value can fit in a single card cards = [Card(keyword, value)] else: # The value must be split across multiple consecutive commentary # cards idx = 0 cards = [] while idx < len(valuestr): cards.append(Card(keyword, valuestr[idx:idx + maxlen])) idx += maxlen return cards def _strip(self): """ Strip cards specific to a certain kind of header. Strip cards like ``SIMPLE``, ``BITPIX``, etc. so the rest of the header can be used to reconstruct another kind of header. """ # TODO: Previously this only deleted some cards specific to an HDU if # _hdutype matched that type. But it seemed simple enough to just # delete all desired cards anyways, and just ignore the KeyErrors if # they don't exist. # However, it might be desirable to make this extendable somehow--have # a way for HDU classes to specify some headers that are specific only # to that type, and should be removed otherwise. if 'NAXIS' in self: naxis = self['NAXIS'] else: naxis = 0 if 'TFIELDS' in self: tfields = self['TFIELDS'] else: tfields = 0 for idx in range(naxis): try: del self['NAXIS' + str(idx + 1)] except KeyError: pass for name in ('TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE', 'TUNIT', 'TDISP', 'TDIM', 'THEAP', 'TBCOL'): for idx in range(tfields): try: del self[name + str(idx + 1)] except KeyError: pass for name in ('SIMPLE', 'XTENSION', 'BITPIX', 'NAXIS', 'EXTEND', 'PCOUNT', 'GCOUNT', 'GROUPS', 'BSCALE', 'BZERO', 'TFIELDS'): try: del self[name] except KeyError: pass def _add_commentary(self, key, value, before=None, after=None): """ Add a commentary card. If ``before`` and ``after`` are `None`, add to the last occurrence of cards of the same name (except blank card). If there is no card (or blank card), append at the end. """ if before is not None or after is not None: self._relativeinsert((key, value), before=before, after=after) else: self[key] = value collections.abc.MutableSequence.register(Header) collections.abc.MutableMapping.register(Header) class _DelayedHeader: """ Descriptor used to create the Header object from the header string that was stored in HDU._header_str when parsing the file. """ def __get__(self, obj, owner=None): try: return obj.__dict__['_header'] except KeyError: if obj._header_str is not None: hdr = Header.fromstring(obj._header_str) obj._header_str = None else: raise AttributeError("'{}' object has no attribute '_header'" .format(obj.__class__.__name__)) obj.__dict__['_header'] = hdr return hdr def __set__(self, obj, val): obj.__dict__['_header'] = val def __delete__(self, obj): del obj.__dict__['_header'] class _BasicHeaderCards: """ This class allows to access cards with the _BasicHeader.cards attribute. This is needed because during the HDU class detection, some HDUs uses the .cards interface. Cards cannot be modified here as the _BasicHeader object will be deleted once the HDU object is created. """ def __init__(self, header): self.header = header def __getitem__(self, key): # .cards is a list of cards, so key here is an integer. # get the keyword name from its index. key = self.header._keys[key] # then we get the card from the _BasicHeader._cards list, or parse it # if needed. 
try: return self.header._cards[key] except KeyError: cardstr = self.header._raw_cards[key] card = Card.fromstring(cardstr) self.header._cards[key] = card return card class _BasicHeader(collections.abc.Mapping): """This class provides a fast header parsing, without all the additional features of the Header class. Here only standard keywords are parsed, no support for CONTINUE, HIERARCH, COMMENT, HISTORY, or rvkc. The raw card images are stored and parsed only if needed. The idea is that to create the HDU objects, only a small subset of standard cards is needed. Once a card is parsed, which is deferred to the Card class, the Card object is kept in a cache. This is useful because a small subset of cards is used a lot in the HDU creation process (NAXIS, XTENSION, ...). """ def __init__(self, cards): # dict of (keywords, card images) self._raw_cards = cards self._keys = list(cards.keys()) # dict of (keyword, Card object) storing the parsed cards self._cards = {} # the _BasicHeaderCards object allows to access Card objects from # keyword indices self.cards = _BasicHeaderCards(self) self._modified = False def __getitem__(self, key): if isinstance(key, int): key = self._keys[key] try: return self._cards[key].value except KeyError: # parse the Card and store it cardstr = self._raw_cards[key] self._cards[key] = card = Card.fromstring(cardstr) return card.value def __len__(self): return len(self._raw_cards) def __iter__(self): return iter(self._raw_cards) def index(self, keyword): return self._keys.index(keyword) @classmethod def fromfile(cls, fileobj): """The main method to parse a FITS header from a file. The parsing is done with the parse_header function implemented in Cython.""" close_file = False if isinstance(fileobj, str): fileobj = open(fileobj, 'rb') close_file = True try: header_str, cards = parse_header(fileobj) _check_padding(header_str, BLOCK_SIZE, False) return header_str, cls(cards) finally: if close_file: fileobj.close() class _CardAccessor: """ This is a generic class for wrapping a Header in such a way that you can use the header's slice/filtering capabilities to return a subset of cards and do something with them. This is sort of the opposite notion of the old CardList class--whereas Header used to use CardList to get lists of cards, this uses Header to get lists of cards. """ # TODO: Consider giving this dict/list methods like Header itself def __init__(self, header): self._header = header def __repr__(self): return '\n'.join(repr(c) for c in self._header._cards) def __len__(self): return len(self._header._cards) def __iter__(self): return iter(self._header._cards) def __eq__(self, other): # If the `other` item is a scalar we will still treat it as equal if # this _CardAccessor only contains one item if not isiterable(other) or isinstance(other, str): if len(self) == 1: other = [other] else: return False for a, b in itertools.zip_longest(self, other): if a != b: return False else: return True def __ne__(self, other): return not (self == other) def __getitem__(self, item): if isinstance(item, slice) or self._header._haswildcard(item): return self.__class__(self._header[item]) idx = self._header._cardindex(item) return self._header._cards[idx] def _setslice(self, item, value): """ Helper for implementing __setitem__ on _CardAccessor subclasses; slices should always be handled in this same way. 
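
        For example (an illustrative note): a scalar value assigned to a
        slice or wildcard key is repeated for every matched card via
        `itertools.repeat`, so a hypothetical ``accessor['COMM*'] = 'x'``
        assigns the same string to each card matching ``COMM*``.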
""" if isinstance(item, slice) or self._header._haswildcard(item): if isinstance(item, slice): indices = range(*item.indices(len(self))) else: indices = self._header._wildcardmatch(item) if isinstance(value, str) or not isiterable(value): value = itertools.repeat(value, len(indices)) for idx, val in zip(indices, value): self[idx] = val return True return False collections.abc.Mapping.register(_CardAccessor) collections.abc.Sequence.register(_CardAccessor) class _HeaderComments(_CardAccessor): """ A class used internally by the Header class for the Header.comments attribute access. This object can be used to display all the keyword comments in the Header, or look up the comments on specific keywords. It allows all the same forms of keyword lookup as the Header class itself, but returns comments instead of values. """ def __iter__(self): for card in self._header._cards: yield card.comment def __repr__(self): """Returns a simple list of all keywords and their comments.""" keyword_length = KEYWORD_LENGTH for card in self._header._cards: keyword_length = max(keyword_length, len(card.keyword)) return '\n'.join('{:>{len}} {}'.format(c.keyword, c.comment, len=keyword_length) for c in self._header._cards) def __getitem__(self, item): """ Slices and filter strings return a new _HeaderComments containing the returned cards. Otherwise the comment of a single card is returned. """ item = super().__getitem__(item) if isinstance(item, _HeaderComments): # The item key was a slice return item return item.comment def __setitem__(self, item, comment): """ Set/update the comment on specified card or cards. Slice/filter updates work similarly to how Header.__setitem__ works. """ if self._header._set_slice(item, comment, self): return # In this case, key/index errors should be raised; don't update # comments of nonexistent cards idx = self._header._cardindex(item) value = self._header[idx] self._header[idx] = (value, comment) class _HeaderCommentaryCards(_CardAccessor): """ This is used to return a list-like sequence over all the values in the header for a given commentary keyword, such as HISTORY. """ def __init__(self, header, keyword=''): super().__init__(header) self._keyword = keyword self._count = self._header.count(self._keyword) self._indices = slice(self._count).indices(self._count) # __len__ and __iter__ need to be overridden from the base class due to the # different approach this class has to take for slicing def __len__(self): return len(range(*self._indices)) def __iter__(self): for idx in range(*self._indices): yield self._header[(self._keyword, idx)] def __repr__(self): return '\n'.join(self) def __getitem__(self, idx): if isinstance(idx, slice): n = self.__class__(self._header, self._keyword) n._indices = idx.indices(self._count) return n elif not isinstance(idx, int): raise ValueError('{} index must be an integer'.format(self._keyword)) idx = list(range(*self._indices))[idx] return self._header[(self._keyword, idx)] def __setitem__(self, item, value): """ Set the value of a specified commentary card or cards. Slice/filter updates work similarly to how Header.__setitem__ works. """ if self._header._set_slice(item, value, self): return # In this case, key/index errors should be raised; don't update # comments of nonexistent cards self._header[(self._keyword, item)] = value def _block_size(sep): """ Determine the size of a FITS header block if a non-blank separator is used between cards. 
""" return BLOCK_SIZE + (len(sep) * (BLOCK_SIZE // Card.length - 1)) def _pad_length(stringlen): """Bytes needed to pad the input stringlen to the next FITS block.""" return (BLOCK_SIZE - (stringlen % BLOCK_SIZE)) % BLOCK_SIZE def _check_padding(header_str, block_size, is_eof, check_block_size=True): # Strip any zero-padding (see ticket #106) if header_str and header_str[-1] == '\0': if is_eof and header_str.strip('\0') == '': # TODO: Pass this warning to validation framework warnings.warn( 'Unexpected extra padding at the end of the file. This ' 'padding may not be preserved when saving changes.', AstropyUserWarning) raise EOFError() else: # Replace the illegal null bytes with spaces as required by # the FITS standard, and issue a nasty warning # TODO: Pass this warning to validation framework warnings.warn( 'Header block contains null bytes instead of spaces for ' 'padding, and is not FITS-compliant. Nulls may be ' 'replaced with spaces upon writing.', AstropyUserWarning) header_str.replace('\0', ' ') if check_block_size and (len(header_str) % block_size) != 0: # This error message ignores the length of the separator for # now, but maybe it shouldn't? actual_len = len(header_str) - block_size + BLOCK_SIZE # TODO: Pass this error to validation framework raise ValueError('Header size is not multiple of {0}: {1}' .format(BLOCK_SIZE, actual_len))
# Licensed under a 3-clause BSD style license - see PYFITS.rst

import copy
import operator
import warnings
import weakref

from contextlib import suppress
from functools import reduce

import numpy as np

from numpy import char as chararray

from .column import (ASCIITNULL, FITS2NUMPY, ASCII2NUMPY, ASCII2STR, ColDefs,
                     _AsciiColDefs, _FormatX, _FormatP, _VLF, _get_index,
                     _wrapx, _unwrapx, _makep, Delayed)
from .util import decode_ascii, encode_ascii, _rstrip_inplace
from astropy.utils import lazyproperty


class FITS_record:
    """
    FITS record class.

    `FITS_record` is used to access records of the `FITS_rec` object.
    This will allow us to deal with scaled columns.  It also handles
    conversion/scaling of columns in ASCII tables.  The `FITS_record`
    class expects a `FITS_rec` object as input.
    """

    def __init__(self, input, row=0, start=None, end=None, step=None,
                 base=None, **kwargs):
        """
        Parameters
        ----------
        input : array
            The array to wrap.
        row : int, optional
            The starting logical row of the array.
        start : int, optional
            The starting column in the row associated with this object.
            Used for subsetting the columns of the `FITS_rec` object.
        end : int, optional
            The ending column in the row associated with this object.
            Used for subsetting the columns of the `FITS_rec` object.
        step : int, optional
            The step between columns in the subset, as for a slice.
        base : FITS_record, optional
            The parent `FITS_record` from which this object was subsetted,
            if any.
        """
        self.array = input
        self.row = row
        if base:
            width = len(base)
        else:
            width = self.array._nfields

        s = slice(start, end, step).indices(width)
        self.start, self.end, self.step = s
        self.base = base

    def __getitem__(self, key):
        if isinstance(key, str):
            indx = _get_index(self.array.names, key)

            if indx < self.start or indx > self.end - 1:
                raise KeyError("Key '{}' does not exist.".format(key))
        elif isinstance(key, slice):
            return type(self)(self.array, self.row, key.start, key.stop,
                              key.step, self)
        else:
            indx = self._get_index(key)

            if indx > self.array._nfields - 1:
                raise IndexError('Index out of bounds')

        return self.array.field(indx)[self.row]

    def __setitem__(self, key, value):
        if isinstance(key, str):
            indx = _get_index(self.array.names, key)

            if indx < self.start or indx > self.end - 1:
                raise KeyError("Key '{}' does not exist.".format(key))
        elif isinstance(key, slice):
            # Expand the slice against this record's width, then map each
            # position to the underlying field index before assigning
            for indx in range(*key.indices(len(self))):
                indx = self._get_index(indx)
                self.array.field(indx)[self.row] = value
            return
        else:
            indx = self._get_index(key)
            if indx > self.array._nfields - 1:
                raise IndexError('Index out of bounds')

        self.array.field(indx)[self.row] = value

    def __len__(self):
        return len(range(self.start, self.end, self.step))

    def __repr__(self):
        """
        Display a single row.
        """

        outlist = []
        for idx in range(len(self)):
            outlist.append(repr(self[idx]))
        return '({})'.format(', '.join(outlist))

    def field(self, field):
        """
        Get the field data of the record.
        """

        return self.__getitem__(field)

    def setfield(self, field, value):
        """
        Set the field data of the record.
        """

        self.__setitem__(field, value)

    @lazyproperty
    def _bases(self):
        bases = [weakref.proxy(self)]
        base = self.base
        while base:
            bases.append(base)
            base = base.base
        return bases

    def _get_index(self, indx):
        indices = np.ogrid[:self.array._nfields]
        for base in reversed(self._bases):
            if base.step < 1:
                s = slice(base.start, None, base.step)
            else:
                s = slice(base.start, base.end, base.step)
            indices = indices[s]
        return indices[indx]


class FITS_rec(np.recarray):
    """
    FITS record array class.

    `FITS_rec` is the data part of a table HDU.  This is a layer over the
    `~numpy.recarray`, so we can deal with scaled columns.

    It inherits all of the standard methods from `numpy.ndarray`.
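
    In short (an editorial summary of the methods below): scaled columns
    are converted lazily in `field()` and the converted arrays are cached
    in ``self._converted`` via `_cache_field()`; `_scale_back()` later
    undoes the scaling so that raw FITS storage values are written out.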
""" _record_type = FITS_record _character_as_bytes = False def __new__(subtype, input): """ Construct a FITS record array from a recarray. """ # input should be a record array if input.dtype.subdtype is None: self = np.recarray.__new__(subtype, input.shape, input.dtype, buf=input.data) else: self = np.recarray.__new__(subtype, input.shape, input.dtype, buf=input.data, strides=input.strides) self._init() if self.dtype.fields: self._nfields = len(self.dtype.fields) return self def __setstate__(self, state): meta = state[-1] column_state = state[-2] state = state[:-2] super().__setstate__(state) self._col_weakrefs = weakref.WeakSet() for attr, value in zip(meta, column_state): setattr(self, attr, value) def __reduce__(self): """ Return a 3-tuple for pickling a FITS_rec. Use the super-class functionality but then add in a tuple of FITS_rec-specific values that get used in __setstate__. """ reconst_func, reconst_func_args, state = super().__reduce__() # Define FITS_rec-specific attrs that get added to state column_state = [] meta = [] for attrs in ['_converted', '_heapoffset', '_heapsize', '_nfields', '_gap', '_uint', 'parnames', '_coldefs']: with suppress(AttributeError): # _coldefs can be Delayed, and file objects cannot be # picked, it needs to be deepcopied first if attrs == '_coldefs': column_state.append(self._coldefs.__deepcopy__(None)) else: column_state.append(getattr(self, attrs)) meta.append(attrs) state = state + (column_state, meta) return reconst_func, reconst_func_args, state def __array_finalize__(self, obj): if obj is None: return if isinstance(obj, FITS_rec): self._character_as_bytes = obj._character_as_bytes if isinstance(obj, FITS_rec) and obj.dtype == self.dtype: self._converted = obj._converted self._heapoffset = obj._heapoffset self._heapsize = obj._heapsize self._col_weakrefs = obj._col_weakrefs self._coldefs = obj._coldefs self._nfields = obj._nfields self._gap = obj._gap self._uint = obj._uint elif self.dtype.fields is not None: # This will allow regular ndarrays with fields, rather than # just other FITS_rec objects self._nfields = len(self.dtype.fields) self._converted = {} self._heapoffset = getattr(obj, '_heapoffset', 0) self._heapsize = getattr(obj, '_heapsize', 0) self._gap = getattr(obj, '_gap', 0) self._uint = getattr(obj, '_uint', False) self._col_weakrefs = weakref.WeakSet() self._coldefs = ColDefs(self) # Work around chicken-egg problem. Column.array relies on the # _coldefs attribute to set up ref back to parent FITS_rec; however # in the above line the self._coldefs has not been assigned yet so # this fails. This patches that up... for col in self._coldefs: del col.array col._parent_fits_rec = weakref.ref(self) else: self._init() def _init(self): """Initializes internal attributes specific to FITS-isms.""" self._nfields = 0 self._converted = {} self._heapoffset = 0 self._heapsize = 0 self._col_weakrefs = weakref.WeakSet() self._coldefs = None self._gap = 0 self._uint = False @classmethod def from_columns(cls, columns, nrows=0, fill=False, character_as_bytes=False): """ Given a `ColDefs` object of unknown origin, initialize a new `FITS_rec` object. .. note:: This was originally part of the ``new_table`` function in the table module but was moved into a class method since most of its functionality always had more to do with initializing a `FITS_rec` object than anything else, and much of it also overlapped with ``FITS_rec._scale_back``. Parameters ---------- columns : sequence of `Column` or a `ColDefs` The columns from which to create the table data. 
If these columns have data arrays attached that data may be used in initializing the new table. Otherwise the input columns will be used as a template for a new table with the requested number of rows. nrows : int Number of rows in the new table. If the input columns have data associated with them, the size of the largest input column is used. Otherwise the default is 0. fill : bool If `True`, will fill all cells with zeros or blanks. If `False`, copy the data from input, undefined cells will still be filled with zeros/blanks. """ if not isinstance(columns, ColDefs): columns = ColDefs(columns) # read the delayed data for column in columns: arr = column.array if isinstance(arr, Delayed): if arr.hdu.data is None: column.array = None else: column.array = _get_recarray_field(arr.hdu.data, arr.field) # Reset columns._arrays (which we may want to just do away with # altogether del columns._arrays # use the largest column shape as the shape of the record if nrows == 0: for arr in columns._arrays: if arr is not None: dim = arr.shape[0] else: dim = 0 if dim > nrows: nrows = dim raw_data = np.empty(columns.dtype.itemsize * nrows, dtype=np.uint8) raw_data.fill(ord(columns._padding_byte)) data = np.recarray(nrows, dtype=columns.dtype, buf=raw_data).view(cls) data._character_as_bytes = character_as_bytes # Make sure the data is a listener for changes to the columns columns._add_listener(data) # Previously this assignment was made from hdu.columns, but that's a # bug since if a _TableBaseHDU has a FITS_rec in its .data attribute # the _TableBaseHDU.columns property is actually returned from # .data._coldefs, so this assignment was circular! Don't make that # mistake again. # All of this is an artifact of the fragility of the FITS_rec class, # and that it can't just be initialized by columns... data._coldefs = columns # If fill is True we don't copy anything from the column arrays. We're # just using them as a template, and returning a table filled with # zeros/blanks if fill: return data # Otherwise we have to fill the recarray with data from the input # columns for idx, column in enumerate(columns): # For each column in the ColDef object, determine the number of # rows in that column. This will be either the number of rows in # the ndarray associated with the column, or the number of rows # given in the call to this function, which ever is smaller. If # the input FILL argument is true, the number of rows is set to # zero so that no data is copied from the original input data. arr = column.array if arr is None: array_size = 0 else: array_size = len(arr) n = min(array_size, nrows) # TODO: At least *some* of this logic is mostly redundant with the # _convert_foo methods in this class; see if we can eliminate some # of that duplication. if not n: # The input column had an empty array, so just use the fill # value continue field = _get_recarray_field(data, idx) name = column.name fitsformat = column.format recformat = fitsformat.recformat outarr = field[:n] inarr = arr[:n] if isinstance(recformat, _FormatX): # Data is a bit array if inarr.shape[-1] == recformat.repeat: _wrapx(inarr, outarr, recformat.repeat) continue elif isinstance(recformat, _FormatP): data._cache_field(name, _makep(inarr, field, recformat, nrows=nrows)) continue # TODO: Find a better way of determining that the column is meant # to be FITS L formatted elif recformat[-2:] == FITS2NUMPY['L'] and inarr.dtype == bool: # column is boolean # The raw data field should be filled with either 'T' or 'F' # (not 0). 
Use 'F' as a default field[:] = ord('F') # Also save the original boolean array in data._converted so # that it doesn't have to be re-converted converted = np.zeros(field.shape, dtype=bool) converted[:n] = inarr data._cache_field(name, converted) # TODO: Maybe this step isn't necessary at all if _scale_back # will handle it? inarr = np.where(inarr == np.False_, ord('F'), ord('T')) elif (columns[idx]._physical_values and columns[idx]._pseudo_unsigned_ints): # Temporary hack... bzero = column.bzero converted = np.zeros(field.shape, dtype=inarr.dtype) converted[:n] = inarr data._cache_field(name, converted) if n < nrows: # Pre-scale rows below the input data field[n:] = -bzero inarr = inarr - bzero elif isinstance(columns, _AsciiColDefs): # Regardless whether the format is character or numeric, if the # input array contains characters then it's already in the raw # format for ASCII tables if fitsformat._pseudo_logical: # Hack to support converting from 8-bit T/F characters # Normally the column array is a chararray of 1 character # strings, but we need to view it as a normal ndarray of # 8-bit ints to fill it with ASCII codes for 'T' and 'F' outarr = field.view(np.uint8, np.ndarray)[:n] elif arr.dtype.kind not in ('S', 'U'): # Set up views of numeric columns with the appropriate # numeric dtype # Fill with the appropriate blanks for the column format data._cache_field(name, np.zeros(nrows, dtype=arr.dtype)) outarr = data._converted[name][:n] outarr[:] = inarr continue if inarr.shape != outarr.shape: if (inarr.dtype.kind == outarr.dtype.kind and inarr.dtype.kind in ('U', 'S') and inarr.dtype != outarr.dtype): inarr_rowsize = inarr[0].size inarr = inarr.flatten().view(outarr.dtype) # This is a special case to handle input arrays with # non-trivial TDIMn. # By design each row of the outarray is 1-D, while each row of # the input array may be n-D if outarr.ndim > 1: # The normal case where the first dimension is the rows inarr_rowsize = inarr[0].size inarr = inarr.reshape(n, inarr_rowsize) outarr[:, :inarr_rowsize] = inarr else: # Special case for strings where the out array only has one # dimension (the second dimension is rolled up into the # strings outarr[:n] = inarr.ravel() else: outarr[:] = inarr # Now replace the original column array references with the new # fields # This is required to prevent the issue reported in # https://github.com/spacetelescope/PyFITS/issues/99 for idx in range(len(columns)): columns._arrays[idx] = data.field(idx) return data def __repr__(self): # Force use of the normal ndarray repr (rather than the new # one added for recarray in Numpy 1.10) for backwards compat return np.ndarray.__repr__(self) def __getitem__(self, key): if self._coldefs is None: return super().__getitem__(key) if isinstance(key, str): return self.field(key) # Have to view as a recarray then back as a FITS_rec, otherwise the # circular reference fix/hack in FITS_rec.field() won't preserve # the slice. out = self.view(np.recarray)[key] if type(out) is not np.recarray: # Oops, we got a single element rather than a view. In that case, # return a Record, which has no __getstate__ and is more efficient. 
return self._record_type(self, key) # We got a view; change it back to our class, and add stuff out = out.view(type(self)) out._coldefs = ColDefs(self._coldefs) arrays = [] out._converted = {} for idx, name in enumerate(self._coldefs.names): # # Store the new arrays for the _coldefs object # arrays.append(self._coldefs._arrays[idx][key]) # Ensure that the sliced FITS_rec will view the same scaled # columns as the original; this is one of the few cases where # it is not necessary to use _cache_field() if name in self._converted: dummy = self._converted[name] field = np.ndarray.__getitem__(dummy, key) out._converted[name] = field out._coldefs._arrays = arrays return out def __setitem__(self, key, value): if self._coldefs is None: return super().__setitem__(key, value) if isinstance(key, str): self[key][:] = value return if isinstance(key, slice): end = min(len(self), key.stop or len(self)) end = max(0, end) start = max(0, key.start or 0) end = min(end, start + len(value)) for idx in range(start, end): self.__setitem__(idx, value[idx - start]) return if isinstance(value, FITS_record): for idx in range(self._nfields): self.field(self.names[idx])[key] = value.field(self.names[idx]) elif isinstance(value, (tuple, list, np.void)): if self._nfields == len(value): for idx in range(self._nfields): self.field(idx)[key] = value[idx] else: raise ValueError('Input tuple or list required to have {} ' 'elements.'.format(self._nfields)) else: raise TypeError('Assignment requires a FITS_record, tuple, or ' 'list as input.') def _ipython_key_completions_(self): return self.names def copy(self, order='C'): """ The Numpy documentation lies; `numpy.ndarray.copy` is not equivalent to `numpy.copy`. Differences include that it re-views the copied array as self's ndarray subclass, as though it were taking a slice; this means ``__array_finalize__`` is called and the copy shares all the array attributes (including ``._converted``!). So we need to make a deep copy of all those attributes so that the two arrays truly do not share any data. """ new = super().copy(order=order) new.__dict__ = copy.deepcopy(self.__dict__) return new @property def columns(self): """ A user-visible accessor for the coldefs. See https://aeon.stsci.edu/ssb/trac/pyfits/ticket/44 """ return self._coldefs @property def _coldefs(self): # This used to be a normal internal attribute, but it was changed to a # property as a quick and transparent way to work around the reference # leak bug fixed in https://github.com/astropy/astropy/pull/4539 # # See the long comment in the Column.array property for more details # on this. But in short, FITS_rec now has a ._col_weakrefs attribute # which is a WeakSet of weakrefs to each Column in _coldefs. # # So whenever ._coldefs is set we also add each Column in the ColDefs # to the weakrefs set. This is an easy way to find out if a Column has # any references to it external to the FITS_rec (i.e. a user assigned a # column to a variable). If the column is still in _col_weakrefs then # there are other references to it external to this FITS_rec. We use # that information in __del__ to save off copies of the array data # for those columns to their Column.array property before our memory # is freed. 
return self.__dict__.get('_coldefs') @_coldefs.setter def _coldefs(self, cols): self.__dict__['_coldefs'] = cols if isinstance(cols, ColDefs): for col in cols.columns: self._col_weakrefs.add(col) @_coldefs.deleter def _coldefs(self): try: del self.__dict__['_coldefs'] except KeyError as exc: raise AttributeError(exc.args[0]) def __del__(self): try: del self._coldefs if self.dtype.fields is not None: for col in self._col_weakrefs: if col.array is not None: col.array = col.array.copy() # See issues #4690 and #4912 except (AttributeError, TypeError): # pragma: no cover pass @property def names(self): """List of column names.""" if self.dtype.fields: return list(self.dtype.names) elif getattr(self, '_coldefs', None) is not None: return self._coldefs.names else: return None @property def formats(self): """List of column FITS formats.""" if getattr(self, '_coldefs', None) is not None: return self._coldefs.formats return None @property def _raw_itemsize(self): """ Returns the size of row items that would be written to the raw FITS file, taking into account the possibility of unicode columns being compactified. Currently for internal use only. """ if _has_unicode_fields(self): total_itemsize = 0 for field in self.dtype.fields.values(): itemsize = field[0].itemsize if field[0].kind == 'U': itemsize = itemsize // 4 total_itemsize += itemsize return total_itemsize else: # Just return the normal itemsize return self.itemsize def field(self, key): """ A view of a `Column`'s data as an array. """ # NOTE: The *column* index may not be the same as the field index in # the recarray, if the column is a phantom column column = self.columns[key] name = column.name format = column.format if format.dtype.itemsize == 0: warnings.warn( 'Field {!r} has a repeat count of 0 in its format code, ' 'indicating an empty field.'.format(key)) return np.array([], dtype=format.dtype) # If field's base is a FITS_rec, we can run into trouble because it # contains a reference to the ._coldefs object of the original data; # this can lead to a circular reference; see ticket #49 base = self while (isinstance(base, FITS_rec) and isinstance(base.base, np.recarray)): base = base.base # base could still be a FITS_rec in some cases, so take care to # use rec.recarray.field to avoid a potential infinite # recursion field = _get_recarray_field(base, name) if name not in self._converted: recformat = format.recformat # TODO: If we're now passing the column to these subroutines, do we # really need to pass them the recformat? if isinstance(recformat, _FormatP): # for P format converted = self._convert_p(column, field, recformat) else: # Handle all other column data types which are fixed-width # fields converted = self._convert_other(column, field, recformat) # Note: Never assign values directly into the self._converted dict; # always go through self._cache_field; this way self._converted is # only used to store arrays that are not already direct views of # our own data. self._cache_field(name, converted) return converted return self._converted[name] def _cache_field(self, name, field): """ Do not store fields in _converted if one of its bases is self, or if it has a common base with self. This results in a reference cycle that cannot be broken since ndarrays do not participate in cyclic garbage collection. 
""" base = field while True: self_base = self while True: if self_base is base: return if getattr(self_base, 'base', None) is not None: self_base = self_base.base else: break if getattr(base, 'base', None) is not None: base = base.base else: break self._converted[name] = field def _update_column_attribute_changed(self, column, idx, attr, old_value, new_value): """ Update how the data is formatted depending on changes to column attributes initiated by the user through the `Column` interface. Dispatches column attribute change notifications to individual methods for each attribute ``_update_column_<attr>`` """ method_name = '_update_column_{0}'.format(attr) if hasattr(self, method_name): # Right now this is so we can be lazy and not implement updaters # for every attribute yet--some we may not need at all, TBD getattr(self, method_name)(column, idx, old_value, new_value) def _update_column_name(self, column, idx, old_name, name): """Update the dtype field names when a column name is changed.""" dtype = self.dtype # Updating the names on the dtype should suffice dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1:] def _convert_x(self, field, recformat): """Convert a raw table column to a bit array as specified by the FITS X format. """ dummy = np.zeros(self.shape + (recformat.repeat,), dtype=np.bool_) _unwrapx(field, dummy, recformat.repeat) return dummy def _convert_p(self, column, field, recformat): """Convert a raw table column of FITS P or Q format descriptors to a VLA column with the array data returned from the heap. """ dummy = _VLF([None] * len(self), dtype=recformat.dtype) raw_data = self._get_raw_data() if raw_data is None: raise OSError( "Could not find heap data for the {!r} variable-length " "array column.".format(column.name)) for idx in range(len(self)): offset = field[idx, 1] + self._heapoffset count = field[idx, 0] if recformat.dtype == 'a': dt = np.dtype(recformat.dtype + str(1)) arr_len = count * dt.itemsize da = raw_data[offset:offset + arr_len].view(dt) da = np.char.array(da.view(dtype=dt), itemsize=count) dummy[idx] = decode_ascii(da) else: dt = np.dtype(recformat.dtype) arr_len = count * dt.itemsize dummy[idx] = raw_data[offset:offset + arr_len].view(dt) dummy[idx].dtype = dummy[idx].dtype.newbyteorder('>') # Each array in the field may now require additional # scaling depending on the other scaling parameters # TODO: The same scaling parameters apply to every # array in the column so this is currently very slow; we # really only need to check once whether any scaling will # be necessary and skip this step if not # TODO: Test that this works for X format; I don't think # that it does--the recformat variable only applies to the P # format not the X format dummy[idx] = self._convert_other(column, dummy[idx], recformat) return dummy def _convert_ascii(self, column, field): """ Special handling for ASCII table columns to convert columns containing numeric types to actual numeric arrays from the string representation. """ format = column.format recformat = ASCII2NUMPY[format[0]] # if the string = TNULL, return ASCIITNULL nullval = str(column.null).strip().encode('ascii') if len(nullval) > format.width: nullval = nullval[:format.width] # Before using .replace make sure that any trailing bytes in each # column are filled with spaces, and *not*, say, nulls; this causes # functions like replace to potentially leave gibberish bytes in the # array buffer. 
dummy = np.char.ljust(field, format.width) dummy = np.char.replace(dummy, encode_ascii('D'), encode_ascii('E')) null_fill = encode_ascii(str(ASCIITNULL).rjust(format.width)) # Convert all fields equal to the TNULL value (nullval) to empty fields. # TODO: These fields really should be conerted to NaN or something else undefined. # Currently they are converted to empty fields, which are then set to zero. dummy = np.where(np.char.strip(dummy) == nullval, null_fill, dummy) # always replace empty fields, see https://github.com/astropy/astropy/pull/5394 if nullval != b'': dummy = np.where(np.char.strip(dummy) == b'', null_fill, dummy) try: dummy = np.array(dummy, dtype=recformat) except ValueError as exc: indx = self.names.index(column.name) raise ValueError( '{}; the header may be missing the necessary TNULL{} ' 'keyword or the table contains invalid data'.format( exc, indx + 1)) return dummy def _convert_other(self, column, field, recformat): """Perform conversions on any other fixed-width column data types. This may not perform any conversion at all if it's not necessary, in which case the original column array is returned. """ if isinstance(recformat, _FormatX): # special handling for the X format return self._convert_x(field, recformat) (_str, _bool, _number, _scale, _zero, bscale, bzero, dim) = \ self._get_scale_factors(column) indx = self.names.index(column.name) # ASCII table, convert strings to numbers # TODO: # For now, check that these are ASCII columns by checking the coldefs # type; in the future all columns (for binary tables, ASCII tables, or # otherwise) should "know" what type they are already and how to handle # converting their data from FITS format to native format and vice # versa... if not _str and isinstance(self._coldefs, _AsciiColDefs): field = self._convert_ascii(column, field) # Test that the dimensions given in dim are sensible; otherwise # display a warning and ignore them if dim: # See if the dimensions already match, if not, make sure the # number items will fit in the specified dimensions if field.ndim > 1: actual_shape = field.shape[1:] if _str: actual_shape = actual_shape + (field.itemsize,) else: actual_shape = field.shape[0] if dim == actual_shape: # The array already has the correct dimensions, so we # ignore dim and don't convert dim = None else: nitems = reduce(operator.mul, dim) if _str: actual_nitems = field.itemsize elif len(field.shape) == 1: # No repeat count in TFORMn, equivalent to 1 actual_nitems = 1 else: actual_nitems = field.shape[1] if nitems > actual_nitems: warnings.warn( 'TDIM{} value {:d} does not fit with the size of ' 'the array items ({:d}). TDIM{:d} will be ignored.' 
.format(indx + 1, self._coldefs[indx].dims, actual_nitems, indx + 1)) dim = None # further conversion for both ASCII and binary tables # For now we've made columns responsible for *knowing* whether their # data has been scaled, but we make the FITS_rec class responsible for # actually doing the scaling # TODO: This also needs to be fixed in the effort to make Columns # responsible for scaling their arrays to/from FITS native values if not column.ascii and column.format.p_format: format_code = column.format.p_format else: # TODO: Rather than having this if/else it might be nice if the # ColumnFormat class had an attribute guaranteed to give the format # of actual values in a column regardless of whether the true # format is something like P or Q format_code = column.format.format if (_number and (_scale or _zero) and not column._physical_values): # This is to handle pseudo unsigned ints in table columns # TODO: For now this only really works correctly for binary tables # Should it work for ASCII tables as well? if self._uint: if bzero == 2**15 and format_code == 'I': field = np.array(field, dtype=np.uint16) elif bzero == 2**31 and format_code == 'J': field = np.array(field, dtype=np.uint32) elif bzero == 2**63 and format_code == 'K': field = np.array(field, dtype=np.uint64) bzero64 = np.uint64(2 ** 63) else: field = np.array(field, dtype=np.float64) else: field = np.array(field, dtype=np.float64) if _scale: np.multiply(field, bscale, field) if _zero: if self._uint and format_code == 'K': # There is a chance of overflow, so be careful test_overflow = field.copy() try: test_overflow += bzero64 except OverflowError: warnings.warn( "Overflow detected while applying TZERO{0:d}. " "Returning unscaled data.".format(indx + 1)) else: field = test_overflow else: field += bzero # mark the column as scaled column._physical_values = True elif _bool and field.dtype != bool: field = np.equal(field, ord('T')) elif _str: if not self._character_as_bytes: with suppress(UnicodeDecodeError): field = decode_ascii(field) if dim: # Apply the new field item dimensions nitems = reduce(operator.mul, dim) if field.ndim > 1: field = field[:, :nitems] if _str: fmt = field.dtype.char dtype = ('|{}{}'.format(fmt, dim[-1]), dim[:-1]) field.dtype = dtype else: field.shape = (field.shape[0],) + dim return field def _get_heap_data(self): """ Returns a pointer into the table's raw data to its heap (if present). This is returned as a numpy byte array. """ if self._heapsize: raw_data = self._get_raw_data().view(np.ubyte) heap_end = self._heapoffset + self._heapsize return raw_data[self._heapoffset:heap_end] else: return np.array([], dtype=np.ubyte) def _get_raw_data(self): """ Returns the base array of self that "raw data array" that is the array in the format that it was first read from a file before it was sliced or viewed as a different type in any way. This is determined by walking through the bases until finding one that has at least the same number of bytes as self, plus the heapsize. This may be the immediate .base but is not always. This is used primarily for variable-length array support which needs to be able to find the heap (the raw data *may* be larger than nbytes + heapsize if it contains a gap or padding). May return ``None`` if no array resembling the "raw data" according to the stated criteria can be found. 
""" raw_data_bytes = self.nbytes + self._heapsize base = self while hasattr(base, 'base') and base.base is not None: base = base.base if hasattr(base, 'nbytes') and base.nbytes >= raw_data_bytes: return base def _get_scale_factors(self, column): """Get all the scaling flags and factors for one column.""" # TODO: Maybe this should be a method/property on Column? Or maybe # it's not really needed at all... _str = column.format.format == 'A' _bool = column.format.format == 'L' _number = not (_bool or _str) bscale = column.bscale bzero = column.bzero _scale = bscale not in ('', None, 1) _zero = bzero not in ('', None, 0) # ensure bscale/bzero are numbers if not _scale: bscale = 1 if not _zero: bzero = 0 # column._dims gives a tuple, rather than column.dim which returns the # original string format code from the FITS header... dim = column._dims return (_str, _bool, _number, _scale, _zero, bscale, bzero, dim) def _scale_back(self, update_heap_pointers=True): """ Update the parent array, using the (latest) scaled array. If ``update_heap_pointers`` is `False`, this will leave all the heap pointers in P/Q columns as they are verbatim--it only makes sense to do this if there is already data on the heap and it can be guaranteed that that data has not been modified, and there is not new data to add to the heap. Currently this is only used as an optimization for CompImageHDU that does its own handling of the heap. """ # Running total for the new heap size heapsize = 0 for indx, name in enumerate(self.dtype.names): column = self._coldefs[indx] recformat = column.format.recformat raw_field = _get_recarray_field(self, indx) # add the location offset of the heap area for each # variable length column if isinstance(recformat, _FormatP): # Irritatingly, this can return a different dtype than just # doing np.dtype(recformat.dtype); but this returns the results # that we want. For example if recformat.dtype is 'a' we want # an array of characters. 
dtype = np.array([], dtype=recformat.dtype).dtype if update_heap_pointers and name in self._converted: # The VLA has potentially been updated, so we need to # update the array descriptors raw_field[:] = 0 # reset npts = [len(arr) for arr in self._converted[name]] raw_field[:len(npts), 0] = npts raw_field[1:, 1] = (np.add.accumulate(raw_field[:-1, 0]) * dtype.itemsize) raw_field[:, 1][:] += heapsize heapsize += raw_field[:, 0].sum() * dtype.itemsize # Even if this VLA has not been read or updated, we need to # include the size of its constituent arrays in the heap size # total if isinstance(recformat, _FormatX) and name in self._converted: _wrapx(self._converted[name], raw_field, recformat.repeat) continue _str, _bool, _number, _scale, _zero, bscale, bzero, _ = \ self._get_scale_factors(column) field = self._converted.get(name, raw_field) # conversion for both ASCII and binary tables if _number or _str: if _number and (_scale or _zero) and column._physical_values: dummy = field.copy() if _zero: dummy -= bzero if _scale: dummy /= bscale # This will set the raw values in the recarray back to # their non-physical storage values, so the column should # be mark is not scaled column._physical_values = False elif _str or isinstance(self._coldefs, _AsciiColDefs): dummy = field else: continue # ASCII table, convert numbers to strings if isinstance(self._coldefs, _AsciiColDefs): self._scale_back_ascii(indx, dummy, raw_field) # binary table string column elif isinstance(raw_field, chararray.chararray): self._scale_back_strings(indx, dummy, raw_field) # all other binary table columns else: if len(raw_field) and isinstance(raw_field[0], np.integer): dummy = np.around(dummy) if raw_field.shape == dummy.shape: raw_field[:] = dummy else: # Reshaping the data is necessary in cases where the # TDIMn keyword was used to shape a column's entries # into arrays raw_field[:] = dummy.ravel().view(raw_field.dtype) del dummy # ASCII table does not have Boolean type elif _bool and name in self._converted: choices = (np.array([ord('F')], dtype=np.int8)[0], np.array([ord('T')], dtype=np.int8)[0]) raw_field[:] = np.choose(field, choices) # Store the updated heapsize self._heapsize = heapsize def _scale_back_strings(self, col_idx, input_field, output_field): # There are a few possibilities this has to be able to handle properly # The input_field, which comes from the _converted column is of dtype # 'Un' so that elements read out of the array are normal str # objects (i.e. unicode strings) # # At the other end the *output_field* may also be of type 'S' or of # type 'U'. It will *usually* be of type 'S' because when reading # an existing FITS table the raw data is just ASCII strings, and # represented in Numpy as an S array. However, when a user creates # a new table from scratch, they *might* pass in a column containing # unicode strings (dtype 'U'). Therefore the output_field of the # raw array is actually a unicode array. But we still want to make # sure the data is encodable as ASCII. Later when we write out the # array we use, in the dtype 'U' case, a different write routine # that writes row by row and encodes any 'U' columns to ASCII. 
# If the output_field is non-ASCII we will worry about ASCII encoding # later when writing; otherwise we can do it right here if input_field.dtype.kind == 'U' and output_field.dtype.kind == 'S': try: _ascii_encode(input_field, out=output_field) except _UnicodeArrayEncodeError as exc: raise ValueError( "Could not save column '{0}': Contains characters that " "cannot be encoded as ASCII as required by FITS, starting " "at the index {1!r} of the column, and the index {2} of " "the string at that location.".format( self._coldefs[col_idx].name, exc.index[0] if len(exc.index) == 1 else exc.index, exc.start)) else: # Otherwise go ahead and do a direct copy into--if both are type # 'U' we'll handle encoding later input_field = input_field.flatten().view(output_field.dtype) output_field.flat[:] = input_field # Ensure that blanks at the end of each string are # converted to nulls instead of spaces, see Trac #15 # and #111 _rstrip_inplace(output_field) def _scale_back_ascii(self, col_idx, input_field, output_field): """ Convert internal array values back to ASCII table representation. The ``input_field`` is the internal representation of the values, and the ``output_field`` is the character array representing the ASCII output that will be written. """ starts = self._coldefs.starts[:] spans = self._coldefs.spans format = self._coldefs[col_idx].format # The the index of the "end" column of the record, beyond # which we can't write end = super().field(-1).itemsize starts.append(end + starts[-1]) if col_idx > 0: lead = starts[col_idx] - starts[col_idx - 1] - spans[col_idx - 1] else: lead = 0 if lead < 0: warnings.warn('Column {!r} starting point overlaps the previous ' 'column.'.format(col_idx + 1)) trail = starts[col_idx + 1] - starts[col_idx] - spans[col_idx] if trail < 0: warnings.warn('Column {!r} ending point overlaps the next ' 'column.'.format(col_idx + 1)) # TODO: It would be nice if these string column formatting # details were left to a specialized class, as is the case # with FormatX and FormatP if 'A' in format: _pc = '{:' else: _pc = '{:>' fmt = ''.join([_pc, format[1:], ASCII2STR[format[0]], '}', (' ' * trail)]) # Even if the format precision is 0, we should output a decimal point # as long as there is space to do so--not including a decimal point in # a float value is discouraged by the FITS Standard trailing_decimal = (format.precision == 0 and format.format in ('F', 'E', 'D')) # not using numarray.strings's num2char because the # result is not allowed to expand (as C/Python does). for jdx, value in enumerate(input_field): value = fmt.format(value) if len(value) > starts[col_idx + 1] - starts[col_idx]: raise ValueError( "Value {!r} does not fit into the output's itemsize of " "{}.".format(value, spans[col_idx])) if trailing_decimal and value[0] == ' ': # We have some extra space in the field for the trailing # decimal point value = value[1:] + '.' output_field[jdx] = value # Replace exponent separator in floating point numbers if 'D' in format: output_field[:] = output_field.replace(b'E', b'D') def _get_recarray_field(array, key): """ Compatibility function for using the recarray base class's field method. This incorporates the legacy functionality of returning string arrays as Numeric-style chararray objects. 
""" # Numpy >= 1.10.dev recarray no longer returns chararrays for strings # This is currently needed for backwards-compatibility and for # automatic truncation of trailing whitespace field = np.recarray.field(array, key) if (field.dtype.char in ('S', 'U') and not isinstance(field, chararray.chararray)): field = field.view(chararray.chararray) return field class _UnicodeArrayEncodeError(UnicodeEncodeError): def __init__(self, encoding, object_, start, end, reason, index): super().__init__(encoding, object_, start, end, reason) self.index = index def _ascii_encode(inarray, out=None): """ Takes a unicode array and fills the output string array with the ASCII encodings (if possible) of the elements of the input array. The two arrays must be the same size (though not necessarily the same shape). This is like an inplace version of `np.char.encode` though simpler since it's only limited to ASCII, and hence the size of each character is guaranteed to be 1 byte. If any strings are non-ASCII an UnicodeArrayEncodeError is raised--this is just a `UnicodeEncodeError` with an additional attribute for the index of the item that couldn't be encoded. """ out_dtype = np.dtype(('S{0}'.format(inarray.dtype.itemsize // 4), inarray.dtype.shape)) if out is not None: out = out.view(out_dtype) op_dtypes = [inarray.dtype, out_dtype] op_flags = [['readonly'], ['writeonly', 'allocate']] it = np.nditer([inarray, out], op_dtypes=op_dtypes, op_flags=op_flags, flags=['zerosize_ok']) try: for initem, outitem in it: outitem[...] = initem.item().encode('ascii') except UnicodeEncodeError as exc: index = np.unravel_index(it.iterindex, inarray.shape) raise _UnicodeArrayEncodeError(*(exc.args + (index,))) return it.operands[1] def _has_unicode_fields(array): """ Returns True if any fields in a structured array have Unicode dtype. """ dtypes = (d[0] for d in array.dtype.fields.values()) return any(d.kind == 'U' for d in dtypes)
# Licensed under a 3-clause BSD style license - see LICENSE.rst import re import warnings from collections import defaultdict, OrderedDict import numpy as np from . import Header, Card from astropy import units as u from astropy.coordinates import EarthLocation from astropy.table import Column from astropy.time import Time, TimeDelta from astropy.time.core import BARYCENTRIC_SCALES from astropy.time.formats import FITS_DEPRECATED_SCALES from astropy.utils.exceptions import AstropyUserWarning # The following is based on the FITS WCS Paper IV, "Representations of time # coordinates in FITS". # http://adsabs.harvard.edu/abs/2015A%26A...574A..36R # FITS WCS standard specified "4-3" form for non-linear coordinate types TCTYP_RE_TYPE = re.compile(r'(?P<type>[A-Z]+)[-]+') TCTYP_RE_ALGO = re.compile(r'(?P<algo>[A-Z]+)\s*') # FITS Time standard specified time units FITS_TIME_UNIT = ['s', 'd', 'a', 'cy', 'min', 'h', 'yr', 'ta', 'Ba'] # Global time reference coordinate keywords TIME_KEYWORDS = ('TIMESYS', 'MJDREF', 'JDREF', 'DATEREF', 'TREFPOS', 'TREFDIR', 'TIMEUNIT', 'TIMEOFFS', 'OBSGEO-X', 'OBSGEO-Y', 'OBSGEO-Z', 'OBSGEO-L', 'OBSGEO-B', 'OBSGEO-H', 'DATE', 'DATE-OBS', 'DATE-AVG', 'DATE-BEG', 'DATE-END', 'MJD-OBS', 'MJD-AVG', 'MJD-BEG', 'MJD-END') # Column-specific time override keywords COLUMN_TIME_KEYWORDS = ('TCTYP', 'TCUNI', 'TRPOS') # Column-specific keywords regex COLUMN_TIME_KEYWORD_REGEXP = '({0})[0-9]+'.format( '|'.join(COLUMN_TIME_KEYWORDS)) def is_time_column_keyword(keyword): """ Check if the FITS header keyword is a time column-specific keyword. Parameters ---------- keyword : str FITS keyword. """ return re.match(COLUMN_TIME_KEYWORD_REGEXP, keyword) is not None # Set astropy time global information GLOBAL_TIME_INFO = {'TIMESYS': ('UTC', 'Default time scale'), 'JDREF': (0.0, 'Time columns are jd = jd1 + jd2'), 'TREFPOS': ('TOPOCENTER', 'Time reference position')} def _verify_global_info(global_info): """ Given the global time reference frame information, verify that each global time coordinate attribute will be given a valid value. Parameters ---------- global_info : dict Global time reference frame information. """ # Translate FITS deprecated scale into astropy scale, or else just convert # to lower case for further checks. global_info['scale'] = FITS_DEPRECATED_SCALES.get(global_info['TIMESYS'], global_info['TIMESYS'].lower()) # Verify global time scale if global_info['scale'] not in Time.SCALES: # 'GPS' and 'LOCAL' are FITS recognized time scale values # but are not supported by astropy. if global_info['scale'] == 'gps': warnings.warn( 'Global time scale (TIMESYS) has a FITS recognized time scale ' 'value "GPS". In Astropy, "GPS" is a time from epoch format ' 'which runs synchronously with TAI; GPS is approximately 19 s ' 'ahead of TAI. Hence, this format will be used.', AstropyUserWarning) # Assume that the values are in GPS format global_info['scale'] = 'tai' global_info['format'] = 'gps' if global_info['scale'] == 'local': warnings.warn( 'Global time scale (TIMESYS) has a FITS recognized time scale ' 'value "LOCAL". However, the standard states that "LOCAL" should be ' 'tied to one of the existing scales because it is intrinsically ' 'unreliable and/or ill-defined. Astropy will thus use the default ' 'global time scale "UTC" instead of "LOCAL".', AstropyUserWarning) # Default scale 'UTC' global_info['scale'] = 'utc' global_info['format'] = None else: raise AssertionError( 'Global time scale (TIMESYS) should have a FITS recognized ' 'time scale value (got {!r}). 
The FITS standard states that ' 'the use of local time scales should be restricted to alternate ' 'coordinates.'.format(global_info['TIMESYS'])) else: # Scale is already set global_info['format'] = None # Check if geocentric global location is specified obs_geo = [global_info[attr] for attr in ('OBSGEO-X', 'OBSGEO-Y', 'OBSGEO-Z') if attr in global_info] # Location full specification is (X, Y, Z) if len(obs_geo) == 3: global_info['location'] = EarthLocation.from_geocentric(*obs_geo, unit=u.m) else: # Check if geodetic global location is specified (since geocentric failed) # First warn the user if geocentric location is partially specified if obs_geo: warnings.warn( 'The geocentric observatory location {} is not completely ' 'specified (X, Y, Z) and will be ignored.'.format(obs_geo), AstropyUserWarning) # Check geodetic location obs_geo = [global_info[attr] for attr in ('OBSGEO-L', 'OBSGEO-B', 'OBSGEO-H') if attr in global_info] if len(obs_geo) == 3: global_info['location'] = EarthLocation.from_geodetic(*obs_geo) else: # Since both geocentric and geodetic locations are not specified, # location will be None. # Warn the user if geodetic location is partially specified if obs_geo: warnings.warn( 'The geodetic observatory location {} is not completely ' 'specified (lon, lat, alt) and will be ignored.'.format(obs_geo), AstropyUserWarning) global_info['location'] = None # Get global time reference # Keywords are listed in order of precedence, as stated by the standard for key, format_ in (('MJDREF', 'mjd'), ('JDREF', 'jd'), ('DATEREF', 'fits')): if key in global_info: global_info['ref_time'] = {'val': global_info[key], 'format': format_} break else: # If none of the three keywords is present, MJDREF = 0.0 must be assumed global_info['ref_time'] = {'val': 0, 'format': 'mjd'} def _verify_column_info(column_info, global_info): """ Given the column-specific time reference frame information, verify that each column-specific time coordinate attribute has a valid value. Return True if the coordinate column is time, or else return False. Parameters ---------- global_info : dict Global time reference frame information. column_info : dict Column-specific time reference frame override information. """ scale = column_info.get('TCTYP', None) unit = column_info.get('TCUNI', None) location = column_info.get('TRPOS', None) if scale is not None: # Non-linear coordinate types have "4-3" form and are not time coordinates if TCTYP_RE_TYPE.match(scale[:5]) and TCTYP_RE_ALGO.match(scale[5:]): return False elif scale.lower() in Time.SCALES: column_info['scale'] = scale.lower() column_info['format'] = None elif scale in FITS_DEPRECATED_SCALES.keys(): column_info['scale'] = FITS_DEPRECATED_SCALES[scale] column_info['format'] = None # TCTYPn (scale) = 'TIME' indicates that the column scale is # controlled by the global scale. elif scale == 'TIME': column_info['scale'] = global_info['scale'] column_info['format'] = global_info['format'] elif scale == 'GPS': warnings.warn( 'Table column "{}" has a FITS recognized time scale value "GPS". ' 'In Astropy, "GPS" is a time from epoch format which runs ' 'synchronously with TAI; GPS runs ahead of TAI approximately ' 'by 19 s. Hence, this format will be used.'.format(column_info), AstropyUserWarning) column_info['scale'] = 'tai' column_info['format'] = 'gps' elif scale == 'LOCAL': warnings.warn( 'Table column "{}" has a FITS recognized time scale value "LOCAL". 
                'However, the standard states that "LOCAL" should be tied to '
                'one of the existing scales because it is intrinsically '
                'unreliable and/or ill-defined. Astropy will thus use the '
                'global time scale (TIMESYS) as the default.'.format(
                    column_info), AstropyUserWarning)
            column_info['scale'] = global_info['scale']
            column_info['format'] = global_info['format']

        else:
            # Coordinate type is either an unrecognized local time scale
            # or a linear coordinate type
            return False

    # If TCUNIn is a time unit or TRPOSn is specified, the column is a time
    # coordinate. This has to be tested since TCTYP (scale) is not specified.
    elif (unit is not None and unit in FITS_TIME_UNIT) or location is not None:
        column_info['scale'] = global_info['scale']
        column_info['format'] = global_info['format']

    # None of the conditions for time coordinate columns is satisfied
    else:
        return False

    # Check if column-specific reference position TRPOSn is specified
    if location is not None:
        # Observatory position (location) needs to be specified only
        # for 'TOPOCENTER'.
        if location == 'TOPOCENTER':
            column_info['location'] = global_info['location']
            if column_info['location'] is None:
                warnings.warn(
                    'Time column reference position "TRPOSn" value is '
                    '"TOPOCENTER". However, the observatory position is not '
                    'properly specified. The FITS standard does not support '
                    'this and hence reference position will be ignored.',
                    AstropyUserWarning)
        else:
            column_info['location'] = None

    # Since TRPOSn is not specified, global reference position is considered.
    elif global_info['TREFPOS'] == 'TOPOCENTER':
        column_info['location'] = global_info['location']
        if column_info['location'] is None:
            warnings.warn(
                'Time column reference position "TRPOSn" is not specified. '
                'The default value for it is "TOPOCENTER", but due to '
                'unspecified observatory position, reference position will '
                'be ignored.', AstropyUserWarning)
    else:
        column_info['location'] = None

    # Get reference time
    column_info['ref_time'] = global_info['ref_time']

    return True


def _get_info_if_time_column(col, global_info):
    """
    Check if a column without corresponding time column keywords in the
    FITS header represents time or not. If yes, return the time column
    information needed for its conversion to Time.
    This is only applicable to the special-case where a column has the
    name 'TIME' and a time unit.
    """
    # Column with TTYPEn = 'TIME' and lacking any TC*n or time
    # specific keywords will be controlled by the global keywords.
    if col.info.name.upper() == 'TIME' and col.info.unit in FITS_TIME_UNIT:
        column_info = {'scale': global_info['scale'],
                       'format': global_info['format'],
                       'ref_time': global_info['ref_time'],
                       'location': None}

        if global_info['TREFPOS'] == 'TOPOCENTER':
            column_info['location'] = global_info['location']
            if column_info['location'] is None:
                warnings.warn(
                    'Time column "{}" reference position will be ignored '
                    'due to unspecified observatory position.'.format(
                        col.info.name), AstropyUserWarning)

        return column_info

    return None


def _convert_global_time(table, global_info):
    """
    Convert the table metadata for time informational keywords to astropy
    Time.

    Parameters
    ----------
    table : `~astropy.table.Table`
        The table whose time metadata is to be converted.
    global_info : dict
        Global time reference frame information.
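    Notes
    -----
    For example, the ``DATE`` keyword is always read as a UTC
    `~astropy.time.Time`, while ``DATE-xxx`` and ``MJD-xxx`` keywords are
    read using the global ``TIMESYS`` scale.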
""" # Read in Global Informational keywords as Time for key, value in global_info.items(): # FITS uses a subset of ISO-8601 for DATE-xxx if key.startswith('DATE'): if key not in table.meta: scale = 'utc' if key == 'DATE' else global_info['scale'] try: precision = len(value.split('.')[-1]) if '.' in value else 0 value = Time(value, format='fits', scale=scale, precision=precision) except ValueError: pass table.meta[key] = value # MJD-xxx in MJD according to TIMESYS elif key.startswith('MJD-'): if key not in table.meta: try: value = Time(value, format='mjd', scale=global_info['scale']) except ValueError: pass table.meta[key] = value def _convert_time_column(col, column_info): """ Convert time columns to astropy Time columns. Parameters ---------- col : `~astropy.table.Column` The time coordinate column to be converted to Time. column_info : dict Column-specific time reference frame override information. """ # The code might fail while attempting to read FITS files not written by astropy. try: # ISO-8601 is the only string representation of time in FITS if col.info.dtype.kind in ['S', 'U']: # [+/-C]CCYY-MM-DD[Thh:mm:ss[.s...]] where the number of characters # from index 20 to the end of string represents the precision precision = max(int(col.info.dtype.str[2:]) - 20, 0) return Time(col, format='fits', scale=column_info['scale'], precision=precision, location=column_info['location']) if column_info['format'] == 'gps': return Time(col, format='gps', location=column_info['location']) # If reference value is 0 for JD or MJD, the column values can be # directly converted to Time, as they are absolute (relative # to a globally accepted zero point). if (column_info['ref_time']['val'] == 0 and column_info['ref_time']['format'] in ['jd', 'mjd']): # (jd1, jd2) where jd = jd1 + jd2 if col.shape[-1] == 2 and col.ndim > 1: return Time(col[..., 0], col[..., 1], scale=column_info['scale'], format=column_info['ref_time']['format'], location=column_info['location']) else: return Time(col, scale=column_info['scale'], format=column_info['ref_time']['format'], location=column_info['location']) # Reference time ref_time = Time(column_info['ref_time']['val'], scale=column_info['scale'], format=column_info['ref_time']['format'], location=column_info['location']) # Elapsed time since reference time if col.shape[-1] == 2 and col.ndim > 1: delta_time = TimeDelta(col[..., 0], col[..., 1]) else: delta_time = TimeDelta(col) return ref_time + delta_time except Exception as err: warnings.warn( 'The exception "{}" was encountered while trying to convert the time ' 'column "{}" to Astropy Time.'.format(err, col.info.name), AstropyUserWarning) return col def fits_to_time(hdr, table): """ Read FITS binary table time columns as `~astropy.time.Time`. This method reads the metadata associated with time coordinates, as stored in a FITS binary table header, converts time columns into `~astropy.time.Time` columns and reads global reference times as `~astropy.time.Time` instances. Parameters ---------- hdr : `~astropy.io.fits.header.Header` FITS Header table : `~astropy.table.Table` The table whose time columns are to be read as Time Returns ------- hdr : `~astropy.io.fits.header.Header` Modified FITS Header (time metadata removed) """ # Set defaults for global time scale, reference, etc. global_info = {'TIMESYS': 'UTC', 'TREFPOS': 'TOPOCENTER'} # Set default dictionary for time columns time_columns = defaultdict(OrderedDict) # Make a "copy" (not just a view) of the input header, since it # may get modified. 
the data is still a "view" (for now) hcopy = hdr.copy(strip=True) # Scan the header for global and column-specific time keywords for key, value, comment in hdr.cards: if key in TIME_KEYWORDS: global_info[key] = value hcopy.remove(key) elif is_time_column_keyword(key): base, idx = re.match(r'([A-Z]+)([0-9]+)', key).groups() time_columns[int(idx)][base] = value hcopy.remove(key) elif (value in ('OBSGEO-X', 'OBSGEO-Y', 'OBSGEO-Z') and re.match('TTYPE[0-9]+', key)): global_info[value] = table[value] # Verify and get the global time reference frame information _verify_global_info(global_info) _convert_global_time(table, global_info) # Columns with column-specific time (coordinate) keywords if time_columns: for idx, column_info in time_columns.items(): # Check if the column is time coordinate (not spatial) if _verify_column_info(column_info, global_info): colname = table.colnames[idx - 1] # Convert to Time table[colname] = _convert_time_column(table[colname], column_info) # Check for special-cases of time coordinate columns for idx, colname in enumerate(table.colnames): if (idx + 1) not in time_columns: column_info = _get_info_if_time_column(table[colname], global_info) if column_info: table[colname] = _convert_time_column(table[colname], column_info) return hcopy def time_to_fits(table): """ Replace Time columns in a Table with non-mixin columns containing each element as a vector of two doubles (jd1, jd2) and return a FITS header with appropriate time coordinate keywords. jd = jd1 + jd2 represents time in the Julian Date format with high-precision. Parameters ---------- table : `~astropy.table.Table` The table whose Time columns are to be replaced. Returns ------- table : `~astropy.table.Table` The table with replaced Time columns hdr : `~astropy.io.fits.header.Header` Header containing global time reference frame FITS keywords """ # Shallow copy of the input table newtable = table.copy(copy_data=False) # Global time coordinate frame keywords hdr = Header([Card(keyword=key, value=val[0], comment=val[1]) for key, val in GLOBAL_TIME_INFO.items()]) # Store coordinate column-specific metadata newtable.meta['__coordinate_columns__'] = defaultdict(OrderedDict) coord_meta = newtable.meta['__coordinate_columns__'] time_cols = table.columns.isinstance(Time) # Geocentric location location = None for col in time_cols: # By default, Time objects are written in full precision, i.e. we store both # jd1 and jd2 (serialize_method['fits'] = 'jd1_jd2'). Formatted values for # Time can be stored if the user explicitly chooses to do so. if col.info.serialize_method['fits'] == 'formatted_value': newtable.replace_column(col.info.name, Column(col.value)) continue # The following is necessary to deal with multi-dimensional ``Time`` objects # (i.e. where Time.shape is non-trivial). 
        jd12 = np.array([col.jd1, col.jd2])
        # Roll the 0th (innermost) axis backwards, until it lies in the last
        # position (jd12.ndim)
        jd12 = np.rollaxis(jd12, 0, jd12.ndim)
        newtable.replace_column(col.info.name, Column(jd12, unit='d'))

        # Get column position(index)
        n = table.colnames.index(col.info.name) + 1

        # Time column-specific override keywords
        coord_meta[col.info.name]['coord_type'] = col.scale.upper()
        coord_meta[col.info.name]['coord_unit'] = 'd'

        # Time column reference position
        if getattr(col, 'location') is None:
            if location is not None:
                warnings.warn(
                    'Time Column "{}" has no specified location, but global '
                    'Time Position is present, which will be the default '
                    'for this column in FITS specification.'.format(
                        col.info.name), AstropyUserWarning)
        else:
            coord_meta[col.info.name]['time_ref_pos'] = 'TOPOCENTER'
            # Compatibility of Time Scales and Reference Positions
            if col.scale in BARYCENTRIC_SCALES:
                warnings.warn(
                    'Earth Location "TOPOCENTER" for Time Column "{}" is '
                    'incompatible with scale "{}".'.format(
                        col.info.name, col.scale.upper()),
                    AstropyUserWarning)

            if location is None:
                # Set global geocentric location
                location = col.location
                if location.size > 1:
                    for dim in ('x', 'y', 'z'):
                        newtable.add_column(
                            Column(getattr(location, dim).to_value(u.m)),
                            name='OBSGEO-{}'.format(dim.upper()))
                else:
                    hdr.extend([Card(keyword='OBSGEO-{}'.format(dim.upper()),
                                     value=getattr(location, dim).to_value(u.m))
                                for dim in ('x', 'y', 'z')])
            elif location != col.location:
                raise ValueError('Multiple Time Columns with different '
                                 'geocentric observatory locations ({}, {}) '
                                 'encountered. This is not supported by the '
                                 'FITS standard.'
                                 .format(location, col.location))

    return newtable, hdr
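
# Illustrative sketch (not part of the module API): a minimal use of
# ``time_to_fits`` above. The table and column names are hypothetical.
# ``time_to_fits`` replaces the Time column with a plain (jd1, jd2) Column
# in days and returns a header carrying the global time keywords (TIMESYS,
# JDREF, TREFPOS); the column-specific TCTYPn/TRPOSn cards are attached
# later by the FITS writer from the ``__coordinate_columns__`` metadata.
def _example_time_to_fits():
    from astropy.table import Table

    t = Table()
    t['obs_time'] = Time(['1999-01-01T00:00:00', '2010-01-01T00:00:00'],
                         format='isot', scale='utc')
    # newtable['obs_time'] is now a Column of (jd1, jd2) pairs in days
    newtable, hdr = time_to_fits(t)
    return newtable, hdr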
# Licensed under a 3-clause BSD style license - see PYFITS.rst

"""
Convenience functions
=====================

The functions in this module provide shortcuts for some of the most basic
operations on FITS files, such as reading and updating the header.  They are
included directly in the 'astropy.io.fits' namespace so that they can be used
like::

    astropy.io.fits.getheader(...)

These functions are primarily for convenience when working with FITS files in
the command-line interpreter.  If performing several operations on the same
file, such as in a script, it is better to *not* use these functions, as each
one must open and re-parse the file.  In such cases it is better to use
:func:`astropy.io.fits.open` and work directly with the
:class:`astropy.io.fits.HDUList` object and underlying HDU objects.

Several of the convenience functions, such as `getheader` and `getdata`
support special arguments for selecting which extension HDU to use when
working with a multi-extension FITS file.  There are a few supported argument
formats for selecting the extension.  See the documentation for `getdata` for
an explanation of all the different formats.

.. warning::
    All arguments to convenience functions other than the filename that are
    *not* for selecting the extension HDU should be passed in as keyword
    arguments.  This is to avoid ambiguity and conflicts with the extension
    arguments.  For example, to set NAXIS=1 on the Primary HDU:

    Wrong::

        astropy.io.fits.setval('myimage.fits', 'NAXIS', 1)

    The above example will try to set the NAXIS value on the first extension
    HDU to blank.  That is, the argument '1' is assumed to specify an
    extension HDU.

    Right::

        astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1)

    This will set the NAXIS keyword to 1 on the primary HDU (the default).
    To specify the first extension HDU use::

        astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1, ext=1)

    This complexity arises out of the attempt to simultaneously support
    multiple argument formats that were used in past versions of PyFITS.
    Unfortunately, it is not possible to support all formats without
    introducing some ambiguity.  A future Astropy release may standardize
    around a single format and officially deprecate the other formats.
"""

import operator
import os
import warnings

import numpy as np

from .diff import FITSDiff, HDUDiff
from .file import FILE_MODES, _File
from .hdu.base import _BaseHDU, _ValidHDU
from .hdu.hdulist import fitsopen, HDUList
from .hdu.image import PrimaryHDU, ImageHDU
from .hdu.table import BinTableHDU
from .header import Header
from .util import fileobj_closed, fileobj_name, fileobj_mode, _is_int
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.decorators import deprecated_renamed_argument


__all__ = ['getheader', 'getdata', 'getval', 'setval', 'delval', 'writeto',
           'append', 'update', 'info', 'tabledump', 'tableload',
           'table_to_hdu', 'printdiff']


def getheader(filename, *args, **kwargs):
    """
    Get the header from an extension of a FITS file.

    Parameters
    ----------
    filename : file path, file object, or file like object
        File to get header from.  If an opened file object, its mode must be
        one of the following: rb, rb+, or ab+.

    ext, extname, extver
        The rest of the arguments are for extension specification.  See the
        `getdata` documentation for explanations/examples.

    kwargs
        Any additional keyword arguments to be passed to
        `astropy.io.fits.open`.
    Returns
    -------
    header : `Header` object
    """
    mode, closed = _get_file_mode(filename)
    hdulist, extidx = _getext(filename, mode, *args, **kwargs)
    try:
        hdu = hdulist[extidx]
        header = hdu.header
    finally:
        hdulist.close(closed=closed)

    return header


def getdata(filename, *args, header=None, lower=None, upper=None, view=None,
            **kwargs):
    """
    Get the data from an extension of a FITS file (and optionally the
    header).

    Parameters
    ----------
    filename : file path, file object, or file like object
        File to get data from.  If opened, mode must be one of the
        following: rb, rb+, or ab+.

    ext
        The rest of the arguments are for extension specification.
        They are flexible and are best illustrated by examples.

        No extra arguments implies the primary header::

            getdata('in.fits')

        By extension number::

            getdata('in.fits', 0)      # the primary header
            getdata('in.fits', 2)      # the second extension
            getdata('in.fits', ext=2)  # the second extension

        By name, i.e., ``EXTNAME`` value (if unique)::

            getdata('in.fits', 'sci')
            getdata('in.fits', extname='sci')  # equivalent

        Note ``EXTNAME`` values are not case sensitive.

        By combination of ``EXTNAME`` and ``EXTVER`` as separate
        arguments or as a tuple::

            getdata('in.fits', 'sci', 2)  # EXTNAME='SCI' & EXTVER=2
            getdata('in.fits', extname='sci', extver=2)  # equivalent
            getdata('in.fits', ('sci', 2))  # equivalent

        Ambiguous or conflicting specifications will raise an exception::

            getdata('in.fits', ext=('sci', 1), extname='err', extver=2)

    header : bool, optional
        If `True`, return the data and the header of the specified HDU as a
        tuple.

    lower, upper : bool, optional
        If ``lower`` or ``upper`` are `True`, the field names in the
        returned data object will be converted to lower or upper case,
        respectively.

    view : ndarray, optional
        When given, the data will be returned wrapped in the given ndarray
        subclass by calling::

            data.view(view)

    kwargs
        Any additional keyword arguments to be passed to
        `astropy.io.fits.open`.

    Returns
    -------
    array : array, record array or groups data object
        Type depends on the type of the extension being referenced.

        If the optional keyword ``header`` is set to `True`, this
        function will return a (``data``, ``header``) tuple.
    """
    mode, closed = _get_file_mode(filename)
    hdulist, extidx = _getext(filename, mode, *args, **kwargs)
    try:
        hdu = hdulist[extidx]
        data = hdu.data
        if data is None and extidx == 0:
            try:
                hdu = hdulist[1]
                data = hdu.data
            except IndexError:
                raise IndexError('No data in this HDU.')
        if data is None:
            raise IndexError('No data in this HDU.')
        if header:
            hdr = hdu.header
    finally:
        hdulist.close(closed=closed)

    # Change case of names if requested
    trans = None
    if lower:
        trans = operator.methodcaller('lower')
    elif upper:
        trans = operator.methodcaller('upper')
    if trans:
        if data.dtype.names is None:
            # this data does not have fields
            return
        if data.dtype.descr[0][0] == '':
            # this data does not have fields
            return
        data.dtype.names = [trans(n) for n in data.dtype.names]

    # allow different views into the underlying ndarray.  Keep the original
    # view just in case there is a problem
    if isinstance(view, type) and issubclass(view, np.ndarray):
        data = data.view(view)

    if header:
        return data, hdr
    else:
        return data


def getval(filename, keyword, *args, **kwargs):
    """
    Get a keyword's value from a header in a FITS file.

    Parameters
    ----------
    filename : file path, file object, or file like object
        Name of the FITS file, or file object (if opened, mode must be
        one of the following: rb, rb+, or ab+).

    keyword : str
        Keyword name

    ext, extname, extver
        The rest of the arguments are for extension specification.
        See `getdata` for explanations/examples.

    kwargs
        Any additional keyword arguments to be passed to
        `astropy.io.fits.open`.
        *Note:* This function automatically specifies
        ``do_not_scale_image_data = True`` when opening the file so that
        values can be retrieved from the unmodified header.

    Returns
    -------
    keyword value : str, int, or float
    """
    if 'do_not_scale_image_data' not in kwargs:
        kwargs['do_not_scale_image_data'] = True

    hdr = getheader(filename, *args, **kwargs)
    return hdr[keyword]


def setval(filename, keyword, *args, value=None, comment=None, before=None,
           after=None, savecomment=False, **kwargs):
    """
    Set a keyword's value in a header in a FITS file.

    If the keyword already exists, its value/comment will be updated.
    If it does not exist, a new card will be created and it will be
    placed before or after the specified location.  If no ``before`` or
    ``after`` is specified, it will be appended at the end.

    When updating more than one keyword in a file, this convenience
    function is a much less efficient approach compared with opening
    the file for update, modifying the header, and closing the file.

    Parameters
    ----------
    filename : file path, file object, or file like object
        Name of the FITS file, or file object.  If opened, mode must be
        update (rb+).  An opened file object or `~gzip.GzipFile` object will
        be closed upon return.

    keyword : str
        Keyword name

    value : str, int, float, optional
        Keyword value (default: `None`, meaning don't modify)

    comment : str, optional
        Keyword comment, (default: `None`, meaning don't modify)

    before : str, int, optional
        Name of the keyword, or index of the card before which the new card
        will be placed.  The argument ``before`` takes precedence over
        ``after`` if both are specified (default: `None`).

    after : str, int, optional
        Name of the keyword, or index of the card after which the new card
        will be placed. (default: `None`).

    savecomment : bool, optional
        When `True`, preserve the current comment for an existing keyword.
        The argument ``savecomment`` takes precedence over ``comment`` if
        both specified.  If ``comment`` is not specified then the current
        comment will automatically be preserved (default: `False`).

    ext, extname, extver
        The rest of the arguments are for extension specification.
        See `getdata` for explanations/examples.

    kwargs
        Any additional keyword arguments to be passed to
        `astropy.io.fits.open`.
        *Note:* This function automatically specifies
        ``do_not_scale_image_data = True`` when opening the file so that
        values can be retrieved from the unmodified header.
    """
    if 'do_not_scale_image_data' not in kwargs:
        kwargs['do_not_scale_image_data'] = True

    closed = fileobj_closed(filename)
    hdulist, extidx = _getext(filename, 'update', *args, **kwargs)
    try:
        if keyword in hdulist[extidx].header and savecomment:
            comment = None
        hdulist[extidx].header.set(keyword, value, comment, before, after)
    finally:
        hdulist.close(closed=closed)


def delval(filename, keyword, *args, **kwargs):
    """
    Delete all instances of keyword from a header in a FITS file.

    Parameters
    ----------
    filename : file path, file object, or file like object
        Name of the FITS file, or file object.  If opened, mode must be
        update (rb+).  An opened file object or `~gzip.GzipFile` object will
        be closed upon return.

    keyword : str, int
        Keyword name or index

    ext, extname, extver
        The rest of the arguments are for extension specification.
        See `getdata` for explanations/examples.

    kwargs
        Any additional keyword arguments to be passed to
        `astropy.io.fits.open`.
        *Note:* This function automatically specifies
        ``do_not_scale_image_data = True`` when opening the file so that
        values can be retrieved from the unmodified header.
    """
    if 'do_not_scale_image_data' not in kwargs:
        kwargs['do_not_scale_image_data'] = True

    closed = fileobj_closed(filename)
    hdulist, extidx = _getext(filename, 'update', *args, **kwargs)
    try:
        del hdulist[extidx].header[keyword]
    finally:
        hdulist.close(closed=closed)


@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def writeto(filename, data, header=None, output_verify='exception',
            overwrite=False, checksum=False):
    """
    Create a new FITS file using the supplied data/header.

    Parameters
    ----------
    filename : file path, file object, or file like object
        File to write to.  If opened, must be opened in a writeable binary
        mode such as 'wb' or 'ab+'.

    data : array, record array, or groups data object
        data to write to the new file

    header : `Header` object, optional
        the header associated with ``data``.  If `None`, a header of the
        appropriate type is created for the supplied data.  This argument
        is optional.

    output_verify : str
        Output verification option.  Must be one of ``"fix"``,
        ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``.  May
        also be any combination of ``"fix"`` or ``"silentfix"`` with
        ``"+ignore"``, ``"+warn"``, or ``"+exception"``
        (e.g. ``"fix+warn"``).  See :ref:`verify` for more info.

    overwrite : bool, optional
        If ``True``, overwrite the output file if it exists. Raises an
        ``OSError`` if ``False`` and the output file exists. Default is
        ``False``.

        .. versionchanged:: 1.3
           ``overwrite`` replaces the deprecated ``clobber`` argument.

    checksum : bool, optional
        If `True`, adds both ``DATASUM`` and ``CHECKSUM`` cards to the
        headers of all HDU's written to the file.
    """
    hdu = _makehdu(data, header)
    if hdu.is_image and not isinstance(hdu, PrimaryHDU):
        hdu = PrimaryHDU(data, header=header)
    hdu.writeto(filename, overwrite=overwrite, output_verify=output_verify,
                checksum=checksum)


def table_to_hdu(table, character_as_bytes=False):
    """
    Convert an `~astropy.table.Table` object to a FITS
    `~astropy.io.fits.BinTableHDU`.

    Parameters
    ----------
    table : astropy.table.Table
        The table to convert.
    character_as_bytes : bool
        Whether to return bytes for string columns when accessed from the
        HDU.  By default this is `False` and (unicode) strings are returned,
        but for large tables this may use up a lot of memory.

    Returns
    -------
    table_hdu : `~astropy.io.fits.BinTableHDU`
        The FITS binary table HDU.
    """
    # Avoid circular imports
    from .connect import is_column_keyword, REMOVE_KEYWORDS
    from .column import python_to_tdisp

    # Header to store Time related metadata
    hdr = None

    # Not all tables with mixin columns are supported
    if table.has_mixin_columns:
        # Import is done here, in order to avoid it at build time as erfa is
        # not yet available then.
        from astropy.table.column import BaseColumn
        from astropy.time import Time
        from astropy.units import Quantity
        from .fitstime import time_to_fits

        # Only those columns which are instances of BaseColumn, Quantity or
        # Time can be written
        unsupported_cols = table.columns.not_isinstance(
            (BaseColumn, Quantity, Time))
        if unsupported_cols:
            unsupported_names = [col.info.name for col in unsupported_cols]
            raise ValueError('cannot write table with mixin column(s) {0}'
                             .format(unsupported_names))

        time_cols = table.columns.isinstance(Time)
        if time_cols:
            table, hdr = time_to_fits(table)

    # Create a new HDU object
    if table.masked:
        # float column's default mask value needs to be NaN
        for column in table.columns.values():
            fill_value = column.get_fill_value()
            if column.dtype.kind == 'f' and np.allclose(fill_value, 1e20):
                column.set_fill_value(np.nan)

        # TODO: it might be better to construct the FITS table directly from
        # the Table columns, rather than go via a structured array.
        table_hdu = BinTableHDU.from_columns(np.array(table.filled()),
                                             header=hdr,
                                             character_as_bytes=True)
        for col in table_hdu.columns:
            # Binary FITS tables support TNULL *only* for integer data columns
            # TODO: Determine a schema for handling non-integer masked columns
            # in FITS (if at all possible)
            int_formats = ('B', 'I', 'J', 'K')
            if not (col.format in int_formats or
                    col.format.p_format in int_formats):
                continue

            # The astype is necessary because if the string column is less
            # than one character, the fill value will be N/A by default which
            # is too long, and so no values will get masked.
            fill_value = table[col.name].get_fill_value()
            col.null = fill_value.astype(table[col.name].dtype)
    else:
        table_hdu = BinTableHDU.from_columns(
            np.array(table.filled()), header=hdr,
            character_as_bytes=character_as_bytes)

    # Set units and format display for output HDU
    for col in table_hdu.columns:
        if table[col.name].info.format is not None:
            # check for boolean types, special format case
            logical = table[col.name].info.dtype == bool

            tdisp_format = python_to_tdisp(table[col.name].info.format,
                                           logical_dtype=logical)
            if tdisp_format is not None:
                col.disp = tdisp_format

        unit = table[col.name].unit
        if unit is not None:
            # Local imports to avoid importing units when it is not required,
            # e.g. for command-line scripts
            from astropy.units import Unit
            from astropy.units.format.fits import UnitScaleError
            try:
                col.unit = unit.to_string(format='fits')
            except UnitScaleError:
                scale = unit.scale
                raise UnitScaleError(
                    "The column '{0}' could not be stored in FITS format "
                    "because it has a scale '({1})' that "
                    "is not recognized by the FITS standard. Either scale "
                    "the data or change the units.".format(col.name,
                                                           str(scale)))
            except ValueError:
                warnings.warn(
                    "The unit '{0}' could not be saved to FITS format".format(
                        unit.to_string()), AstropyUserWarning)

            # Try creating a Unit to issue a warning if the unit is not
            # FITS compliant
            Unit(col.unit, format='fits', parse_strict='warn')

    # Column-specific override keywords for coordinate columns
    coord_meta = table.meta.pop('__coordinate_columns__', {})
    for col_name, col_info in coord_meta.items():
        col = table_hdu.columns[col_name]
        # Set the column coordinate attributes from data saved earlier.
        # Note: have to set all three, even if we have no data.
        for attr in 'coord_type', 'coord_unit', 'time_ref_pos':
            setattr(col, attr, col_info.get(attr, None))

    for key, value in table.meta.items():
        if is_column_keyword(key.upper()) or key.upper() in REMOVE_KEYWORDS:
            warnings.warn(
                "Meta-data keyword {0} will be ignored since it conflicts "
                "with a FITS reserved keyword".format(key),
                AstropyUserWarning)
            # Skip reserved keywords so they are actually ignored
            continue

        # Convert to FITS format
        if key == 'comments':
            key = 'comment'

        if isinstance(value, list):
            for item in value:
                try:
                    table_hdu.header.append((key, item))
                except ValueError:
                    warnings.warn(
                        "Attribute `{0}` of type {1} cannot be added to "
                        "FITS Header - skipping".format(key, type(value)),
                        AstropyUserWarning)
        else:
            try:
                table_hdu.header[key] = value
            except ValueError:
                warnings.warn(
                    "Attribute `{0}` of type {1} cannot be added to FITS "
                    "Header - skipping".format(key, type(value)),
                    AstropyUserWarning)

    return table_hdu


def append(filename, data, header=None, checksum=False, verify=True,
           **kwargs):
    """
    Append the header/data to FITS file if filename exists, create if not.

    If only ``data`` is supplied, a minimal header is created.

    Parameters
    ----------
    filename : file path, file object, or file like object
        File to write to.  If opened, must be opened for update (rb+) unless
        it is a new file, then it must be opened for append (ab+).  A file or
        `~gzip.GzipFile` object opened for update will be closed after return.

    data : array, table, or group data object
        the new data used for appending

    header : `Header` object, optional
        The header associated with ``data``.  If `None`, an appropriate
        header will be created for the data object supplied.

    checksum : bool, optional
        When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the
        header of the HDU when written to the file.

    verify : bool, optional
        When `True`, the existing FITS file will be read in to verify it for
        correctness before appending.  When `False`, content is simply
        appended to the end of the file.  Setting ``verify`` to `False` can
        be much faster.

    kwargs
        Additional arguments are passed to:

        - `~astropy.io.fits.writeto` if the file does not exist or is empty.
          In this case ``output_verify`` is the only possible argument.
        - `~astropy.io.fits.open` if ``verify`` is True or if ``filename``
          is a file object.
        - Otherwise no additional arguments can be used.

    """
    name, closed, noexist_or_empty = _stat_filename_or_fileobj(filename)

    if noexist_or_empty:
        #
        # The input file or file like object either doesn't exist or is
        # empty.  Use the writeto convenience function to write the
        # output to the empty object.
        #
        writeto(filename, data, header, checksum=checksum, **kwargs)
    else:
        hdu = _makehdu(data, header)
        if isinstance(hdu, PrimaryHDU):
            hdu = ImageHDU(data, header)

        if verify or not closed:
            f = fitsopen(filename, mode='append', **kwargs)
            try:
                f.append(hdu)

                # Set a flag in the HDU so that only this HDU gets a checksum
                # when writing the file.
                hdu._output_checksum = checksum
            finally:
                f.close(closed=closed)
        else:
            f = _File(filename, mode='append')
            try:
                hdu._output_checksum = checksum
                hdu._writeto(f)
            finally:
                f.close()


def update(filename, data, *args, **kwargs):
    """
    Update the specified extension with the input data/header.

    Parameters
    ----------
    filename : file path, file object, or file like object
        File to update.  If opened, mode must be update (rb+).  An opened
        file object or `~gzip.GzipFile` object will be closed upon return.

    data : array, table, or group data object
        the new data used for updating

    header : `Header` object, optional
        The header associated with ``data``.
        If `None`, an appropriate header will be created for the data object
        supplied.

    ext, extname, extver
        The rest of the arguments are flexible: the 3rd argument can be the
        header associated with the data.  If the 3rd argument is not a
        `Header`, it (and other positional arguments) are assumed to be the
        extension specification(s).  Header and extension specs can also be
        keyword arguments.  For example::

            update(file, dat, hdr, 'sci')  # update the 'sci' extension
            update(file, dat, 3)  # update the 3rd extension
            update(file, dat, hdr, 3)  # update the 3rd extension
            update(file, dat, 'sci', 2)  # update the 2nd SCI extension
            update(file, dat, 3, header=hdr)  # update the 3rd extension
            update(file, dat, header=hdr, ext=5)  # update the 5th extension

    kwargs
        Any additional keyword arguments to be passed to
        `astropy.io.fits.open`.
    """
    # The arguments to this function are a bit trickier to deal with than
    # others in this module, since the documentation has promised that the
    # header argument can be an optional positional argument.
    if args and isinstance(args[0], Header):
        header = args[0]
        args = args[1:]
    else:
        header = None
    # The header can also be a keyword argument--if both are provided the
    # keyword takes precedence
    header = kwargs.pop('header', header)

    new_hdu = _makehdu(data, header)

    closed = fileobj_closed(filename)

    hdulist, _ext = _getext(filename, 'update', *args, **kwargs)
    try:
        hdulist[_ext] = new_hdu
    finally:
        hdulist.close(closed=closed)


def info(filename, output=None, **kwargs):
    """
    Print the summary information on a FITS file.

    This includes the name, type, length of header, data shape and type
    for each extension.

    Parameters
    ----------
    filename : file path, file object, or file like object
        FITS file to obtain info from.  If opened, mode must be one of
        the following: rb, rb+, or ab+ (i.e. the file must be readable).

    output : file, bool, optional
        A file-like object to write the output to.  If ``False``, does not
        output to a file and instead returns a list of tuples representing
        the HDU info.  Writes to ``sys.stdout`` by default.

    kwargs
        Any additional keyword arguments to be passed to
        `astropy.io.fits.open`.
        *Note:* This function sets ``ignore_missing_end=True`` by default.
    """
    mode, closed = _get_file_mode(filename, default='readonly')
    # Set the default value for the ignore_missing_end parameter
    if 'ignore_missing_end' not in kwargs:
        kwargs['ignore_missing_end'] = True

    f = fitsopen(filename, mode=mode, **kwargs)
    try:
        ret = f.info(output=output)
    finally:
        if closed:
            f.close()

    return ret


def printdiff(inputa, inputb, *args, **kwargs):
    """
    Compare two parts of a FITS file, including entire FITS files,
    FITS `HDUList` objects and FITS ``HDU`` objects.

    Parameters
    ----------
    inputa : str, `HDUList` object, or ``HDU`` object
        The filename of a FITS file, `HDUList`, or ``HDU``
        object to compare to ``inputb``.

    inputb : str, `HDUList` object, or ``HDU`` object
        The filename of a FITS file, `HDUList`, or ``HDU``
        object to compare to ``inputa``.

    ext, extname, extver
        Additional positional arguments are for extension specification if
        your inputs are string filenames (will not work if ``inputa`` and
        ``inputb`` are ``HDU`` objects or `HDUList` objects).  They are
        flexible and are best illustrated by examples.  In addition to using
        these arguments positionally you can directly call the keyword
        parameters ``ext``, ``extname``.
        By extension number::

            printdiff('inA.fits', 'inB.fits', 0)      # the primary HDU
            printdiff('inA.fits', 'inB.fits', 2)      # the second extension
            printdiff('inA.fits', 'inB.fits', ext=2)  # the second extension

        By name, i.e., ``EXTNAME`` value (if unique).  ``EXTNAME`` values
        are not case sensitive::

            printdiff('inA.fits', 'inB.fits', 'sci')
            printdiff('inA.fits', 'inB.fits', extname='sci')  # equivalent

        By combination of ``EXTNAME`` and ``EXTVER`` as separate
        arguments or as a tuple::

            printdiff('inA.fits', 'inB.fits', 'sci', 2)    # EXTNAME='SCI'
                                                           # & EXTVER=2
            printdiff('inA.fits', 'inB.fits', extname='sci', extver=2)
                                                           # equivalent
            printdiff('inA.fits', 'inB.fits', ('sci', 2))  # equivalent

        Ambiguous or conflicting specifications will raise an exception::

            printdiff('inA.fits', 'inB.fits',
                      ext=('sci', 1), extname='err', extver=2)

    kwargs
        Any additional keyword arguments to be passed to
        `~astropy.io.fits.FITSDiff`.

    Notes
    -----
    The primary use for the `printdiff` function is to allow a quick printout
    of a FITS difference report; it writes to ``sys.stdout``.
    To save the diff report to a file please use `~astropy.io.fits.FITSDiff`
    directly.
    """
    # Pop extension keywords
    extension = {key: kwargs.pop(key) for key in ['ext', 'extname', 'extver']
                 if key in kwargs}
    has_extensions = args or extension

    if isinstance(inputa, str) and has_extensions:
        # Use the handy _getext to interpret any ext keywords, but
        # will need to close a if this fails
        modea, closeda = _get_file_mode(inputa)
        modeb, closedb = _get_file_mode(inputb)

        hdulista, extidxa = _getext(inputa, modea, *args, **extension)
        # Have to close a if b doesn't make it
        try:
            hdulistb, extidxb = _getext(inputb, modeb, *args, **extension)
        except Exception:
            hdulista.close(closed=closeda)
            raise

        try:
            hdua = hdulista[extidxa]
            hdub = hdulistb[extidxb]
            # See below print for note
            print(HDUDiff(hdua, hdub, **kwargs).report())
        finally:
            hdulista.close(closed=closeda)
            hdulistb.close(closed=closedb)

    # If input is not a string, can feed HDU objects or HDUList directly,
    # but can't currently handle extensions
    elif isinstance(inputa, _ValidHDU) and has_extensions:
        raise ValueError("Cannot use extension keywords when providing an "
                         "HDU object.")
    elif isinstance(inputa, _ValidHDU) and not has_extensions:
        print(HDUDiff(inputa, inputb, **kwargs).report())
    elif isinstance(inputa, HDUList) and has_extensions:
        raise NotImplementedError("Extension specification with HDUList "
                                  "objects not implemented.")
    # This function is EXCLUSIVELY for printing the diff report to screen
    # in a one-liner call, hence the use of print instead of logging
    else:
        print(FITSDiff(inputa, inputb, **kwargs).report())


@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def tabledump(filename, datafile=None, cdfile=None, hfile=None, ext=1,
              overwrite=False):
    """
    Dump a table HDU to a file in ASCII format.  The table may be
    dumped in three separate files, one containing column definitions,
    one containing header parameters, and one for table data.

    Parameters
    ----------
    filename : file path, file object or file-like object
        Input fits file.

    datafile : file path, file object or file-like object, optional
        Output data file.  The default is the root name of the input
        fits file appended with an underscore, followed by the
        extension number (ext), followed by the extension ``.txt``.

    cdfile : file path, file object or file-like object, optional
        Output column definitions file.  The default is `None`,
        no column definitions output is produced.

    hfile : file path, file object or file-like object, optional
        Output header parameters file.
        The default is `None`, no header parameters output is produced.

    ext : int
        The number of the extension containing the table HDU to be dumped.

    overwrite : bool, optional
        If ``True``, overwrite the output file if it exists. Raises an
        ``OSError`` if ``False`` and the output file exists. Default is
        ``False``.

        .. versionchanged:: 1.3
           ``overwrite`` replaces the deprecated ``clobber`` argument.

    Notes
    -----
    The primary use for the `tabledump` function is to allow editing in a
    standard text editor of the table data and parameters.  The
    `tableload` function can be used to reassemble the table from the
    three ASCII files.
    """
    # allow file object to already be opened in any of the valid modes
    # and leave the file in the same state (opened or closed) as when
    # the function was called
    mode, closed = _get_file_mode(filename, default='readonly')
    f = fitsopen(filename, mode=mode)

    # Create the default data file name if one was not provided
    try:
        if not datafile:
            root, tail = os.path.splitext(f._file.name)
            datafile = root + '_' + repr(ext) + '.txt'

        # Dump the data from the HDU to the files
        f[ext].dump(datafile, cdfile, hfile, overwrite)
    finally:
        if closed:
            f.close()


if isinstance(tabledump.__doc__, str):
    tabledump.__doc__ += BinTableHDU._tdump_file_format.replace('\n', '\n    ')


def tableload(datafile, cdfile, hfile=None):
    """
    Create a table from the input ASCII files.  The input is from up
    to three separate files, one containing column definitions, one
    containing header parameters, and one containing column data.

    The header parameters file is not required.  When the header
    parameters file is absent a minimal header is constructed.

    Parameters
    ----------
    datafile : file path, file object or file-like object
        Input data file containing the table data in ASCII format.

    cdfile : file path, file object or file-like object
        Input column definition file containing the names, formats,
        display formats, physical units, multidimensional array
        dimensions, undefined values, scale factors, and offsets
        associated with the columns in the table.

    hfile : file path, file object or file-like object, optional
        Input parameter definition file containing the header
        parameter definitions to be associated with the table.
        If `None`, a minimal header is constructed.

    Notes
    -----
    The primary use for the `tableload` function is to allow the input of
    ASCII table data and parameters that were edited in a standard text
    editor.  The `tabledump` function can be used to create the initial
    ASCII files.
    """
    return BinTableHDU.load(datafile, cdfile, hfile, replace=True)


if isinstance(tableload.__doc__, str):
    tableload.__doc__ += BinTableHDU._tdump_file_format.replace('\n', '\n    ')


def _getext(filename, mode, *args, ext=None, extname=None, extver=None,
            **kwargs):
    """
    Open the input file, return the `HDUList` and the extension.

    This supports several different styles of extension selection.  See the
    :func:`getdata()` documentation for the different possibilities.
    """
    err_msg = ('Redundant/conflicting extension argument(s): {}'.format(
        {'args': args, 'ext': ext, 'extname': extname, 'extver': extver}))

    # This code would be much simpler if just one way of specifying an
    # extension were picked.  But now we need to support all possible ways
    # for the time being.
    if len(args) == 1:
        # Must be either an extension number, an extension name, or an
        # (extname, extver) tuple
        if _is_int(args[0]) or (isinstance(ext, tuple) and len(ext) == 2):
            if ext is not None or extname is not None or extver is not None:
                raise TypeError(err_msg)
            ext = args[0]
        elif isinstance(args[0], str):
            # The first arg is an extension name; it could still be valid
            # to provide an extver kwarg
            if ext is not None or extname is not None:
                raise TypeError(err_msg)
            extname = args[0]
        else:
            # Take whatever we have as the ext argument; we'll validate it
            # below
            ext = args[0]
    elif len(args) == 2:
        # Must be an extname and extver
        if ext is not None or extname is not None or extver is not None:
            raise TypeError(err_msg)
        extname = args[0]
        extver = args[1]
    elif len(args) > 2:
        raise TypeError('Too many positional arguments.')

    if (ext is not None and
            not (_is_int(ext) or
                 (isinstance(ext, tuple) and len(ext) == 2 and
                  isinstance(ext[0], str) and _is_int(ext[1])))):
        raise ValueError(
            'The ext keyword must be either an extension number '
            '(zero-indexed) or a (extname, extver) tuple.')
    if extname is not None and not isinstance(extname, str):
        raise ValueError('The extname argument must be a string.')
    if extver is not None and not _is_int(extver):
        raise ValueError('The extver argument must be an integer.')

    if ext is None and extname is None and extver is None:
        ext = 0
    elif ext is not None and (extname is not None or extver is not None):
        raise TypeError(err_msg)
    elif extname:
        if extver:
            ext = (extname, extver)
        else:
            ext = (extname, 1)
    elif extver and extname is None:
        raise TypeError('extver alone cannot specify an extension.')

    hdulist = fitsopen(filename, mode=mode, **kwargs)

    return hdulist, ext


def _makehdu(data, header):
    if header is None:
        header = Header()
    hdu = _BaseHDU(data, header)
    if hdu.__class__ in (_BaseHDU, _ValidHDU):
        # The HDU type was unrecognized, possibly due to a
        # nonexistent/incomplete header
        if ((isinstance(data, np.ndarray) and data.dtype.fields is not None)
                or isinstance(data, np.recarray)):
            hdu = BinTableHDU(data, header=header)
        elif isinstance(data, np.ndarray):
            hdu = ImageHDU(data, header=header)
        else:
            raise KeyError('Data must be a numpy array.')
    return hdu


def _stat_filename_or_fileobj(filename):
    closed = fileobj_closed(filename)
    name = fileobj_name(filename) or ''

    try:
        loc = filename.tell()
    except AttributeError:
        loc = 0

    noexist_or_empty = ((name and
                         (not os.path.exists(name) or
                          (os.path.getsize(name) == 0)))
                        or (not name and loc == 0))

    return name, closed, noexist_or_empty


def _get_file_mode(filename, default='readonly'):
    """
    Allow file object to already be opened in any of the valid modes and
    leave the file in the same state (opened or closed) as when the
    function was called.
    """
    mode = default
    closed = fileobj_closed(filename)

    fmode = fileobj_mode(filename)
    if fmode is not None:
        mode = FILE_MODES.get(fmode)
        if mode is None:
            raise OSError(
                "File mode of the input file object ({!r}) cannot be used to "
                "read/write FITS files.".format(fmode))

    return mode, closed
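
# Illustrative sketch (not part of this module): typical use of the
# convenience functions above.  The file name 'myimage.fits' and the keyword
# are hypothetical; note that each call opens and re-parses the file, so
# prefer astropy.io.fits.open() for repeated operations on the same file.
def _example_convenience_usage():
    hdr = getheader('myimage.fits')        # primary HDU's header
    data = getdata('myimage.fits', ext=1)  # data of the first extension
    setval('myimage.fits', 'OBSERVER', value='E. Hubble')
    return hdr, data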
# Licensed under a 3-clause BSD style license - see PYFITS.rst

import os

from distutils.core import Extension
from glob import glob

from astropy_helpers import setup_helpers
from astropy_helpers.distutils_helpers import get_distutils_build_option


def _get_compression_extension():
    # 'numpy' will be replaced with the proper path to the numpy includes
    cfg = setup_helpers.DistutilsExtensionArgs()
    cfg['include_dirs'].append('numpy')
    cfg['sources'].append(os.path.join(os.path.dirname(__file__), 'src',
                                       'compressionmodule.c'))

    if not setup_helpers.use_system_library('cfitsio'):
        if setup_helpers.get_compiler_option() == 'msvc':
            # These come from the CFITSIO vcc makefile, except the last
            # which ensures on windows we do not include unistd.h (in regular
            # compilation of cfitsio, an empty file would be generated)
            cfg['extra_compile_args'].extend(
                ['/D', '"WIN32"',
                 '/D', '"_WINDOWS"',
                 '/D', '"_MBCS"',
                 '/D', '"_USRDLL"',
                 '/D', '"_CRT_SECURE_NO_DEPRECATE"',
                 '/D', '"FF_NO_UNISTD_H"'])
        else:
            cfg['extra_compile_args'].extend([
                '-Wno-declaration-after-statement'
            ])

            if not get_distutils_build_option('debug'):
                # these switches are to silence warnings from compiling
                # CFITSIO.  For full silencing, some are added that only are
                # used in later versions of gcc (versions approximate;
                # see #6474)
                cfg['extra_compile_args'].extend([
                    '-Wno-strict-prototypes',
                    '-Wno-unused',
                    '-Wno-uninitialized',
                    '-Wno-unused-result',           # gcc >~4.8
                    '-Wno-misleading-indentation',  # gcc >~7.2
                    '-Wno-format-overflow',         # gcc >~7.2
                ])

        cfitsio_lib_path = os.path.join('cextern', 'cfitsio', 'lib')
        cfitsio_zlib_path = os.path.join('cextern', 'cfitsio', 'zlib')
        cfitsio_files = glob(os.path.join(cfitsio_lib_path, '*.c'))
        cfitsio_zlib_files = glob(os.path.join(cfitsio_zlib_path, '*.c'))
        cfg['include_dirs'].append(cfitsio_lib_path)
        cfg['include_dirs'].append(cfitsio_zlib_path)
        cfg['sources'].extend(cfitsio_files)
        cfg['sources'].extend(cfitsio_zlib_files)
    else:
        cfg.update(setup_helpers.pkg_config(['cfitsio'], ['cfitsio']))

    return Extension('astropy.io.fits.compression', **cfg)


def get_extensions():
    return [_get_compression_extension()]


def get_external_libraries():
    return ['cfitsio']
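
# Illustrative sketch (not how astropy itself is built): astropy's build
# machinery imports this module and calls ``get_extensions()`` to collect the
# C extensions to compile.  A minimal, hypothetical stand-alone equivalent
# with plain distutils could look like this.
def _example_manual_setup():
    from distutils.core import setup
    setup(name='fits-compression-demo',
          ext_modules=get_extensions())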
# Licensed under a 3-clause BSD style license - see PYFITS.rst

import gzip
import itertools
import io
import mmap
import operator
import os
import platform
import signal
import sys
import tempfile
import textwrap
import threading
import warnings
import weakref
from contextlib import contextmanager, suppress
from functools import wraps

from astropy.utils import data

from distutils.version import LooseVersion

import numpy as np

from astropy.utils.exceptions import AstropyUserWarning

cmp = lambda a, b: (a > b) - (a < b)

all_integer_types = (int, np.integer)


class NotifierMixin:
    """
    Mixin class that provides services by which objects can register
    listeners to changes on that object.

    All methods provided by this class are underscored, since this is
    intended for internal use to communicate between classes in a generic
    way, and is not machinery that should be exposed to users of the classes
    involved.

    Use the ``_add_listener`` method to register a listener on an instance
    of the notifier.  This registers the listener with a weak reference, so
    if no other references to the listener exist it is automatically dropped
    from the list and does not need to be manually removed.

    Call the ``_notify`` method on the notifier to update all listeners
    upon changes.  ``_notify('change_type', *args, **kwargs)`` results
    in calling ``listener._update_change_type(*args, **kwargs)`` on all
    listeners subscribed to that notifier.

    If a particular listener does not have the appropriate update method
    it is ignored.

    Examples
    --------
    >>> class Widget(NotifierMixin):
    ...     state = 1
    ...     def __init__(self, name):
    ...         self.name = name
    ...     def update_state(self):
    ...         self.state += 1
    ...         self._notify('widget_state_changed', self)
    ...
    >>> class WidgetListener:
    ...     def _update_widget_state_changed(self, widget):
    ...         print('Widget {0} changed state to {1}'.format(
    ...             widget.name, widget.state))
    ...
    >>> widget = Widget('fred')
    >>> listener = WidgetListener()
    >>> widget._add_listener(listener)
    >>> widget.update_state()
    Widget fred changed state to 2
    """

    _listeners = None

    def _add_listener(self, listener):
        """
        Add an object to the list of listeners to notify of changes to this
        object.  This adds a weakref to the list of listeners that is
        removed from the listeners list when the listener has no other
        references to it.
        """
        if self._listeners is None:
            self._listeners = weakref.WeakValueDictionary()

        self._listeners[id(listener)] = listener

    def _remove_listener(self, listener):
        """
        Removes the specified listener from the listeners list.  This relies
        on object identity (i.e. the ``is`` operator).
        """
        if self._listeners is None:
            return

        with suppress(KeyError):
            del self._listeners[id(listener)]

    def _notify(self, notification, *args, **kwargs):
        """
        Notify all listeners of some particular state change by calling their
        ``_update_<notification>`` method with the given ``*args`` and
        ``**kwargs``.

        The notification does not by default include the object that actually
        changed (``self``), but it certainly may if required.
        """
        if self._listeners is None:
            return

        method_name = '_update_{0}'.format(notification)
        for listener in self._listeners.valuerefs():
            # Use valuerefs instead of itervaluerefs; see
            # https://github.com/astropy/astropy/issues/4015
            listener = listener()  # dereference weakref
            if listener is None:
                continue

            if hasattr(listener, method_name):
                method = getattr(listener, method_name)
                if callable(method):
                    method(*args, **kwargs)

    def __getstate__(self):
        """
        Exclude listeners when saving the listener's state, since they may be
        ephemeral.
""" # TODO: This hasn't come up often, but if anyone needs to pickle HDU # objects it will be necessary when HDU objects' states are restored to # re-register themselves as listeners on their new column instances. try: state = super().__getstate__() except AttributeError: # Chances are the super object doesn't have a getstate state = self.__dict__.copy() state['_listeners'] = None return state def first(iterable): """ Returns the first item returned by iterating over an iterable object. Example: >>> a = [1, 2, 3] >>> first(a) 1 """ return next(iter(iterable)) def itersubclasses(cls, _seen=None): """ Generator over all subclasses of a given class, in depth first order. >>> class A: pass >>> class B(A): pass >>> class C(A): pass >>> class D(B,C): pass >>> class E(D): pass >>> >>> for cls in itersubclasses(A): ... print(cls.__name__) B D E C >>> # get ALL classes currently defined >>> [cls.__name__ for cls in itersubclasses(object)] [...'tuple', ...'type', ...] From http://code.activestate.com/recipes/576949/ """ if _seen is None: _seen = set() try: subs = cls.__subclasses__() except TypeError: # fails only when cls is type subs = cls.__subclasses__(cls) for sub in sorted(subs, key=operator.attrgetter('__name__')): if sub not in _seen: _seen.add(sub) yield sub for sub in itersubclasses(sub, _seen): yield sub def ignore_sigint(func): """ This decorator registers a custom SIGINT handler to catch and ignore SIGINT until the wrapped function is completed. """ @wraps(func) def wrapped(*args, **kwargs): # Get the name of the current thread and determine if this is a single # threaded application curr_thread = threading.currentThread() single_thread = (threading.activeCount() == 1 and curr_thread.getName() == 'MainThread') class SigintHandler: def __init__(self): self.sigint_received = False def __call__(self, signum, frame): warnings.warn('KeyboardInterrupt ignored until {} is ' 'complete!'.format(func.__name__), AstropyUserWarning) self.sigint_received = True sigint_handler = SigintHandler() # Define new signal interput handler if single_thread: # Install new handler old_handler = signal.signal(signal.SIGINT, sigint_handler) try: func(*args, **kwargs) finally: if single_thread: if old_handler is not None: signal.signal(signal.SIGINT, old_handler) else: signal.signal(signal.SIGINT, signal.SIG_DFL) if sigint_handler.sigint_received: raise KeyboardInterrupt return wrapped def pairwise(iterable): """Return the items of an iterable paired with its next item. Ex: s -> (s0,s1), (s1,s2), (s2,s3), .... """ a, b = itertools.tee(iterable) for _ in b: # Just a little trick to advance b without having to catch # StopIter if b happens to be empty break return zip(a, b) def encode_ascii(s): if isinstance(s, str): return s.encode('ascii') elif (isinstance(s, np.ndarray) and issubclass(s.dtype.type, np.str_)): ns = np.char.encode(s, 'ascii').view(type(s)) if ns.dtype.itemsize != s.dtype.itemsize / 4: ns = ns.astype((np.bytes_, s.dtype.itemsize / 4)) return ns elif (isinstance(s, np.ndarray) and not issubclass(s.dtype.type, np.bytes_)): raise TypeError('string operation on non-string array') return s def decode_ascii(s): if isinstance(s, bytes): try: return s.decode('ascii') except UnicodeDecodeError: warnings.warn('non-ASCII characters are present in the FITS ' 'file header and have been replaced by "?" 
                          'characters', AstropyUserWarning)
            s = s.decode('ascii', errors='replace')
            return s.replace(u'\ufffd', '?')
    elif (isinstance(s, np.ndarray) and
          issubclass(s.dtype.type, np.bytes_)):
        # np.char.encode/decode annoyingly don't preserve the type of the
        # array, hence the view() call
        # It also doesn't necessarily preserve widths of the strings,
        # hence the astype()
        if s.size == 0:
            # Numpy apparently also has a bug that if a string array is
            # empty calling np.char.decode on it returns an empty float64
            # array
            dt = s.dtype.str.replace('S', 'U')
            ns = np.array([], dtype=dt).view(type(s))
        else:
            ns = np.char.decode(s, 'ascii').view(type(s))
        if ns.dtype.itemsize / 4 != s.dtype.itemsize:
            ns = ns.astype((np.str_, s.dtype.itemsize))
        return ns
    elif (isinstance(s, np.ndarray) and
          not issubclass(s.dtype.type, np.str_)):
        # Don't silently pass through on non-string arrays; we don't want
        # to hide errors where things that are not stringy are attempting
        # to be decoded
        raise TypeError('string operation on non-string array')
    return s


def isreadable(f):
    """
    Returns True if the file-like object can be read from.  This is a common-
    sense approximation of io.IOBase.readable.
    """
    if hasattr(f, 'readable'):
        return f.readable()

    if hasattr(f, 'closed') and f.closed:
        # This mimics the behavior of io.IOBase.readable
        raise ValueError('I/O operation on closed file')

    if not hasattr(f, 'read'):
        return False

    if hasattr(f, 'mode') and not any(c in f.mode for c in 'r+'):
        return False

    # Not closed, has a 'read()' method, and either has no known mode or a
    # readable mode--should be good enough to assume 'readable'
    return True


def iswritable(f):
    """
    Returns True if the file-like object can be written to.  This is a
    common-sense approximation of io.IOBase.writable.
    """
    if hasattr(f, 'writable'):
        return f.writable()

    if hasattr(f, 'closed') and f.closed:
        # This mimics the behavior of io.IOBase.writable
        raise ValueError('I/O operation on closed file')

    if not hasattr(f, 'write'):
        return False

    if hasattr(f, 'mode') and not any(c in f.mode for c in 'wa+'):
        return False

    # Not closed, has a 'write()' method, and either has no known mode or a
    # mode that supports writing--should be good enough to assume 'writable'
    return True


def isfile(f):
    """
    Returns True if the given object represents an OS-level file (that is,
    ``isinstance(f, file)``).

    On Python 3 this also returns True if the given object is higher level
    wrapper on top of a FileIO object, such as a TextIOWrapper.
    """
    if isinstance(f, io.FileIO):
        return True
    elif hasattr(f, 'buffer'):
        return isfile(f.buffer)
    elif hasattr(f, 'raw'):
        return isfile(f.raw)
    return False


def fileobj_open(filename, mode):
    """
    A wrapper around the `open()` builtin.

    This exists because `open()` returns an `io.BufferedReader` by default.
    This is bad, because `io.BufferedReader` doesn't support random access,
    which we need in some cases.  We must call open with buffering=0 to get
    a raw random-access file reader.
    """
    return open(filename, mode, buffering=0)


def fileobj_name(f):
    """
    Returns the 'name' of file-like object f, if it has anything that could
    be called its name.  Otherwise f's class or type is returned.  If f is
    a string, f itself is returned.
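    Examples
    --------
    Illustrative sketch (the path is hypothetical)::

        >>> fileobj_name('/data/image.fits')
        '/data/image.fits'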
""" if isinstance(f, str): return f elif isinstance(f, gzip.GzipFile): # The .name attribute on GzipFiles does not always represent the name # of the file being read/written--it can also represent the original # name of the file being compressed # See the documentation at # https://docs.python.org/3/library/gzip.html#gzip.GzipFile # As such, for gzip files only return the name of the underlying # fileobj, if it exists return fileobj_name(f.fileobj) elif hasattr(f, 'name'): return f.name elif hasattr(f, 'filename'): return f.filename elif hasattr(f, '__class__'): return str(f.__class__) else: return str(type(f)) def fileobj_closed(f): """ Returns True if the given file-like object is closed or if f is a string (and assumed to be a pathname). Returns False for all other types of objects, under the assumption that they are file-like objects with no sense of a 'closed' state. """ if isinstance(f, str): return True if hasattr(f, 'closed'): return f.closed elif hasattr(f, 'fileobj') and hasattr(f.fileobj, 'closed'): return f.fileobj.closed elif hasattr(f, 'fp') and hasattr(f.fp, 'closed'): return f.fp.closed else: return False def fileobj_mode(f): """ Returns the 'mode' string of a file-like object if such a thing exists. Otherwise returns None. """ # Go from most to least specific--for example gzip objects have a 'mode' # attribute, but it's not analogous to the file.mode attribute # gzip.GzipFile -like if hasattr(f, 'fileobj') and hasattr(f.fileobj, 'mode'): fileobj = f.fileobj # astropy.io.fits._File -like, doesn't need additional checks because it's # already validated elif hasattr(f, 'fileobj_mode'): return f.fileobj_mode # PIL-Image -like investigate the fp (filebuffer) elif hasattr(f, 'fp') and hasattr(f.fp, 'mode'): fileobj = f.fp # FILEIO -like (normal open(...)), keep as is. elif hasattr(f, 'mode'): fileobj = f # Doesn't look like a file-like object, for example strings, urls or paths. else: return None return _fileobj_normalize_mode(fileobj) def _fileobj_normalize_mode(f): """Takes care of some corner cases in Python where the mode string is either oddly formatted or does not truly represent the file mode. """ mode = f.mode # Special case: Gzip modes: if isinstance(f, gzip.GzipFile): # GzipFiles can be either readonly or writeonly if mode == gzip.READ: return 'rb' elif mode == gzip.WRITE: return 'wb' else: return None # This shouldn't happen? # Sometimes Python can produce modes like 'r+b' which will be normalized # here to 'rb+' if '+' in mode: mode = mode.replace('+', '') mode += '+' return mode def fileobj_is_binary(f): """ Returns True if the give file or file-like object has a file open in binary mode. When in doubt, returns True by default. """ # This is kind of a hack for this to work correctly with _File objects, # which, for the time being, are *always* binary if hasattr(f, 'binary'): return f.binary if isinstance(f, io.TextIOBase): return False mode = fileobj_mode(f) if mode: return 'b' in mode else: return True def translate(s, table, deletechars): if deletechars: table = table.copy() for c in deletechars: table[ord(c)] = None return s.translate(table) def fill(text, width, **kwargs): """ Like :func:`textwrap.wrap` but preserves existing paragraphs which :func:`textwrap.wrap` does not otherwise handle well. Also handles section headers. 
""" paragraphs = text.split('\n\n') def maybe_fill(t): if all(len(l) < width for l in t.splitlines()): return t else: return textwrap.fill(t, width, **kwargs) return '\n\n'.join(maybe_fill(p) for p in paragraphs) # On MacOS X 10.8 and earlier, there is a bug that causes numpy.fromfile to # fail when reading over 2Gb of data. If we detect these versions of MacOS X, # we can instead read the data in chunks. To avoid performance penalties at # import time, we defer the setting of this global variable until the first # time it is needed. CHUNKED_FROMFILE = None def _array_from_file(infile, dtype, count): """Create a numpy array from a file or a file-like object.""" if isfile(infile): global CHUNKED_FROMFILE if CHUNKED_FROMFILE is None: if (sys.platform == 'darwin' and LooseVersion(platform.mac_ver()[0]) < LooseVersion('10.9')): CHUNKED_FROMFILE = True else: CHUNKED_FROMFILE = False if CHUNKED_FROMFILE: chunk_size = int(1024 ** 3 / dtype.itemsize) # 1Gb to be safe if count < chunk_size: return np.fromfile(infile, dtype=dtype, count=count) else: array = np.empty(count, dtype=dtype) for beg in range(0, count, chunk_size): end = min(count, beg + chunk_size) array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg) return array else: return np.fromfile(infile, dtype=dtype, count=count) else: # treat as file-like object with "read" method; this includes gzip file # objects, because numpy.fromfile just reads the compressed bytes from # their underlying file object, instead of the decompressed bytes read_size = np.dtype(dtype).itemsize * count s = infile.read(read_size) array = np.frombuffer(s, dtype=dtype, count=count) # copy is needed because np.frombuffer returns a read-only view of the # underlying buffer array = array.copy() return array _OSX_WRITE_LIMIT = (2 ** 32) - 1 _WIN_WRITE_LIMIT = (2 ** 31) - 1 def _array_to_file(arr, outfile): """ Write a numpy array to a file or a file-like object. Parameters ---------- arr : `~numpy.ndarray` The Numpy array to write. outfile : file-like A file-like object such as a Python file object, an `io.BytesIO`, or anything else with a ``write`` method. The file object must support the buffer interface in its ``write``. If writing directly to an on-disk file this delegates directly to `ndarray.tofile`. Otherwise a slower Python implementation is used. """ if isfile(outfile) and not isinstance(outfile, io.BufferedIOBase): write = lambda a, f: a.tofile(f) else: write = _array_to_file_like # Implements a workaround for a bug deep in OSX's stdlib file writing # functions; on 64-bit OSX it is not possible to correctly write a number # of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192-- # whatever the default blocksize for the filesystem is). # This issue should have a workaround in Numpy too, but hasn't been # implemented there yet: https://github.com/astropy/astropy/issues/839 # # Apparently Windows has its own fwrite bug: # https://github.com/numpy/numpy/issues/2256 if (sys.platform == 'darwin' and arr.nbytes >= _OSX_WRITE_LIMIT + 1 and arr.nbytes % 4096 == 0): # chunksize is a count of elements in the array, not bytes chunksize = _OSX_WRITE_LIMIT // arr.itemsize elif sys.platform.startswith('win'): chunksize = _WIN_WRITE_LIMIT // arr.itemsize else: # Just pass the whole array to the write routine return write(arr, outfile) # Write one chunk at a time for systems whose fwrite chokes on large # writes. 
idx = 0 arr = arr.view(np.ndarray).flatten() while idx < arr.nbytes: write(arr[idx:idx + chunksize], outfile) idx += chunksize def _array_to_file_like(arr, fileobj): """ Write a `~numpy.ndarray` to a file-like object (which is not supported by `numpy.ndarray.tofile`). """ # If the array is empty, we can simply take a shortcut and return since # there is nothing to write. if len(arr) == 0: return if arr.flags.contiguous: # It suffices to just pass the underlying buffer directly to the # fileobj's write (assuming it supports the buffer interface). If # it does not have the buffer interface, a TypeError should be returned # in which case we can fall back to the other methods. try: fileobj.write(arr.data) except TypeError: pass else: return if hasattr(np, 'nditer'): # nditer version for non-contiguous arrays for item in np.nditer(arr, order='C'): fileobj.write(item.tostring()) else: # Slower version for Numpy versions without nditer; # The problem with flatiter is it doesn't preserve the original # byteorder byteorder = arr.dtype.byteorder if ((sys.byteorder == 'little' and byteorder == '>') or (sys.byteorder == 'big' and byteorder == '<')): for item in arr.flat: fileobj.write(item.byteswap().tostring()) else: for item in arr.flat: fileobj.write(item.tostring()) def _write_string(f, s): """ Write a string to a file, encoding to ASCII if the file is open in binary mode, or decoding if the file is open in text mode. """ # Assume if the file object doesn't have a specific mode, that the mode is # binary binmode = fileobj_is_binary(f) if binmode and isinstance(s, str): s = encode_ascii(s) elif not binmode and not isinstance(f, str): s = decode_ascii(s) f.write(s) def _convert_array(array, dtype): """ Converts an array to a new dtype--if the itemsize of the new dtype is the same as the old dtype and both types are not numeric, a view is returned. Otherwise a new array must be created. """ if array.dtype == dtype: return array elif (array.dtype.itemsize == dtype.itemsize and not (np.issubdtype(array.dtype, np.number) and np.issubdtype(dtype, np.number))): # Includes a special case when both dtypes are at least numeric to # account for ticket #218: https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218 return array.view(dtype) else: return array.astype(dtype) def _unsigned_zero(dtype): """ Given a numpy dtype, finds its "zero" point, which is exactly in the middle of its range. """ assert dtype.kind == 'u' return 1 << (dtype.itemsize * 8 - 1) def _is_pseudo_unsigned(dtype): return dtype.kind == 'u' and dtype.itemsize >= 2 def _is_int(val): return isinstance(val, all_integer_types) def _str_to_num(val): """Converts a given string to either an int or a float if necessary.""" try: num = int(val) except ValueError: # If this fails then an exception should be raised anyways num = float(val) return num def _words_group(input, strlen): """ Split a long string into parts where each part is no longer than ``strlen`` and no word is cut into two pieces. But if there is one single word which is longer than ``strlen``, then it will be split in the middle of the word. 
""" words = [] nblanks = input.count(' ') nmax = max(nblanks, len(input) // strlen + 1) arr = np.frombuffer((input + ' ').encode('utf8'), dtype=(bytes, 1)) # locations of the blanks blank_loc = np.nonzero(arr == b' ')[0] offset = 0 xoffset = 0 for idx in range(nmax): try: loc = np.nonzero(blank_loc >= strlen + offset)[0][0] offset = blank_loc[loc - 1] + 1 if loc == 0: offset = -1 except Exception: offset = len(input) # check for one word longer than strlen, break in the middle if offset <= xoffset: offset = xoffset + strlen # collect the pieces in a list words.append(input[xoffset:offset]) if len(input) == offset: break xoffset = offset return words def _tmp_name(input): """ Create a temporary file name which should not already exist. Use the directory of the input file as the base name of the mkstemp() output. """ if input is not None: input = os.path.dirname(input) f, fn = tempfile.mkstemp(dir=input) os.close(f) return fn def _get_array_mmap(array): """ If the array has an mmap.mmap at base of its base chain, return the mmap object; otherwise return None. """ if isinstance(array, mmap.mmap): return array base = array while hasattr(base, 'base') and base.base is not None: if isinstance(base.base, mmap.mmap): return base.base base = base.base @contextmanager def _free_space_check(hdulist, dirname=None): try: yield except OSError as exc: error_message = '' if not isinstance(hdulist, list): hdulist = [hdulist, ] if dirname is None: dirname = os.path.dirname(hdulist._file.name) if os.path.isdir(dirname): free_space = data.get_free_space_in_dir(dirname) hdulist_size = sum(hdu.size for hdu in hdulist) if free_space < hdulist_size: error_message = ("Not enough space on disk: requested {}, " "available {}. ".format(hdulist_size, free_space)) for hdu in hdulist: hdu._close() raise OSError(error_message + str(exc)) def _extract_number(value, default): """ Attempts to extract an integer number from the given value. If the extraction fails, the value of the 'default' argument is returned. """ try: # The _str_to_num method converts the value to string/float # so we need to perform one additional conversion to int on top return int(_str_to_num(value)) except (TypeError, ValueError): return default def get_testdata_filepath(filename): """ Return a string representing the path to the file requested from the io.fits test data set. .. versionadded:: 2.0.3 Parameters ---------- filename : str The filename of the test data file. Returns ------- filepath : str The path to the requested file. """ return data.get_pkg_data_filename( 'io/fits/tests/data/{}'.format(filename), 'astropy') def _rstrip_inplace(array): """ Performs an in-place rstrip operation on string arrays. This is necessary since the built-in `np.char.rstrip` in Numpy does not perform an in-place calculation. """ # The following implementation convert the string to unsigned integers of # the right length. Trailing spaces (which are represented as 32) are then # converted to null characters (represented as zeros). To avoid creating # large temporary mask arrays, we loop over chunks (attempting to do that # on a 1-D version of the array; large memory may still be needed in the # unlikely case that a string array has small first dimension and cannot # be represented as a contiguous 1-D array in memory). dt = array.dtype if dt.kind not in 'SU': raise TypeError("This function can only be used on string arrays") # View the array as appropriate integers. The last dimension will # equal the number of characters in each string. 
    bpc = 1 if dt.kind == 'S' else 4
    dt_int = "{0}{1}u{2}".format(dt.itemsize // bpc, dt.byteorder, bpc)
    b = array.view(dt_int, np.ndarray)

    # For optimal speed, work in chunks of the internal ufunc buffer size.
    bufsize = np.getbufsize()

    # Attempt to have the strings as a 1-D array to give the chunk a known
    # size.  Note: the code will work if this fails; the chunks will just be
    # larger.
    if b.ndim > 2:
        try:
            b.shape = -1, b.shape[-1]
        except AttributeError:  # can occur for non-contiguous arrays
            pass

    for j in range(0, b.shape[0], bufsize):
        c = b[j:j + bufsize]

        # Mask which will tell whether we're in a sequence of trailing
        # spaces.
        mask = np.ones(c.shape[:-1], dtype=bool)

        # Loop over the characters in the strings, in reverse order.  We
        # process the i-th character of all strings in the chunk at the
        # same time.  If the character is 32, this corresponds to a space,
        # and we then change this to 0.  We then construct a new mask to
        # find rows where the i-th character is 0 (null) and the i-1-th is
        # 32 (space) and repeat.
        for i in range(-1, -c.shape[-1], -1):
            mask &= c[..., i] == 32
            c[..., i][mask] = 0
            mask = c[..., i] == 0

    return array
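
# A minimal usage sketch (not part of the original module) exercising the
# two string helpers above on toy inputs; it assumes numpy is imported as
# ``np`` near the top of this module, as the rest of the file does.
if __name__ == '__main__':
    # _words_group: split text into pieces of at most ``strlen`` characters
    # without cutting words in half.
    print(_words_group('The quick brown fox jumps over the lazy dog', 12))

    # _rstrip_inplace: strip trailing blanks from a string array without
    # allocating a new array.
    a = np.array(['abc  ', 'de   ', 'f    '], dtype='S5')
    _rstrip_inplace(a)
    print(a)  # -> [b'abc' b'de' b'f']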
# Licensed under a 3-clause BSD style license - see PYFITS.rst import re import warnings import numpy as np from .util import _str_to_num, _is_int, translate, _words_group from .verify import _Verify, _ErrList, VerifyError, VerifyWarning from . import conf from astropy.utils.exceptions import AstropyUserWarning __all__ = ['Card', 'Undefined'] FIX_FP_TABLE = str.maketrans('de', 'DE') FIX_FP_TABLE2 = str.maketrans('dD', 'eE') CARD_LENGTH = 80 BLANK_CARD = ' ' * CARD_LENGTH KEYWORD_LENGTH = 8 # The max length for FITS-standard keywords VALUE_INDICATOR = '= ' # The standard FITS value indicator VALUE_INDICATOR_LEN = len(VALUE_INDICATOR) HIERARCH_VALUE_INDICATOR = '=' # HIERARCH cards may use a shortened indicator class Undefined: """Undefined value.""" def __init__(self): # This __init__ is required to be here for Sphinx documentation pass UNDEFINED = Undefined() class Card(_Verify): length = CARD_LENGTH """The length of a Card image; should always be 80 for valid FITS files.""" # String for a FITS standard compliant (FSC) keyword. _keywd_FSC_RE = re.compile(r'^[A-Z0-9_-]{0,%d}$' % KEYWORD_LENGTH) # This will match any printable ASCII character excluding '=' _keywd_hierarch_RE = re.compile(r'^(?:HIERARCH +)?(?:^[ -<>-~]+ ?)+$', re.I) # A number sub-string, either an integer or a float in fixed or # scientific notation. One for FSC and one for non-FSC (NFSC) format: # NFSC allows lower case of DE for exponent, allows space between sign, # digits, exponent sign, and exponents _digits_FSC = r'(\.\d+|\d+(\.\d*)?)([DE][+-]?\d+)?' _digits_NFSC = r'(\.\d+|\d+(\.\d*)?) *([deDE] *[+-]? *\d+)?' _numr_FSC = r'[+-]?' + _digits_FSC _numr_NFSC = r'[+-]? *' + _digits_NFSC # This regex helps delete leading zeros from numbers, otherwise # Python might evaluate them as octal values (this is not-greedy, however, # so it may not strip leading zeros from a float, which is fine) _number_FSC_RE = re.compile(r'(?P<sign>[+-])?0*?(?P<digt>{})'.format( _digits_FSC)) _number_NFSC_RE = re.compile(r'(?P<sign>[+-])? *0*?(?P<digt>{})'.format( _digits_NFSC)) # FSC commentary card string which must contain printable ASCII characters. # Note: \Z matches the end of the string without allowing newlines _ascii_text_re = re.compile(r'[ -~]*\Z') # Checks for a valid value/comment string. It returns a match object # for a valid value/comment string. # The valu group will return a match if a FITS string, boolean, # number, or complex value is found, otherwise it will return # None, meaning the keyword is undefined. The comment field will # return a match if the comment separator is found, though the # comment maybe an empty string. _value_FSC_RE = re.compile( r'(?P<valu_field> *' r'(?P<valu>' # The <strg> regex is not correct for all cases, but # it comes pretty darn close. It appears to find the # end of a string rather well, but will accept # strings with an odd number of single quotes, # instead of issuing an error. The FITS standard # appears vague on this issue and only states that a # string should not end with two single quotes, # whereas it should not end with an even number of # quotes to be precise. # # Note that a non-greedy match is done for a string, # since a greedy match will find a single-quote after # the comment separator resulting in an incorrect # match. r'\'(?P<strg>([ -~]+?|\'\'|)) *?\'(?=$|/| )|' r'(?P<bool>[FT])|' r'(?P<numr>' + _numr_FSC + r')|' r'(?P<cplx>\( *' r'(?P<real>' + _numr_FSC + r') *, *' r'(?P<imag>' + _numr_FSC + r') *\))' r')? *)' r'(?P<comm_field>' r'(?P<sepr>/ *)' r'(?P<comm>[!-~][ -~]*)?' 
r')?$') _value_NFSC_RE = re.compile( r'(?P<valu_field> *' r'(?P<valu>' r'\'(?P<strg>([ -~]+?|\'\'|) *?)\'(?=$|/| )|' r'(?P<bool>[FT])|' r'(?P<numr>' + _numr_NFSC + r')|' r'(?P<cplx>\( *' r'(?P<real>' + _numr_NFSC + r') *, *' r'(?P<imag>' + _numr_NFSC + r') *\))' r')? *)' r'(?P<comm_field>' r'(?P<sepr>/ *)' r'(?P<comm>(.|\n)*)' r')?$') _rvkc_identifier = r'[a-zA-Z_]\w*' _rvkc_field = _rvkc_identifier + r'(\.\d+)?' _rvkc_field_specifier_s = r'{}(\.{})*'.format(_rvkc_field, _rvkc_field) _rvkc_field_specifier_val = (r'(?P<keyword>{}): (?P<val>{})'.format( _rvkc_field_specifier_s, _numr_FSC)) _rvkc_keyword_val = r'\'(?P<rawval>{})\''.format(_rvkc_field_specifier_val) _rvkc_keyword_val_comm = (r' *{} *(/ *(?P<comm>[ -~]*))?$'.format( _rvkc_keyword_val)) _rvkc_field_specifier_val_RE = re.compile(_rvkc_field_specifier_val + '$') # regular expression to extract the key and the field specifier from a # string that is being used to index into a card list that contains # record value keyword cards (ex. 'DP1.AXIS.1') _rvkc_keyword_name_RE = ( re.compile(r'(?P<keyword>{})\.(?P<field_specifier>{})$'.format( _rvkc_identifier, _rvkc_field_specifier_s))) # regular expression to extract the field specifier and value and comment # from the string value of a record value keyword card # (ex "'AXIS.1: 1' / a comment") _rvkc_keyword_val_comm_RE = re.compile(_rvkc_keyword_val_comm) _commentary_keywords = {'', 'COMMENT', 'HISTORY', 'END'} _special_keywords = _commentary_keywords.union(['CONTINUE']) # The default value indicator; may be changed if required by a convention # (namely HIERARCH cards) _value_indicator = VALUE_INDICATOR def __init__(self, keyword=None, value=None, comment=None, **kwargs): # For backwards compatibility, support the 'key' keyword argument: if keyword is None and 'key' in kwargs: keyword = kwargs['key'] self._keyword = None self._value = None self._comment = None self._valuestring = None self._image = None # This attribute is set to False when creating the card from a card # image to ensure that the contents of the image get verified at some # point self._verified = True # A flag to conveniently mark whether or not this was a valid HIERARCH # card self._hierarch = False # If the card could not be parsed according the the FITS standard or # any recognized non-standard conventions, this will be True self._invalid = False self._field_specifier = None # These are used primarily only by RVKCs self._rawkeyword = None self._rawvalue = None if not (keyword is not None and value is not None and self._check_if_rvkc(keyword, value)): # If _check_if_rvkc passes, it will handle setting the keyword and # value if keyword is not None: self.keyword = keyword if value is not None: self.value = value if comment is not None: self.comment = comment self._modified = False self._valuemodified = False def __repr__(self): return repr((self.keyword, self.value, self.comment)) def __str__(self): return self.image def __len__(self): return 3 def __getitem__(self, index): return (self.keyword, self.value, self.comment)[index] @property def keyword(self): """Returns the keyword name parsed from the card image.""" if self._keyword is not None: return self._keyword elif self._image: self._keyword = self._parse_keyword() return self._keyword else: self.keyword = '' return '' @keyword.setter def keyword(self, keyword): """Set the key attribute; once set it cannot be modified.""" if self._keyword is not None: raise AttributeError( 'Once set, the Card keyword may not be modified') elif isinstance(keyword, str): # Be nice and 
remove trailing whitespace--some FITS code always # pads keywords out with spaces; leading whitespace, however, # should be strictly disallowed. keyword = keyword.rstrip() keyword_upper = keyword.upper() if (len(keyword) <= KEYWORD_LENGTH and self._keywd_FSC_RE.match(keyword_upper)): # For keywords with length > 8 they will be HIERARCH cards, # and can have arbitrary case keywords if keyword_upper == 'END': raise ValueError("Keyword 'END' not allowed.") keyword = keyword_upper elif self._keywd_hierarch_RE.match(keyword): # In prior versions of PyFITS (*) HIERARCH cards would only be # created if the user-supplied keyword explicitly started with # 'HIERARCH '. Now we will create them automatically for long # keywords, but we still want to support the old behavior too; # the old behavior makes it possible to create HEIRARCH cards # that would otherwise be recognized as RVKCs # (*) This has never affected Astropy, because it was changed # before PyFITS was merged into Astropy! self._hierarch = True self._value_indicator = HIERARCH_VALUE_INDICATOR if keyword_upper[:9] == 'HIERARCH ': # The user explicitly asked for a HIERARCH card, so don't # bug them about it... keyword = keyword[9:].strip() else: # We'll gladly create a HIERARCH card, but a warning is # also displayed warnings.warn( 'Keyword name {!r} is greater than 8 characters or ' 'contains characters not allowed by the FITS ' 'standard; a HIERARCH card will be created.'.format( keyword), VerifyWarning) else: raise ValueError('Illegal keyword name: {!r}.'.format(keyword)) self._keyword = keyword self._modified = True else: raise ValueError('Keyword name {!r} is not a string.'.format(keyword)) @property def value(self): """The value associated with the keyword stored in this card.""" if self.field_specifier: return float(self._value) if self._value is not None: value = self._value elif self._valuestring is not None or self._image: value = self._value = self._parse_value() else: if self._keyword == '': self._value = value = '' else: self._value = value = UNDEFINED if conf.strip_header_whitespace and isinstance(value, str): value = value.rstrip() return value @value.setter def value(self, value): if self._invalid: raise ValueError( 'The value of invalid/unparseable cards cannot set. Either ' 'delete this card from the header or replace it.') if value is None: value = UNDEFINED try: oldvalue = self.value except VerifyError: # probably a parsing error, falling back to the internal _value # which should be None. This may happen while calling _fix_value. oldvalue = self._value if oldvalue is None: oldvalue = UNDEFINED if not isinstance(value, (str, int, float, complex, bool, Undefined, np.floating, np.integer, np.complexfloating, np.bool_)): raise ValueError('Illegal value: {!r}.'.format(value)) if isinstance(value, float) and (np.isnan(value) or np.isinf(value)): raise ValueError("Floating point {!r} values are not allowed " "in FITS headers.".format(value)) elif isinstance(value, str): m = self._ascii_text_re.match(value) if not m: raise ValueError( 'FITS header values must contain standard printable ASCII ' 'characters; {!r} contains characters not representable in ' 'ASCII or non-printable characters.'.format(value)) elif isinstance(value, bytes): # Allow str, but only if they can be decoded to ASCII text; note # this is not even allowed on Python 3 since the `bytes` type is # not included in `str`. 
Presently we simply don't # allow bytes to be assigned to headers, as doing so would too # easily mask potential user error valid = True try: text_value = value.decode('ascii') except UnicodeDecodeError: valid = False else: # Check against the printable characters regexp as well m = self._ascii_text_re.match(text_value) valid = m is not None if not valid: raise ValueError( 'FITS header values must contain standard printable ASCII ' 'characters; {!r} contains characters/bytes that do not ' 'represent printable characters in ASCII.'.format(value)) elif isinstance(value, np.bool_): value = bool(value) if (conf.strip_header_whitespace and (isinstance(oldvalue, str) and isinstance(value, str))): # Ignore extra whitespace when comparing the new value to the old different = oldvalue.rstrip() != value.rstrip() elif isinstance(oldvalue, bool) or isinstance(value, bool): different = oldvalue is not value else: different = (oldvalue != value or not isinstance(value, type(oldvalue))) if different: self._value = value self._rawvalue = None self._modified = True self._valuestring = None self._valuemodified = True if self.field_specifier: try: self._value = _int_or_float(self._value) except ValueError: raise ValueError('value {} is not a float'.format( self._value)) @value.deleter def value(self): if self._invalid: raise ValueError( 'The value of invalid/unparseable cards cannot deleted. ' 'Either delete this card from the header or replace it.') if not self.field_specifier: self.value = '' else: raise AttributeError('Values cannot be deleted from record-valued ' 'keyword cards') @property def rawkeyword(self): """On record-valued keyword cards this is the name of the standard <= 8 character FITS keyword that this RVKC is stored in. Otherwise it is the card's normal keyword. """ if self._rawkeyword is not None: return self._rawkeyword elif self.field_specifier is not None: self._rawkeyword = self.keyword.split('.', 1)[0] return self._rawkeyword else: return self.keyword @property def rawvalue(self): """On record-valued keyword cards this is the raw string value in the ``<field-specifier>: <value>`` format stored in the card in order to represent a RVKC. Otherwise it is the card's normal value. """ if self._rawvalue is not None: return self._rawvalue elif self.field_specifier is not None: self._rawvalue = '{}: {}'.format(self.field_specifier, self.value) return self._rawvalue else: return self.value @property def comment(self): """Get the comment attribute from the card image if not already set.""" if self._comment is not None: return self._comment elif self._image: self._comment = self._parse_comment() return self._comment else: self._comment = '' return '' @comment.setter def comment(self, comment): if self._invalid: raise ValueError( 'The comment of invalid/unparseable cards cannot set. Either ' 'delete this card from the header or replace it.') if comment is None: comment = '' if isinstance(comment, str): m = self._ascii_text_re.match(comment) if not m: raise ValueError( 'FITS header comments must contain standard printable ' 'ASCII characters; {!r} contains characters not ' 'representable in ASCII or non-printable characters.' .format(comment)) try: oldcomment = self.comment except VerifyError: # probably a parsing error, falling back to the internal _comment # which should be None. 
oldcomment = self._comment if oldcomment is None: oldcomment = '' if comment != oldcomment: self._comment = comment self._modified = True @comment.deleter def comment(self): if self._invalid: raise ValueError( 'The comment of invalid/unparseable cards cannot deleted. ' 'Either delete this card from the header or replace it.') self.comment = '' @property def field_specifier(self): """ The field-specifier of record-valued keyword cards; always `None` on normal cards. """ # Ensure that the keyword exists and has been parsed--the will set the # internal _field_specifier attribute if this is a RVKC. if self.keyword: return self._field_specifier else: return None @field_specifier.setter def field_specifier(self, field_specifier): if not field_specifier: raise ValueError('The field-specifier may not be blank in ' 'record-valued keyword cards.') elif not self.field_specifier: raise AttributeError('Cannot coerce cards to be record-valued ' 'keyword cards by setting the ' 'field_specifier attribute') elif field_specifier != self.field_specifier: self._field_specifier = field_specifier # The keyword need also be updated keyword = self._keyword.split('.', 1)[0] self._keyword = '.'.join([keyword, field_specifier]) self._modified = True @field_specifier.deleter def field_specifier(self): raise AttributeError('The field_specifier attribute may not be ' 'deleted from record-valued keyword cards.') @property def image(self): """ The card "image", that is, the 80 byte character string that represents this card in an actual FITS header. """ if self._image and not self._verified: self.verify('fix+warn') if self._image is None or self._modified: self._image = self._format_image() return self._image @property def is_blank(self): """ `True` if the card is completely blank--that is, it has no keyword, value, or comment. It appears in the header as 80 spaces. Returns `False` otherwise. """ if not self._verified: # The card image has not been parsed yet; compare directly with the # string representation of a blank card return self._image == BLANK_CARD # If the keyword, value, and comment are all empty (for self.value # explicitly check that it is a string value, since a blank value is # returned as '') return (not self.keyword and (isinstance(self.value, str) and not self.value) and not self.comment) @classmethod def fromstring(cls, image): """ Construct a `Card` object from a (raw) string. It will pad the string if it is not the length of a card image (80 columns). If the card image is longer than 80 columns, assume it contains ``CONTINUE`` card(s). """ card = cls() card._image = _pad(image) card._verified = False return card @classmethod def normalize_keyword(cls, keyword): """ `classmethod` to convert a keyword value that may contain a field-specifier to uppercase. The effect is to raise the key to uppercase and leave the field specifier in its original case. 
Parameters ---------- keyword : or str A keyword value or a ``keyword.field-specifier`` value """ # Test first for the most common case: a standard FITS keyword provided # in standard all-caps if (len(keyword) <= KEYWORD_LENGTH and cls._keywd_FSC_RE.match(keyword)): return keyword # Test if this is a record-valued keyword match = cls._rvkc_keyword_name_RE.match(keyword) if match: return '.'.join((match.group('keyword').strip().upper(), match.group('field_specifier'))) elif len(keyword) > 9 and keyword[:9].upper() == 'HIERARCH ': # Remove 'HIERARCH' from HIERARCH keywords; this could lead to # ambiguity if there is actually a keyword card containing # "HIERARCH HIERARCH", but shame on you if you do that. return keyword[9:].strip().upper() else: # A normal FITS keyword, but provided in non-standard case return keyword.strip().upper() def _check_if_rvkc(self, *args): """ Determine whether or not the card is a record-valued keyword card. If one argument is given, that argument is treated as a full card image and parsed as such. If two arguments are given, the first is treated as the card keyword (including the field-specifier if the card is intended as a RVKC), and the second as the card value OR the first value can be the base keyword, and the second value the 'field-specifier: value' string. If the check passes the ._keyword, ._value, and .field_specifier keywords are set. Examples -------- :: self._check_if_rvkc('DP1', 'AXIS.1: 2') self._check_if_rvkc('DP1.AXIS.1', 2) self._check_if_rvkc('DP1 = AXIS.1: 2') """ if not conf.enable_record_valued_keyword_cards: return False if len(args) == 1: return self._check_if_rvkc_image(*args) elif len(args) == 2: keyword, value = args if not isinstance(keyword, str): return False if keyword in self._commentary_keywords: return False match = self._rvkc_keyword_name_RE.match(keyword) if match and isinstance(value, (int, float)): self._init_rvkc(match.group('keyword'), match.group('field_specifier'), None, value) return True # Testing for ': ' is a quick way to avoid running the full regular # expression, speeding this up for the majority of cases if isinstance(value, str) and value.find(': ') > 0: match = self._rvkc_field_specifier_val_RE.match(value) if match and self._keywd_FSC_RE.match(keyword): self._init_rvkc(keyword, match.group('keyword'), value, match.group('val')) return True def _check_if_rvkc_image(self, *args): """ Implements `Card._check_if_rvkc` for the case of an unparsed card image. If given one argument this is the full intact image. If given two arguments the card has already been split between keyword and value+comment at the standard value indicator '= '. 
""" if len(args) == 1: image = args[0] eq_idx = image.find(VALUE_INDICATOR) if eq_idx < 0 or eq_idx > 9: return False keyword = image[:eq_idx] rest = image[eq_idx + VALUE_INDICATOR_LEN:] else: keyword, rest = args rest = rest.lstrip() # This test allows us to skip running the full regular expression for # the majority of cards that do not contain strings or that definitely # do not contain RVKC field-specifiers; it's very much a # micro-optimization but it does make a measurable difference if not rest or rest[0] != "'" or rest.find(': ') < 2: return False match = self._rvkc_keyword_val_comm_RE.match(rest) if match: self._init_rvkc(keyword, match.group('keyword'), match.group('rawval'), match.group('val')) return True def _init_rvkc(self, keyword, field_specifier, field, value): """ Sort of addendum to Card.__init__ to set the appropriate internal attributes if the card was determined to be a RVKC. """ keyword_upper = keyword.upper() self._keyword = '.'.join((keyword_upper, field_specifier)) self._rawkeyword = keyword_upper self._field_specifier = field_specifier self._value = _int_or_float(value) self._rawvalue = field def _parse_keyword(self): keyword = self._image[:KEYWORD_LENGTH].strip() keyword_upper = keyword.upper() if keyword_upper in self._special_keywords: return keyword_upper elif (keyword_upper == 'HIERARCH' and self._image[8] == ' ' and HIERARCH_VALUE_INDICATOR in self._image): # This is valid HIERARCH card as described by the HIERARCH keyword # convention: # http://fits.gsfc.nasa.gov/registry/hierarch_keyword.html self._hierarch = True self._value_indicator = HIERARCH_VALUE_INDICATOR keyword = self._image.split(HIERARCH_VALUE_INDICATOR, 1)[0][9:] return keyword.strip() else: val_ind_idx = self._image.find(VALUE_INDICATOR) if 0 <= val_ind_idx <= KEYWORD_LENGTH: # The value indicator should appear in byte 8, but we are # flexible and allow this to be fixed if val_ind_idx < KEYWORD_LENGTH: keyword = keyword[:val_ind_idx] keyword_upper = keyword_upper[:val_ind_idx] rest = self._image[val_ind_idx + VALUE_INDICATOR_LEN:] # So far this looks like a standard FITS keyword; check whether # the value represents a RVKC; if so then we pass things off to # the RVKC parser if self._check_if_rvkc_image(keyword, rest): return self._keyword return keyword_upper else: warnings.warn( 'The following header keyword is invalid or follows an ' 'unrecognized non-standard convention:\n{}' .format(self._image), AstropyUserWarning) self._invalid = True return keyword def _parse_value(self): """Extract the keyword value from the card image.""" # for commentary cards, no need to parse further # Likewise for invalid cards if self.keyword.upper() in self._commentary_keywords or self._invalid: return self._image[KEYWORD_LENGTH:].rstrip() if self._check_if_rvkc(self._image): return self._value if len(self._image) > self.length: values = [] for card in self._itersubcards(): value = card.value.rstrip().replace("''", "'") if value and value[-1] == '&': value = value[:-1] values.append(value) value = ''.join(values) self._valuestring = value return value m = self._value_NFSC_RE.match(self._split()[1]) if m is None: raise VerifyError("Unparsable card ({}), fix it first with " ".verify('fix').".format(self.keyword)) if m.group('bool') is not None: value = m.group('bool') == 'T' elif m.group('strg') is not None: value = re.sub("''", "'", m.group('strg')) elif m.group('numr') is not None: # Check for numbers with leading 0s. 
numr = self._number_NFSC_RE.match(m.group('numr')) digt = translate(numr.group('digt'), FIX_FP_TABLE2, ' ') if numr.group('sign') is None: sign = '' else: sign = numr.group('sign') value = _str_to_num(sign + digt) elif m.group('cplx') is not None: # Check for numbers with leading 0s. real = self._number_NFSC_RE.match(m.group('real')) rdigt = translate(real.group('digt'), FIX_FP_TABLE2, ' ') if real.group('sign') is None: rsign = '' else: rsign = real.group('sign') value = _str_to_num(rsign + rdigt) imag = self._number_NFSC_RE.match(m.group('imag')) idigt = translate(imag.group('digt'), FIX_FP_TABLE2, ' ') if imag.group('sign') is None: isign = '' else: isign = imag.group('sign') value += _str_to_num(isign + idigt) * 1j else: value = UNDEFINED if not self._valuestring: self._valuestring = m.group('valu') return value def _parse_comment(self): """Extract the keyword value from the card image.""" # for commentary cards, no need to parse further # likewise for invalid/unparseable cards if self.keyword in Card._commentary_keywords or self._invalid: return '' if len(self._image) > self.length: comments = [] for card in self._itersubcards(): if card.comment: comments.append(card.comment) comment = '/ ' + ' '.join(comments).rstrip() m = self._value_NFSC_RE.match(comment) else: m = self._value_NFSC_RE.match(self._split()[1]) if m is not None: comment = m.group('comm') if comment: return comment.rstrip() return '' def _split(self): """ Split the card image between the keyword and the rest of the card. """ if self._image is not None: # If we already have a card image, don't try to rebuild a new card # image, which self.image would do image = self._image else: image = self.image if self.keyword in self._special_keywords: keyword, valuecomment = image.split(' ', 1) else: try: delim_index = image.index(self._value_indicator) except ValueError: delim_index = None # The equal sign may not be any higher than column 10; anything # past that must be considered part of the card value if delim_index is None: keyword = image[:KEYWORD_LENGTH] valuecomment = image[KEYWORD_LENGTH:] elif delim_index > 10 and image[:9] != 'HIERARCH ': keyword = image[:8] valuecomment = image[8:] else: keyword, valuecomment = image.split(self._value_indicator, 1) return keyword.strip(), valuecomment.strip() def _fix_keyword(self): if self.field_specifier: keyword, field_specifier = self._keyword.split('.', 1) self._keyword = '.'.join([keyword.upper(), field_specifier]) else: self._keyword = self._keyword.upper() self._modified = True def _fix_value(self): """Fix the card image for fixable non-standard compliance.""" value = None keyword, valuecomment = self._split() m = self._value_NFSC_RE.match(valuecomment) # for the unparsable case if m is None: try: value, comment = valuecomment.split('/', 1) self.value = value.strip() self.comment = comment.strip() except (ValueError, IndexError): self.value = valuecomment self._valuestring = self._value return elif m.group('numr') is not None: numr = self._number_NFSC_RE.match(m.group('numr')) value = translate(numr.group('digt'), FIX_FP_TABLE, ' ') if numr.group('sign') is not None: value = numr.group('sign') + value elif m.group('cplx') is not None: real = self._number_NFSC_RE.match(m.group('real')) rdigt = translate(real.group('digt'), FIX_FP_TABLE, ' ') if real.group('sign') is not None: rdigt = real.group('sign') + rdigt imag = self._number_NFSC_RE.match(m.group('imag')) idigt = translate(imag.group('digt'), FIX_FP_TABLE, ' ') if imag.group('sign') is not None: idigt = imag.group('sign') + 
idigt value = '({}, {})'.format(rdigt, idigt) self._valuestring = value # The value itself has not been modified, but its serialized # representation (as stored in self._valuestring) has been changed, so # still set this card as having been modified (see ticket #137) self._modified = True def _format_keyword(self): if self.keyword: if self.field_specifier: return '{:{len}}'.format(self.keyword.split('.', 1)[0], len=KEYWORD_LENGTH) elif self._hierarch: return 'HIERARCH {} '.format(self.keyword) else: return '{:{len}}'.format(self.keyword, len=KEYWORD_LENGTH) else: return ' ' * KEYWORD_LENGTH def _format_value(self): # value string float_types = (float, np.floating, complex, np.complexfloating) # Force the value to be parsed out first value = self.value # But work with the underlying raw value instead (to preserve # whitespace, for now...) value = self._value if self.keyword in self._commentary_keywords: # The value of a commentary card must be just a raw unprocessed # string value = str(value) elif (self._valuestring and not self._valuemodified and isinstance(self.value, float_types)): # Keep the existing formatting for float/complex numbers value = '{:>20}'.format(self._valuestring) elif self.field_specifier: value = _format_value(self._value).strip() value = "'{}: {}'".format(self.field_specifier, value) else: value = _format_value(value) # For HIERARCH cards the value should be shortened to conserve space if not self.field_specifier and len(self.keyword) > KEYWORD_LENGTH: value = value.strip() return value def _format_comment(self): if not self.comment: return '' else: return ' / {}'.format(self._comment) def _format_image(self): keyword = self._format_keyword() value = self._format_value() is_commentary = keyword.strip() in self._commentary_keywords if is_commentary: comment = '' else: comment = self._format_comment() # equal sign string # by default use the standard value indicator even for HIERARCH cards; # later we may abbreviate it if necessary delimiter = VALUE_INDICATOR if is_commentary: delimiter = '' # put all parts together output = ''.join([keyword, delimiter, value, comment]) # For HIERARCH cards we can save a bit of space if necessary by # removing the space between the keyword and the equals sign; I'm # guessing this is part of the HIEARCH card specification keywordvalue_length = len(keyword) + len(delimiter) + len(value) if (keywordvalue_length > self.length and keyword.startswith('HIERARCH')): if (keywordvalue_length == self.length + 1 and keyword[-1] == ' '): output = ''.join([keyword[:-1], delimiter, value, comment]) else: # I guess the HIERARCH card spec is incompatible with CONTINUE # cards raise ValueError('The header keyword {!r} with its value is ' 'too long'.format(self.keyword)) if len(output) <= self.length: output = '{:80}'.format(output) else: # longstring case (CONTINUE card) # try not to use CONTINUE if the string value can fit in one line. # Instead, just truncate the comment if (isinstance(self.value, str) and len(value) > (self.length - 10)): output = self._format_long_image() else: warnings.warn('Card is too long, comment will be truncated.', VerifyWarning) output = output[:Card.length] return output def _format_long_image(self): """ Break up long string value/comment into ``CONTINUE`` cards. This is a primitive implementation: it will put the value string in one block and the comment string in another. Also, it does not break at the blank space between words. So it may not look pretty. 
""" if self.keyword in Card._commentary_keywords: return self._format_long_commentary_image() value_length = 67 comment_length = 64 output = [] # do the value string value = self._value.replace("'", "''") words = _words_group(value, value_length) for idx, word in enumerate(words): if idx == 0: headstr = '{:{len}}= '.format(self.keyword, len=KEYWORD_LENGTH) else: headstr = 'CONTINUE ' # If this is the final CONTINUE remove the '&' if not self.comment and idx == len(words) - 1: value_format = "'{}'" else: value_format = "'{}&'" value = value_format.format(word) output.append('{:80}'.format(headstr + value)) # do the comment string comment_format = "{}" if self.comment: words = _words_group(self.comment, comment_length) for idx, word in enumerate(words): # If this is the final CONTINUE remove the '&' if idx == len(words) - 1: headstr = "CONTINUE '' / " else: headstr = "CONTINUE '&' / " comment = headstr + comment_format.format(word) output.append('{:80}'.format(comment)) return ''.join(output) def _format_long_commentary_image(self): """ If a commentary card's value is too long to fit on a single card, this will render the card as multiple consecutive commentary card of the same type. """ maxlen = Card.length - KEYWORD_LENGTH value = self._format_value() output = [] idx = 0 while idx < len(value): output.append(str(Card(self.keyword, value[idx:idx + maxlen]))) idx += maxlen return ''.join(output) def _verify(self, option='warn'): self._verified = True errs = _ErrList([]) fix_text = ('Fixed {!r} card to meet the FITS ' 'standard.'.format(self.keyword)) # Don't try to verify cards that already don't meet any recognizable # standard if self._invalid: return errs # verify the equal sign position if (self.keyword not in self._commentary_keywords and (self._image and self._image[:9].upper() != 'HIERARCH ' and self._image.find('=') != 8)): errs.append(self.run_option( option, err_text='Card {!r} is not FITS standard (equal sign not ' 'at column 8).'.format(self.keyword), fix_text=fix_text, fix=self._fix_value)) # verify the key, it is never fixable # always fix silently the case where "=" is before column 9, # since there is no way to communicate back to the _keys. 
if ((self._image and self._image[:8].upper() == 'HIERARCH') or self._hierarch): pass else: if self._image: # PyFITS will auto-uppercase any standard keyword, so lowercase # keywords can only occur if they came from the wild keyword = self._split()[0] if keyword != keyword.upper(): # Keyword should be uppercase unless it's a HIERARCH card errs.append(self.run_option( option, err_text='Card keyword {!r} is not upper case.'.format( keyword), fix_text=fix_text, fix=self._fix_keyword)) keyword = self.keyword if self.field_specifier: keyword = keyword.split('.', 1)[0] if not self._keywd_FSC_RE.match(keyword): errs.append(self.run_option( option, err_text='Illegal keyword name {!r}'.format(keyword), fixable=False)) # verify the value, it may be fixable keyword, valuecomment = self._split() if self.keyword in self._commentary_keywords: # For commentary keywords all that needs to be ensured is that it # contains only printable ASCII characters if not self._ascii_text_re.match(valuecomment): errs.append(self.run_option( option, err_text='Unprintable string {!r}; commentary cards may ' 'only contain printable ASCII characters'.format( valuecomment), fixable=False)) else: m = self._value_FSC_RE.match(valuecomment) if not m: errs.append(self.run_option( option, err_text='Card {!r} is not FITS standard (invalid value ' 'string: {!r}).'.format(self.keyword, valuecomment), fix_text=fix_text, fix=self._fix_value)) # verify the comment (string), it is never fixable m = self._value_NFSC_RE.match(valuecomment) if m is not None: comment = m.group('comm') if comment is not None: if not self._ascii_text_re.match(comment): errs.append(self.run_option( option, err_text=('Unprintable string {!r}; header comments ' 'may only contain printable ASCII ' 'characters'.format(comment)), fixable=False)) return errs def _itersubcards(self): """ If the card image is greater than 80 characters, it should consist of a normal card followed by one or more CONTINUE card. This method returns the subcards that make up this logical card. """ ncards = len(self._image) // Card.length for idx in range(0, Card.length * ncards, Card.length): card = Card.fromstring(self._image[idx:idx + Card.length]) if idx > 0 and card.keyword.upper() != 'CONTINUE': raise VerifyError( 'Long card images must have CONTINUE cards after ' 'the first card.') if not isinstance(card.value, str): raise VerifyError('CONTINUE cards must have string values.') yield card def _int_or_float(s): """ Converts an a string to an int if possible, or to a float. If the string is neither a string or a float a value error is raised. """ if isinstance(s, float): # Already a float so just pass through return s try: return int(s) except (ValueError, TypeError): try: return float(s) except (ValueError, TypeError) as e: raise ValueError(str(e)) def _format_value(value): """ Converts a card value to its appropriate string representation as defined by the FITS format. 
""" # string value should occupies at least 8 columns, unless it is # a null string if isinstance(value, str): if value == '': return "''" else: exp_val_str = value.replace("'", "''") val_str = "'{:8}'".format(exp_val_str) return '{:20}'.format(val_str) # must be before int checking since bool is also int elif isinstance(value, (bool, np.bool_)): return '{:>20}'.format(repr(value)[0]) # T or F elif _is_int(value): return '{:>20d}'.format(value) elif isinstance(value, (float, np.floating)): return '{:>20}'.format(_format_float(value)) elif isinstance(value, (complex, np.complexfloating)): val_str = '({}, {})'.format(_format_float(value.real), _format_float(value.imag)) return '{:>20}'.format(val_str) elif isinstance(value, Undefined): return '' else: return '' def _format_float(value): """Format a floating number to make sure it gets the decimal point.""" value_str = '{:.16G}'.format(value) if '.' not in value_str and 'E' not in value_str: value_str += '.0' elif 'E' in value_str: # On some Windows builds of Python (and possibly other platforms?) the # exponent is zero-padded out to, it seems, three digits. Normalize # the format to pad only to two digits. significand, exponent = value_str.split('E') if exponent[0] in ('+', '-'): sign = exponent[0] exponent = exponent[1:] else: sign = '' value_str = '{}E{}{:02d}'.format(significand, sign, int(exponent)) # Limit the value string to at most 20 characters. str_len = len(value_str) if str_len > 20: idx = value_str.find('E') if idx < 0: value_str = value_str[:20] else: value_str = value_str[:20 - (str_len - idx)] + value_str[idx:] return value_str def _pad(input): """Pad blank space to the input string to be multiple of 80.""" _len = len(input) if _len == Card.length: return input elif _len > Card.length: strlen = _len % Card.length if strlen == 0: return input else: return input + ' ' * (Card.length - strlen) # minimum length is 80 else: strlen = _len % Card.length return input + ' ' * (Card.length - strlen)
# Licensed under a 3-clause BSD style license - see PYFITS.rst import operator import warnings from astropy.utils import indent from astropy.utils.exceptions import AstropyUserWarning class VerifyError(Exception): """ Verify exception class. """ class VerifyWarning(AstropyUserWarning): """ Verify warning class. """ VERIFY_OPTIONS = ['ignore', 'warn', 'exception', 'fix', 'silentfix', 'fix+ignore', 'fix+warn', 'fix+exception', 'silentfix+ignore', 'silentfix+warn', 'silentfix+exception'] class _Verify: """ Shared methods for verification. """ def run_option(self, option='warn', err_text='', fix_text='Fixed.', fix=None, fixable=True): """ Execute the verification with selected option. """ text = err_text if option in ['warn', 'exception']: fixable = False # fix the value elif not fixable: text = 'Unfixable error: {}'.format(text) else: if fix: fix() text += ' ' + fix_text return (fixable, text) def verify(self, option='warn'): """ Verify all values in the instance. Parameters ---------- option : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`verify` for more info. """ opt = option.lower() if opt not in VERIFY_OPTIONS: raise ValueError('Option {!r} not recognized.'.format(option)) if opt == 'ignore': return errs = self._verify(opt) # Break the verify option into separate options related to reporting of # errors, and fixing of fixable errors if '+' in opt: fix_opt, report_opt = opt.split('+') elif opt in ['fix', 'silentfix']: # The original default behavior for 'fix' and 'silentfix' was to # raise an exception for unfixable errors fix_opt, report_opt = opt, 'exception' else: fix_opt, report_opt = None, opt if fix_opt == 'silentfix' and report_opt == 'ignore': # Fixable errors were fixed, but don't report anything return if fix_opt == 'silentfix': # Don't print out fixable issues; the first element of each verify # item is a boolean indicating whether or not the issue was fixable line_filter = lambda x: not x[0] elif fix_opt == 'fix' and report_opt == 'ignore': # Don't print *unfixable* issues, but do print fixed issues; this # is probably not very useful but the option exists for # completeness line_filter = operator.itemgetter(0) else: line_filter = None unfixable = False messages = [] for fixable, message in errs.iter_lines(filter=line_filter): if fixable is not None: unfixable = not fixable messages.append(message) if messages: messages.insert(0, 'Verification reported errors:') messages.append('Note: astropy.io.fits uses zero-based indexing.\n') if fix_opt == 'silentfix' and not unfixable: return elif report_opt == 'warn' or (fix_opt == 'fix' and not unfixable): for line in messages: warnings.warn(line, VerifyWarning) else: raise VerifyError('\n' + '\n'.join(messages)) class _ErrList(list): """ Verification errors list class. It has a nested list structure constructed by error messages generated by verifications at different class levels. """ def __init__(self, val=(), unit='Element'): super().__init__(val) self.unit = unit def __str__(self): return '\n'.join(item[1] for item in self.iter_lines()) def iter_lines(self, filter=None, shift=0): """ Iterate the nested structure as a list of strings with appropriate indentations for each level of structure. 
""" element = 0 # go through the list twice, first time print out all top level # messages for item in self: if not isinstance(item, _ErrList): if filter is None or filter(item): yield item[0], indent(item[1], shift=shift) # second time go through the next level items, each of the next level # must present, even it has nothing. for item in self: if isinstance(item, _ErrList): next_lines = item.iter_lines(filter=filter, shift=shift + 1) try: first_line = next(next_lines) except StopIteration: first_line = None if first_line is not None: if self.unit: # This line is sort of a header for the next level in # the hierarchy yield None, indent('{} {}:'.format(self.unit, element), shift=shift) yield first_line for line in next_lines: yield line element += 1
# Licensed under a 3-clause BSD style license - see LICENSE.rst """An extensible ASCII table reader and writer. cds.py: Classes to read CDS / Vizier table format :Copyright: Smithsonian Astrophysical Observatory (2011) :Author: Tom Aldcroft ([email protected]) """ import fnmatch import itertools import re import os from contextlib import suppress from . import core from . import fixedwidth from astropy.units import Unit __doctest_skip__ = ['*'] class CdsHeader(core.BaseHeader): col_type_map = {'e': core.FloatType, 'f': core.FloatType, 'i': core.IntType, 'a': core.StrType} 'The ReadMe file to construct header from.' readme = None def get_type_map_key(self, col): match = re.match(r'\d*(\S)', col.raw_type.lower()) if not match: raise ValueError('Unrecognized CDS format "{}" for column "{}"'.format( col.raw_type, col.name)) return match.group(1) def get_cols(self, lines): """ Initialize the header Column objects from the table ``lines`` for a CDS header. Parameters ---------- lines : list List of table lines """ # Read header block for the table ``self.data.table_name`` from the read # me file ``self.readme``. if self.readme and self.data.table_name: in_header = False readme_inputter = core.BaseInputter() f = readme_inputter.get_lines(self.readme) # Header info is not in data lines but in a separate file. lines = [] comment_lines = 0 for line in f: line = line.strip() if in_header: lines.append(line) if line.startswith(('------', '=======')): comment_lines += 1 if comment_lines == 3: break else: match = re.match(r'Byte-by-byte Description of file: (?P<name>.+)$', line, re.IGNORECASE) if match: # Split 'name' in case in contains multiple files names = [s for s in re.split('[, ]+', match.group('name')) if s] # Iterate on names to find if one matches the tablename # including wildcards. for pattern in names: if fnmatch.fnmatch(self.data.table_name, pattern): in_header = True lines.append(line) break else: raise core.InconsistentTableError("Can't find table {0} in {1}".format( self.data.table_name, self.readme)) found_line = False for i_col_def, line in enumerate(lines): if re.match(r'Byte-by-byte Description', line, re.IGNORECASE): found_line = True elif found_line: # First line after list of file descriptions i_col_def -= 1 # Set i_col_def to last description line break re_col_def = re.compile(r"""\s* (?P<start> \d+ \s* -)? \s* (?P<end> \d+) \s+ (?P<format> [\w.]+) \s+ (?P<units> \S+) \s+ (?P<name> \S+) (\s+ (?P<descr> \S.*))?""", re.VERBOSE) cols = [] for line in itertools.islice(lines, i_col_def+4, None): if line.startswith(('------', '=======')): break match = re_col_def.match(line) if match: col = core.Column(name=match.group('name')) col.start = int(re.sub(r'[-\s]', '', match.group('start') or match.group('end'))) - 1 col.end = int(match.group('end')) unit = match.group('units') if unit == '---': col.unit = None # "---" is the marker for no unit in CDS table else: col.unit = Unit(unit, format='cds', parse_strict='warn') col.description = (match.group('descr') or '').strip() col.raw_type = match.group('format') col.type = self.get_col_type(col) match = re.match( r'\? (?P<equal> =)? 
(?P<nullval> \S*) (\s+ (?P<descriptiontext> \S.*))?', col.description, re.VERBOSE) if match: col.description = (match.group('descriptiontext') or '').strip() if issubclass(col.type, core.FloatType): fillval = 'nan' else: fillval = '0' if match.group('nullval') == '-': col.null = '---' # CDS tables can use -, --, ---, or ---- to mark missing values # see https://github.com/astropy/astropy/issues/1335 for i in [1, 2, 3, 4]: self.data.fill_values.append(('-'*i, fillval, col.name)) else: col.null = match.group('nullval') self.data.fill_values.append((col.null, fillval, col.name)) cols.append(col) else: # could be a continuation of the previous col's description if cols: cols[-1].description += line.strip() else: raise ValueError('Line "{}" not parsable as CDS header'.format(line)) self.names = [x.name for x in cols] self.cols = cols class CdsData(core.BaseData): """CDS table data reader """ splitter_class = fixedwidth.FixedWidthSplitter def process_lines(self, lines): """Skip over CDS header by finding the last section delimiter""" # If the header has a ReadMe and data has a filename # then no need to skip, as the data lines do not have header # info. The ``read`` method adds the table_name to the ``data`` # attribute. if self.header.readme and self.table_name: return lines i_sections = [i for i, x in enumerate(lines) if x.startswith(('------', '======='))] if not i_sections: raise core.InconsistentTableError('No CDS section delimiter found') return lines[i_sections[-1]+1:] class Cds(core.BaseReader): """CDS format table. See: http://vizier.u-strasbg.fr/doc/catstd.htx Example:: Table: Table name here = ============================================================================== Catalog reference paper Bibliography info here ================================================================================ ADC_Keywords: Keyword ; Another keyword ; etc Description: Catalog description here. ================================================================================ Byte-by-byte Description of file: datafile3.txt -------------------------------------------------------------------------------- Bytes Format Units Label Explanations -------------------------------------------------------------------------------- 1- 3 I3 --- Index Running identification number 5- 6 I2 h RAh Hour of Right Ascension (J2000) 8- 9 I2 min RAm Minute of Right Ascension (J2000) 11- 15 F5.2 s RAs Second of Right Ascension (J2000) -------------------------------------------------------------------------------- Note (1): A CDS file can contain sections with various metadata. Notes can be multiple lines. Note (2): Another note. -------------------------------------------------------------------------------- 1 03 28 39.09 2 04 18 24.11 **About parsing the CDS format** The CDS format consists of a table description and the table data. These can be in separate files as a ``ReadMe`` file plus data file(s), or combined in a single file. Different subsections within the description are separated by lines of dashes or equal signs ("------" or "======"). The table which specifies the column information must be preceded by a line starting with "Byte-by-byte Description of file:". In the case where the table description is combined with the data values, the data must be in the last section and must be preceded by a section delimiter line (dashes or equal signs only). **Basic usage** Use the ``ascii.read()`` function as normal, with an optional ``readme`` parameter indicating the CDS ReadMe file. 
If not supplied, it is assumed
    that the header information is at the top of the given table.  Examples::

      >>> from astropy.io import ascii
      >>> table = ascii.read("data/cds.dat")
      >>> table = ascii.read("data/vizier/table1.dat", readme="data/vizier/ReadMe")
      >>> table = ascii.read("data/cds/multi/lhs2065.dat", readme="data/cds/multi/ReadMe")
      >>> table = ascii.read("data/cds/glob/lmxbrefs.dat", readme="data/cds/glob/ReadMe")

    The table name and the CDS ReadMe file can be entered as URLs.  This can be
    used to directly load tables from the Internet.  For example, Vizier tables
    from the CDS::

      >>> table = ascii.read("ftp://cdsarc.u-strasbg.fr/pub/cats/VII/253/snrs.dat",
      ...         readme="ftp://cdsarc.u-strasbg.fr/pub/cats/VII/253/ReadMe")

    If the header (ReadMe) and data are stored in a single file and there
    is content between the header and the data (for instance Notes), then the
    parsing process may fail.  In this case you can instruct the reader to
    guess the actual start of the data by supplying ``data_start='guess'`` in the
    call to the ``ascii.read()`` function.  You should verify that the output
    data table matches expectations based on the input CDS file.

    **Using a reader object**

    When a ``Cds`` reader object is created with a ``readme`` parameter
    passed to it at initialization, the ``read`` method, when executed with a
    table filename, takes the header information for the specified table from
    the ``readme`` file.  An ``InconsistentTableError`` is raised if the
    ``readme`` file does not have header information for the given table.

      >>> readme = "data/vizier/ReadMe"
      >>> r = ascii.get_reader(ascii.Cds, readme=readme)
      >>> table = r.read("data/vizier/table1.dat")
      >>> # table5.dat has the same ReadMe file
      >>> table = r.read("data/vizier/table5.dat")

    If no ``readme`` parameter is specified, then the header
    information is assumed to be at the top of the given table.

      >>> r = ascii.get_reader(ascii.Cds)
      >>> table = r.read("data/cds.dat")
      >>> # The following gives InconsistentTableError, since no
      >>> # readme file was given and table1.dat does not have a header.
      >>> table = r.read("data/vizier/table1.dat")
      Traceback (most recent call last):
        ...
      InconsistentTableError: No CDS section delimiter found

    Caveats:

    * The Units and Explanations are available in the column ``unit`` and
      ``description`` attributes, respectively.
    * The other metadata defined by this format is not available in the output table.

    """
    _format_name = 'cds'
    _io_registry_format_aliases = ['cds']
    _io_registry_can_write = False
    _description = 'CDS format table'

    data_class = CdsData
    header_class = CdsHeader

    def __init__(self, readme=None):
        super().__init__()
        self.header.readme = readme

    def write(self, table=None):
        """Not available for the Cds class (raises NotImplementedError)"""
        raise NotImplementedError

    def read(self, table):
        # If the read kwarg `data_start` is 'guess' then the table may have extraneous
        # lines between the end of the header and the beginning of data.
        if self.data.start_line == 'guess':
            # Replicate the first part of BaseReader.read up to the point where
            # the table lines are initially read in.
            with suppress(TypeError):
                # For strings only
                if os.linesep not in table + '':
                    self.data.table_name = os.path.basename(table)

            self.data.header = self.header
            self.header.data = self.data

            # Get a list of the lines (rows) in the table
            lines = self.inputter.get_lines(table)

            # Now try increasing data.start_line by one until the table reads
            # successfully.  For efficiency use the in-memory list of lines
            # instead of `table`, which could be a file.
for data_start in range(len(lines)): self.data.start_line = data_start with suppress(Exception): table = super().read(lines) return table else: return super().read(table)
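
# --- Editor's usage sketch (not part of astropy source): a minimal, runnable
# --- example of the combined header-plus-data CDS layout that ``Cds`` parses.
# --- The table content below is invented for illustration.  For files with
# --- extra notes between the header and the data, passing
# --- ``data_start='guess'`` triggers the probing loop in ``read`` above.
from astropy.io import ascii  # assumes astropy is installed

_cds_lines = [
    'Title: Example',
    '==============================================================================',
    'Byte-by-byte Description of file: example.dat',
    '--------------------------------------------------------------------------------',
    '   Bytes Format Units  Label    Explanations',
    '--------------------------------------------------------------------------------',
    '   1-  3 I3     ---    Index    Running identification number',
    '   5-  9 F5.2   s      RAs      Second of Right Ascension (J2000)',
    '--------------------------------------------------------------------------------',
    '  1 39.09',
    '  2 24.11',
]
_table = ascii.read(_cds_lines, format='cds', guess=False)
print(_table)  # Index parsed as integers, RAs as floats with unit 's'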
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.

basic.py:
  Basic table read / write functionality for simple character
  delimited files with various options for column header definition.

:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""


import re

from . import core


class BasicHeader(core.BaseHeader):
    """
    Basic table Header Reader

    Set a few defaults for common ascii table formats
    (start at line 0, comments begin with ``#`` and possibly white space)
    """
    start_line = 0
    comment = r'\s*#'
    write_comment = '# '


class BasicData(core.BaseData):
    """
    Basic table Data Reader

    Set a few defaults for common ascii table formats
    (start at line 1, comments begin with ``#`` and possibly white space)
    """
    start_line = 1
    comment = r'\s*#'
    write_comment = '# '


class Basic(core.BaseReader):
    r"""Character-delimited table with a single header line at the top.

    Lines beginning with a comment character (default='#') as the first
    non-whitespace character are comments.

    Example table::

      # Column definition is the first uncommented line
      # Default delimiter is the space character.
      apples oranges pears

      # Data starts after the header column definition, blank lines ignored
      1 2 3
      4 5 6
    """
    _format_name = 'basic'
    _description = 'Basic table with custom delimiters'
    _io_registry_format_aliases = ['ascii']

    header_class = BasicHeader
    data_class = BasicData


class NoHeaderHeader(BasicHeader):
    """
    Reader for the header of tables without a header line

    Set the header start line number to `None`, which tells the basic
    reader there is no header line.
    """
    start_line = None


class NoHeaderData(BasicData):
    """
    Reader for the data of tables without a header line

    Data starts at the first uncommented line since there is no header line.
    """
    start_line = 0


class NoHeader(Basic):
    """Character-delimited table with no header line.

    When reading, columns are autonamed using header.auto_format which defaults
    to "col%d".  Otherwise this reader is the same as the :class:`Basic` class
    from which it is derived.

    Example::

      # Table data
      1 2 "hello there"
      3 4 world
    """
    _format_name = 'no_header'
    _description = 'Basic table with no headers'
    header_class = NoHeaderHeader
    data_class = NoHeaderData


class CommentedHeaderHeader(BasicHeader):
    """
    Header class for which the column definition line starts with the
    comment character.  See the :class:`CommentedHeader` class for an example.
    """

    def process_lines(self, lines):
        """
        Return only lines that start with the comment regexp.  For these
        lines strip out the matching characters.
        """
        re_comment = re.compile(self.comment)
        for line in lines:
            match = re_comment.match(line)
            if match:
                yield line[match.end():]

    def write(self, lines):
        lines.append(self.write_comment + self.splitter.join(self.colnames))


class CommentedHeader(Basic):
    """Character-delimited table with column names in a comment line.

    When reading, ``header_start`` can be used to specify the
    line index of column names, and it can be a negative index (for example -1
    for the last commented line).  The default delimiter is the <space>
    character.

    Example::

      # col1 col2 col3
      # Comment line
      1 2 3
      4 5 6
    """
    _format_name = 'commented_header'
    _description = 'Column names in a commented line'

    header_class = CommentedHeaderHeader
    data_class = NoHeaderData

    def read(self, table):
        """
        Read input data (file-like object, filename, list of strings, or
        single string) into a Table and return the result.
""" out = super().read(table) # Strip off the comment line set as the header line for # commented_header format (first by default). if 'comments' in out.meta: idx = self.header.start_line if idx < 0: idx = len(out.meta['comments']) + idx out.meta['comments'] = out.meta['comments'][:idx] + out.meta['comments'][idx+1:] if not out.meta['comments']: del out.meta['comments'] return out def write_header(self, lines, meta): """ Write comment lines after, rather than before, the header. """ self.header.write(lines) self.header.write_comments(lines, meta) class TabHeaderSplitter(core.DefaultSplitter): """Split lines on tab and do not remove whitespace""" delimiter = '\t' process_line = None class TabDataSplitter(TabHeaderSplitter): """ Don't strip data value whitespace since that is significant in TSV tables """ process_val = None skipinitialspace = False class TabHeader(BasicHeader): """ Reader for header of tables with tab separated header """ splitter_class = TabHeaderSplitter class TabData(BasicData): """ Reader for data of tables with tab separated data """ splitter_class = TabDataSplitter class Tab(Basic): """Tab-separated table. Unlike the :class:`Basic` reader, whitespace is not stripped from the beginning and end of either lines or individual column values. Example:: col1 <tab> col2 <tab> col3 # Comment line 1 <tab> 2 <tab> 5 """ _format_name = 'tab' _description = 'Basic table with tab-separated values' header_class = TabHeader data_class = TabData class CsvSplitter(core.DefaultSplitter): """ Split on comma for CSV (comma-separated-value) tables """ delimiter = ',' class CsvHeader(BasicHeader): """ Header that uses the :class:`astropy.io.ascii.basic.CsvSplitter` """ splitter_class = CsvSplitter comment = None write_comment = None class CsvData(BasicData): """ Data that uses the :class:`astropy.io.ascii.basic.CsvSplitter` """ splitter_class = CsvSplitter fill_values = [(core.masked, '')] comment = None write_comment = None class Csv(Basic): """CSV (comma-separated-values) table. This file format may contain rows with fewer entries than the number of columns, a situation that occurs in output from some spreadsheet editors. The missing entries are marked as masked in the output table. Masked values (indicated by an empty '' field value when reading) are written out in the same way with an empty ('') field. This is different from the typical default for `astropy.io.ascii` in which missing values are indicated by ``--``. Example:: num,ra,dec,radius,mag 1,32.23222,10.1211 2,38.12321,-88.1321,2.2,17.0 """ _format_name = 'csv' _io_registry_format_aliases = ['csv'] _io_registry_can_write = True _io_registry_suffix = '.csv' _description = 'Comma-separated-values' header_class = CsvHeader data_class = CsvData def inconsistent_handler(self, str_vals, ncols): """ Adjust row if it is too short. If a data row is shorter than the header, add empty values to make it the right length. Note that this will *not* be called if the row already matches the header. Parameters ---------- str_vals : list A list of value strings from the current row of the table. ncols : int The expected number of entries from the table header. Returns ------- str_vals : list List of strings to be parsed into data entries in the output table. 
""" if len(str_vals) < ncols: str_vals.extend((ncols - len(str_vals)) * ['']) return str_vals class RdbHeader(TabHeader): """ Header for RDB tables """ col_type_map = {'n': core.NumType, 's': core.StrType} def get_type_map_key(self, col): return col.raw_type[-1] def get_cols(self, lines): """ Initialize the header Column objects from the table ``lines``. This is a specialized get_cols for the RDB type: Line 0: RDB col names Line 1: RDB col definitions Line 2+: RDB data rows Parameters ---------- lines : list List of table lines Returns ------- None """ header_lines = self.process_lines(lines) # this is a generator header_vals_list = [hl for _, hl in zip(range(2), self.splitter(header_lines))] if len(header_vals_list) != 2: raise ValueError('RDB header requires 2 lines') self.names, raw_types = header_vals_list if len(self.names) != len(raw_types): raise core.InconsistentTableError('RDB header mismatch between number of column names and column types.') if any(not re.match(r'\d*(N|S)$', x, re.IGNORECASE) for x in raw_types): raise core.InconsistentTableError('RDB types definitions do not all match [num](N|S): {}'.format(raw_types)) self._set_cols_from_names() for col, raw_type in zip(self.cols, raw_types): col.raw_type = raw_type col.type = self.get_col_type(col) def write(self, lines): lines.append(self.splitter.join(self.colnames)) rdb_types = [] for col in self.cols: # Check if dtype.kind is string or unicode. See help(np.core.numerictypes) rdb_type = 'S' if col.info.dtype.kind in ('S', 'U') else 'N' rdb_types.append(rdb_type) lines.append(self.splitter.join(rdb_types)) class RdbData(TabData): """ Data reader for RDB data. Starts reading at line 2. """ start_line = 2 class Rdb(Tab): """Tab-separated file with an extra line after the column definition line that specifies either numeric (N) or string (S) data. See: https://compbio.soe.ucsc.edu/rdb/ Example:: col1 <tab> col2 <tab> col3 N <tab> S <tab> N 1 <tab> 2 <tab> 5 """ _format_name = 'rdb' _io_registry_format_aliases = ['rdb'] _io_registry_suffix = '.rdb' _description = 'Tab-separated with a type definition header line' header_class = RdbHeader data_class = RdbData
# Licensed under a 3-clause BSD style license - see LICENSE.rst # This file connects the readers/writers to the astropy.table.Table class import re import functools from astropy.io import registry as io_registry from astropy.table import Table __all__ = [] def io_read(format, filename, **kwargs): from .ui import read if format != 'ascii': format = re.sub(r'^ascii\.', '', format) kwargs['format'] = format return read(filename, **kwargs) def io_write(format, table, filename, **kwargs): from .ui import write if format != 'ascii': format = re.sub(r'^ascii\.', '', format) kwargs['format'] = format return write(table, filename, **kwargs) def io_identify(suffix, origin, filepath, fileobj, *args, **kwargs): return filepath is not None and filepath.endswith(suffix) def _get_connectors_table(): from .core import FORMAT_CLASSES rows = [] rows.append(('ascii', '', 'Yes', 'ASCII table in any supported format (uses guessing)')) for format in sorted(FORMAT_CLASSES): cls = FORMAT_CLASSES[format] io_format = 'ascii.' + cls._format_name description = getattr(cls, '_description', '') class_link = ':class:`~{0}.{1}`'.format(cls.__module__, cls.__name__) suffix = getattr(cls, '_io_registry_suffix', '') can_write = 'Yes' if getattr(cls, '_io_registry_can_write', True) else '' rows.append((io_format, suffix, can_write, '{0}: {1}'.format(class_link, description))) out = Table(list(zip(*rows)), names=('Format', 'Suffix', 'Write', 'Description')) for colname in ('Format', 'Description'): width = max(len(x) for x in out[colname]) out[colname].format = '%-{0}s'.format(width) return out
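
# --- Editor's usage sketch (not part of astropy source): once these
# --- connectors are registered with ``io_registry`` (astropy does this
# --- automatically at import time), ``Table.read``/``Table.write`` accept
# --- ``format='ascii.<name>'`` and dispatch through ``io_read``/``io_write``
# --- above, which strip the ``ascii.`` prefix before calling the ui functions.
import io

from astropy.table import Table

_t = Table({'a': [1, 2], 'b': [3.0, 4.5]})
_buf = io.StringIO()
_t.write(_buf, format='ascii.csv')  # routed through io_write -> ascii write
print(_buf.getvalue())              # a,b / 1,3.0 / 2,4.5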